
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Thu, 31 Jan 2019 11:22:49
Message-Id: 1548933736.b13a2c729503c4f75dd3091914dceafe4dfe1133.mpagano@gentoo
commit:     b13a2c729503c4f75dd3091914dceafe4dfe1133
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jan 31 11:22:16 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jan 31 11:22:16 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b13a2c72

proj/linux-patches: Linux patch 4.9.154

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1153_linux-4.9.154.patch | 1657 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1661 insertions(+)

diff --git a/0000_README b/0000_README
index f17614f..dfd18f6 100644
--- a/0000_README
+++ b/0000_README
@@ -655,6 +655,10 @@ Patch:  1152_linux-4.9.153.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.9.153
 
+Patch:  1153_linux-4.9.154.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.9.154
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1153_linux-4.9.154.patch b/1153_linux-4.9.154.patch
new file mode 100644
index 0000000..f397bc0
--- /dev/null
+++ b/1153_linux-4.9.154.patch
@@ -0,0 +1,1657 @@
+diff --git a/Makefile b/Makefile
+index 44a487ee24d2..9964792e200f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 153
++SUBLEVEL = 154
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+ 
+diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
+index 9185541035cc..6958545390f0 100644
+--- a/arch/arc/include/asm/perf_event.h
++++ b/arch/arc/include/asm/perf_event.h
+@@ -103,7 +103,8 @@ static const char * const arc_pmu_ev_hw_map[] = {
+ 
+ 	/* counts condition */
+ 	[PERF_COUNT_HW_INSTRUCTIONS] = "iall",
+-	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */
++	/* All jump instructions that are taken */
++	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
+ 	[PERF_COUNT_ARC_BPOK]         = "bpok",	  /* NP-NT, PT-T, PNT-NT */
+ #ifdef CONFIG_ISA_ARCV2
+ 	[PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
+diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S
+index 62ad4bcb841a..f230bb7092fd 100644
+--- a/arch/arc/lib/memset-archs.S
++++ b/arch/arc/lib/memset-archs.S
+@@ -7,11 +7,39 @@
+  */
+ 
+ #include <linux/linkage.h>
++#include <asm/cache.h>
+ 
+-#undef PREALLOC_NOT_AVAIL
++/*
++ * The memset implementation below is optimized to use the prefetchw and prealloc
++ * instructions on CPUs with a 64B L1 data cache line (L1_CACHE_SHIFT == 6).
++ * If you want to implement an optimized memset for other possible L1 data cache
++ * line lengths (32B and 128B), you should rewrite the code, carefully checking
++ * that no prefetchw/prealloc instruction is issued for L1 cache lines which
++ * don't belong to the memset area.
++ */
++
++#if L1_CACHE_SHIFT == 6
++
++.macro PREALLOC_INSTR	reg, off
++	prealloc	[\reg, \off]
++.endm
++
++.macro PREFETCHW_INSTR	reg, off
++	prefetchw	[\reg, \off]
++.endm
++
++#else
++
++.macro PREALLOC_INSTR
++.endm
++
++.macro PREFETCHW_INSTR
++.endm
++
++#endif
+ 
+ ENTRY_CFI(memset)
+-	prefetchw [r0]		; Prefetch the write location
++	PREFETCHW_INSTR	r0, 0	; Prefetch the first write location
+ 	mov.f	0, r2
+ ;;; if size is zero
+ 	jz.d	[blink]
+@@ -48,11 +76,8 @@ ENTRY_CFI(memset)
+ 
+ 	lpnz	@.Lset64bytes
+ 	;; LOOP START
+-#ifdef PREALLOC_NOT_AVAIL
+-	prefetchw [r3, 64]	;Prefetch the next write location
+-#else
+-	prealloc  [r3, 64]
+-#endif
++	PREALLOC_INSTR	r3, 64	; alloc next line w/o fetching
++
+ #ifdef CONFIG_ARC_HAS_LL64
+ 	std.ab	r4, [r3, 8]
+ 	std.ab	r4, [r3, 8]
+@@ -85,7 +110,6 @@ ENTRY_CFI(memset)
+ 	lsr.f	lp_count, r2, 5 ;Last remaining  max 124 bytes
+ 	lpnz	.Lset32bytes
+ 	;; LOOP START
+-	prefetchw   [r3, 32]	;Prefetch the next write location
+ #ifdef CONFIG_ARC_HAS_LL64
+ 	std.ab	r4, [r3, 8]
+ 	std.ab	r4, [r3, 8]
+diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
+index 0c7a7d5d95f1..a651c2bc94ef 100644
+--- a/arch/s390/kernel/early.c
++++ b/arch/s390/kernel/early.c
+@@ -224,10 +224,10 @@ static noinline __init void detect_machine_type(void)
+ 	if (stsi(vmms, 3, 2, 2) || !vmms->count)
+ 		return;
+ 
+-	/* Running under KVM? If not we assume z/VM */
++	/* Detect known hypervisors */
+ 	if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
+ 		S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
+-	else
++	else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))
+ 		S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
+ }
+ 
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index feb9d97a9d14..a559908d180e 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -863,6 +863,8 @@ void __init setup_arch(char **cmdline_p)
+ 		pr_info("Linux is running under KVM in 64-bit mode\n");
+ 	else if (MACHINE_IS_LPAR)
+ 		pr_info("Linux is running natively in 64-bit mode\n");
++	else
++		pr_info("Linux is running as a guest in 64-bit mode\n");
+ 
+ 	/* Have one command line that is parsed and saved in /proc/cmdline */
+ 	/* boot_command_line has been already set up in early.c */
+diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
+index 0a31110f41f6..d52a94e9f57f 100644
+--- a/arch/s390/kernel/smp.c
++++ b/arch/s390/kernel/smp.c
+@@ -357,9 +357,13 @@ void smp_call_online_cpu(void (*func)(void *), void *data)
+  */
+ void smp_call_ipl_cpu(void (*func)(void *), void *data)
+ {
++	struct lowcore *lc = pcpu_devices->lowcore;
++
++	if (pcpu_devices[0].address == stap())
++		lc = &S390_lowcore;
++
+ 	pcpu_delegate(&pcpu_devices[0], func, data,
+-		      pcpu_devices->lowcore->panic_stack -
+-		      PANIC_FRAME_OFFSET + PAGE_SIZE);
++		      lc->panic_stack - PANIC_FRAME_OFFSET + PAGE_SIZE);
+ }
+ 
+ int smp_find_processor_id(u16 address)
+@@ -1139,7 +1143,11 @@ static ssize_t __ref rescan_store(struct device *dev,
+ {
+ 	int rc;
+ 
++	rc = lock_device_hotplug_sysfs();
++	if (rc)
++		return rc;
+ 	rc = smp_rescan_cpus();
++	unlock_device_hotplug();
+ 	return rc ? rc : count;
+ }
+ static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 46e0ad71b4da..851e9d6c864f 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -5795,8 +5795,7 @@ restart:
+ 		toggle_interruptibility(vcpu, ctxt->interruptibility);
+ 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
+ 		kvm_rip_write(vcpu, ctxt->eip);
+-		if (r == EMULATE_DONE &&
+-		    (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
++		if (r == EMULATE_DONE && ctxt->tf)
+ 			kvm_vcpu_do_singlestep(vcpu, &r);
+ 		if (!ctxt->have_exception ||
+ 		    exception_type(ctxt->exception.vector) == EXCPT_TRAP)
+diff --git a/arch/x86/lib/kaslr.c b/arch/x86/lib/kaslr.c
+index 0c7fe444dcdd..d8d868070e24 100644
+--- a/arch/x86/lib/kaslr.c
++++ b/arch/x86/lib/kaslr.c
+@@ -35,8 +35,8 @@ static inline u16 i8254(void)
+ 	u16 status, timer;
+ 
+ 	do {
+-		outb(I8254_PORT_CONTROL,
+-		     I8254_CMD_READBACK | I8254_SELECT_COUNTER0);
++		outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0,
++		     I8254_PORT_CONTROL);
+ 		status = inb(I8254_PORT_COUNTER0);
+ 		timer  = inb(I8254_PORT_COUNTER0);
+ 		timer |= inb(I8254_PORT_COUNTER0) << 8;
+diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
+index ef32e5766a01..06cf7427d0c4 100644
+--- a/drivers/acpi/nfit/core.c
++++ b/drivers/acpi/nfit/core.c
+@@ -185,6 +185,32 @@ static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
+ 	return 0;
+ }
+ 
++static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
++		struct nd_cmd_pkg *call_pkg)
++{
++	if (call_pkg) {
++		int i;
++
++		if (nfit_mem->family != call_pkg->nd_family)
++			return -ENOTTY;
++
++		for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
++			if (call_pkg->nd_reserved2[i])
++				return -EINVAL;
++		return call_pkg->nd_command;
++	}
++
++	/* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */
++	if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
++		return cmd;
++
++	/*
++	 * Force function number validation to fail since 0 is never
++	 * published as a valid function in dsm_mask.
++	 */
++	return 0;
++}
++
+ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
+ 		unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
+ {
+@@ -197,17 +223,11 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
+ 	unsigned long cmd_mask, dsm_mask;
+ 	u32 offset, fw_status = 0;
+ 	acpi_handle handle;
+-	unsigned int func;
+ 	const u8 *uuid;
+-	int rc, i;
++	int func, rc, i;
+ 
+ 	if (cmd_rc)
+ 		*cmd_rc = -EINVAL;
+-	func = cmd;
+-	if (cmd == ND_CMD_CALL) {
+-		call_pkg = buf;
+-		func = call_pkg->nd_command;
+-	}
+ 
+ 	if (nvdimm) {
+ 		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+@@ -215,9 +235,12 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
+ 
+ 		if (!adev)
+ 			return -ENOTTY;
+-		if (call_pkg && nfit_mem->family != call_pkg->nd_family)
+-			return -ENOTTY;
+ 
++		if (cmd == ND_CMD_CALL)
++			call_pkg = buf;
++		func = cmd_to_func(nfit_mem, cmd, call_pkg);
++		if (func < 0)
++			return func;
+ 		dimm_name = nvdimm_name(nvdimm);
+ 		cmd_name = nvdimm_cmd_name(cmd);
+ 		cmd_mask = nvdimm_cmd_mask(nvdimm);
+@@ -228,6 +251,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
+ 	} else {
+ 		struct acpi_device *adev = to_acpi_dev(acpi_desc);
+ 
++		func = cmd;
+ 		cmd_name = nvdimm_bus_cmd_name(cmd);
+ 		cmd_mask = nd_desc->cmd_mask;
+ 		dsm_mask = cmd_mask;
+@@ -240,7 +264,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
+ 	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
+ 		return -ENOTTY;
+ 
+-	if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
++	/*
++	 * Check for a valid command.  For ND_CMD_CALL, we also have to
++	 * make sure that the DSM function is supported.
++	 */
++	if (cmd == ND_CMD_CALL && !test_bit(func, &dsm_mask))
++		return -ENOTTY;
++	else if (!test_bit(cmd, &cmd_mask))
+ 		return -ENOTTY;
+ 
+ 	in_obj.type = ACPI_TYPE_PACKAGE;
+@@ -1433,6 +1463,13 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
+ 		return 0;
+ 	}
+ 
++	/*
++	 * Function 0 is the command interrogation function, don't
++	 * export it to potential userspace use, and enable it to be
++	 * used as an error value in acpi_nfit_ctl().
++	 */
++	dsm_mask &= ~1UL;
++
+ 	uuid = to_nfit_uuid(nfit_mem->family);
+ 	for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
+ 		if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i))
+diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c
+index 3a3ff2eb6cba..7a4f9346cccf 100644
+--- a/drivers/char/mwave/mwavedd.c
++++ b/drivers/char/mwave/mwavedd.c
+@@ -59,6 +59,7 @@
+ #include <linux/mutex.h>
+ #include <linux/delay.h>
+ #include <linux/serial_8250.h>
++#include <linux/nospec.h>
+ #include "smapi.h"
+ #include "mwavedd.h"
+ #include "3780i.h"
+@@ -289,6 +290,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
+ 						ipcnum);
+ 				return -EINVAL;
+ 			}
++			ipcnum = array_index_nospec(ipcnum,
++						    ARRAY_SIZE(pDrvData->IPCs));
+ 			PRINTK_3(TRACE_MWAVE,
+ 				"mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
+ 				" ipcnum %x entry usIntCount %x\n",
+@@ -317,6 +320,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
+ 						" Invalid ipcnum %x\n", ipcnum);
+ 				return -EINVAL;
+ 			}
++			ipcnum = array_index_nospec(ipcnum,
++						    ARRAY_SIZE(pDrvData->IPCs));
+ 			PRINTK_3(TRACE_MWAVE,
+ 				"mwavedd::mwave_ioctl IOCTL_MW_GET_IPC"
+ 				" ipcnum %x, usIntCount %x\n",
+@@ -383,6 +388,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
+ 						ipcnum);
+ 				return -EINVAL;
+ 			}
++			ipcnum = array_index_nospec(ipcnum,
++						    ARRAY_SIZE(pDrvData->IPCs));
+ 			mutex_lock(&mwave_mutex);
+ 			if (pDrvData->IPCs[ipcnum].bIsEnabled == true) {
+ 				pDrvData->IPCs[ipcnum].bIsEnabled = false;
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index f55dcdf99bc5..26476a64e663 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -255,6 +255,8 @@ static const struct xpad_device {
+ 	{ 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
+ 	{ 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
+ 	{ 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
++	{ 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
++	{ 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
+ 	{ 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
+ 	{ 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+ 	{ 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 },
+@@ -431,6 +433,7 @@ static const struct usb_device_id xpad_table[] = {
+ 	XPAD_XBOXONE_VENDOR(0x0e6f),		/* 0x0e6f X-Box One controllers */
+ 	XPAD_XBOX360_VENDOR(0x0f0d),		/* Hori Controllers */
+ 	XPAD_XBOXONE_VENDOR(0x0f0d),		/* Hori Controllers */
++	XPAD_XBOX360_VENDOR(0x1038),		/* SteelSeries Controllers */
+ 	XPAD_XBOX360_VENDOR(0x11c9),		/* Nacon GC100XF */
+ 	XPAD_XBOX360_VENDOR(0x12ab),		/* X-Box 360 dance pads */
+ 	XPAD_XBOX360_VENDOR(0x1430),		/* RedOctane X-Box 360 controllers */
+diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
+index 022be0e22eba..a306453d40d2 100644
+--- a/drivers/input/misc/uinput.c
++++ b/drivers/input/misc/uinput.c
+@@ -39,6 +39,7 @@
+ #include <linux/fs.h>
+ #include <linux/miscdevice.h>
+ #include <linux/uinput.h>
++#include <linux/overflow.h>
+ #include <linux/input/mt.h>
+ #include "../input-compat.h"
+ 
+@@ -335,7 +336,7 @@ static int uinput_open(struct inode *inode, struct file *file)
+ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
+ 				   const struct input_absinfo *abs)
+ {
+-	int min, max;
++	int min, max, range;
+ 
+ 	min = abs->minimum;
+ 	max = abs->maximum;
+@@ -347,7 +348,7 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
+ 		return -EINVAL;
+ 	}
+ 
+-	if (abs->flat > max - min) {
++	if (!check_sub_overflow(max, min, &range) && abs->flat > range) {
+ 		printk(KERN_DEBUG
+ 		       "%s: abs_flat #%02x out of range: %d (min:%d/max:%d)\n",
+ 		       UINPUT_NAME, code, abs->flat, min, max);
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index 558c7589c329..83ca754250fb 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -1372,13 +1372,14 @@ static void its_free_device(struct its_device *its_dev)
+ 	kfree(its_dev);
+ }
+ 
+-static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
++static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
+ {
+ 	int idx;
+ 
+-	idx = find_first_zero_bit(dev->event_map.lpi_map,
+-				  dev->event_map.nr_lpis);
+-	if (idx == dev->event_map.nr_lpis)
++	idx = bitmap_find_free_region(dev->event_map.lpi_map,
++				      dev->event_map.nr_lpis,
++				      get_count_order(nvecs));
++	if (idx < 0)
+ 		return -ENOSPC;
+ 
+ 	*hwirq = dev->event_map.lpi_base + idx;
+@@ -1464,20 +1465,20 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ 	int err;
+ 	int i;
+ 
+-	for (i = 0; i < nr_irqs; i++) {
+-		err = its_alloc_device_irq(its_dev, &hwirq);
+-		if (err)
+-			return err;
++	err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
++	if (err)
++		return err;
+ 
+-		err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
++	for (i = 0; i < nr_irqs; i++) {
++		err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
+ 		if (err)
+ 			return err;
+ 
+ 		irq_domain_set_hwirq_and_chip(domain, virq + i,
+-					      hwirq, &its_irq_chip, its_dev);
++					      hwirq + i, &its_irq_chip, its_dev);
+ 		pr_debug("ID:%d pID:%d vID:%d\n",
+-			 (int)(hwirq - its_dev->event_map.lpi_base),
+-			 (int) hwirq, virq + i);
++			 (int)(hwirq + i - its_dev->event_map.lpi_base),
++			 (int)(hwirq + i), virq + i);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
+index 149fbac97cb6..d20f4023f6c1 100644
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -1689,7 +1689,7 @@ int dm_thin_remove_range(struct dm_thin_device *td,
+ 	return r;
+ }
+ 
+-int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
++int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
+ {
+ 	int r;
+ 	uint32_t ref_count;
+@@ -1697,7 +1697,7 @@ int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *resu
+ 	down_read(&pmd->root_lock);
+ 	r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
+ 	if (!r)
+-		*result = (ref_count != 0);
++		*result = (ref_count > 1);
+ 	up_read(&pmd->root_lock);
+ 
+ 	return r;
+diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
+index 35e954ea20a9..f6be0d733c20 100644
+--- a/drivers/md/dm-thin-metadata.h
++++ b/drivers/md/dm-thin-metadata.h
+@@ -195,7 +195,7 @@ int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
+ 
+ int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);
+ 
+-int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
++int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
+ 
+ int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
+ int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index 81309d7836c5..914c8a6bf93c 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -1017,7 +1017,7 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
+ 	 * passdown we have to check that these blocks are now unused.
+ 	 */
+ 	int r = 0;
+-	bool used = true;
++	bool shared = true;
+ 	struct thin_c *tc = m->tc;
+ 	struct pool *pool = tc->pool;
+ 	dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;
+@@ -1027,11 +1027,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
+ 	while (b != end) {
+ 		/* find start of unmapped run */
+ 		for (; b < end; b++) {
+-			r = dm_pool_block_is_used(pool->pmd, b, &used);
++			r = dm_pool_block_is_shared(pool->pmd, b, &shared);
+ 			if (r)
+ 				goto out;
+ 
+-			if (!used)
++			if (!shared)
+ 				break;
+ 		}
+ 
+@@ -1040,11 +1040,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
+ 
+ 		/* find end of run */
+ 		for (e = b + 1; e != end; e++) {
+-			r = dm_pool_block_is_used(pool->pmd, e, &used);
++			r = dm_pool_block_is_shared(pool->pmd, e, &shared);
+ 			if (r)
+ 				goto out;
+ 
+-			if (used)
++			if (shared)
+ 				break;
+ 		}
+ 
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index ff3d9fc0f1b3..214a48703a4e 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -456,8 +456,6 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb);
+ struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
+ {
+ 	struct can_priv *priv = netdev_priv(dev);
+-	struct sk_buff *skb = priv->echo_skb[idx];
+-	struct canfd_frame *cf;
+ 
+ 	if (idx >= priv->echo_skb_max) {
+ 		netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
+@@ -465,20 +463,21 @@ struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8
+ 		return NULL;
+ 	}
+ 
+-	if (!skb) {
+-		netdev_err(dev, "%s: BUG! Trying to echo non existing skb: can_priv::echo_skb[%u]\n",
+-			   __func__, idx);
+-		return NULL;
+-	}
++	if (priv->echo_skb[idx]) {
++		/* Using "struct canfd_frame::len" for the frame
++		 * length is supported on both CAN and CANFD frames.
++		 */
++		struct sk_buff *skb = priv->echo_skb[idx];
++		struct canfd_frame *cf = (struct canfd_frame *)skb->data;
++		u8 len = cf->len;
++
++		*len_ptr = len;
++		priv->echo_skb[idx] = NULL;
+ 
+-	/* Using "struct canfd_frame::len" for the frame
+-	 * length is supported on both CAN and CANFD frames.
+-	 */
+-	cf = (struct canfd_frame *)skb->data;
+-	*len_ptr = cf->len;
+-	priv->echo_skb[idx] = NULL;
++		return skb;
++	}
+ 
+-	return skb;
++	return NULL;
+ }
+ 
+ /*
+diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
+index fa2c7bd638be..8c93ed5c9763 100644
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -442,6 +442,7 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
+ 	if (pskb_trim_rcsum(skb, len))
+ 		goto drop;
+ 
++	ph = pppoe_hdr(skb);
+ 	pn = pppoe_pernet(dev_net(dev));
+ 
+ 	/* Note that get_item does a sock_hold(), so sk_pppox(po)
+diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
+index 486393fa4f3e..0f1201e6e957 100644
+--- a/drivers/nvme/target/rdma.c
++++ b/drivers/nvme/target/rdma.c
+@@ -137,6 +137,10 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
+ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
+ static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
+ static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
++static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
++				struct nvmet_rdma_rsp *r);
++static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
++				struct nvmet_rdma_rsp *r);
+ 
+ static struct nvmet_fabrics_ops nvmet_rdma_ops;
+ 
+@@ -175,9 +179,17 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
+ 	spin_unlock_irqrestore(&queue->rsps_lock, flags);
+ 
+ 	if (unlikely(!rsp)) {
+-		rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
++		int ret;
++
++		rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
+ 		if (unlikely(!rsp))
+ 			return NULL;
++		ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
++		if (unlikely(ret)) {
++			kfree(rsp);
++			return NULL;
++		}
++
+ 		rsp->allocated = true;
+ 	}
+ 
+@@ -189,7 +201,8 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
+ {
+ 	unsigned long flags;
+ 
+-	if (rsp->allocated) {
++	if (unlikely(rsp->allocated)) {
++		nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
+ 		kfree(rsp);
+ 		return;
+ 	}
+diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
+index 1406fb688a26..73e2f89aded8 100644
+--- a/drivers/s390/char/sclp_config.c
++++ b/drivers/s390/char/sclp_config.c
+@@ -59,7 +59,9 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
+ 
+ static void __ref sclp_cpu_change_notify(struct work_struct *work)
+ {
++	lock_device_hotplug();
+ 	smp_rescan_cpus();
++	unlock_device_hotplug();
+ }
+ 
+ static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
+diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+index 0f63a36a519e..d22360849b88 100644
+--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
++++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+@@ -43,6 +43,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
+ 	{USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
+ 	{USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
+ 	{USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
++	{USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */
+ 	{USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
+ 	{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
+ 	{USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
+diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
+index 6d1e2f746ab4..8d6253903f24 100644
+--- a/drivers/tty/n_hdlc.c
++++ b/drivers/tty/n_hdlc.c
+@@ -598,6 +598,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
+ 				/* too large for caller's buffer */
+ 				ret = -EOVERFLOW;
+ 			} else {
++				__set_current_state(TASK_RUNNING);
+ 				if (copy_to_user(buf, rbuf->buf, rbuf->count))
+ 					ret = -EFAULT;
+ 				else
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index bcfdaf6ddbb2..95fc7e893fd2 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -540,10 +540,12 @@ static int uart_put_char(struct tty_struct *tty, unsigned char c)
+ 	int ret = 0;
+ 
+ 	circ = &state->xmit;
+-	if (!circ->buf)
++	port = uart_port_lock(state, flags);
++	if (!circ->buf) {
++		uart_port_unlock(port, flags);
+ 		return 0;
++	}
+ 
+-	port = uart_port_lock(state, flags);
+ 	if (port && uart_circ_chars_free(circ) != 0) {
+ 		circ->buf[circ->head] = c;
+ 		circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1);
+@@ -576,11 +578,13 @@ static int uart_write(struct tty_struct *tty,
+ 		return -EL3HLT;
+ 	}
+ 
++	port = uart_port_lock(state, flags);
+ 	circ = &state->xmit;
+-	if (!circ->buf)
++	if (!circ->buf) {
++		uart_port_unlock(port, flags);
+ 		return 0;
++	}
+ 
+-	port = uart_port_lock(state, flags);
+ 	while (port) {
+ 		c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE);
+ 		if (count < c)
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index f61f8650665f..19fe1e8fc124 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -2324,7 +2324,8 @@ static int tiocsti(struct tty_struct *tty, char __user *p)
+ 	ld = tty_ldisc_ref_wait(tty);
+ 	if (!ld)
+ 		return -EIO;
+-	ld->ops->receive_buf(tty, &ch, &mbz, 1);
++	if (ld->ops->receive_buf)
++		ld->ops->receive_buf(tty, &ch, &mbz, 1);
+ 	tty_ldisc_deref(ld);
+ 	return 0;
+ }
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index 9d3e413f48c6..232cb0a760b9 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -956,6 +956,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
+ 	if (con_is_visible(vc))
+ 		update_screen(vc);
+ 	vt_event_post(VT_EVENT_RESIZE, vc->vc_num, vc->vc_num);
++	notify_update(vc);
+ 	return err;
+ }
+ 
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index 4966768d3c98..9706d214c409 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -47,6 +47,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) },
+ 	{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
+ 	{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) },
++	{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_TB) },
+ 	{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
+ 	{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
+ 	{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
+diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
+index a84f0959ab34..d84c3b3d477b 100644
+--- a/drivers/usb/serial/pl2303.h
++++ b/drivers/usb/serial/pl2303.h
+@@ -13,6 +13,7 @@
+ 
+ #define PL2303_VENDOR_ID	0x067b
+ #define PL2303_PRODUCT_ID	0x2303
++#define PL2303_PRODUCT_ID_TB		0x2304
+ #define PL2303_PRODUCT_ID_RSAQ2		0x04bb
+ #define PL2303_PRODUCT_ID_DCU11		0x1234
+ #define PL2303_PRODUCT_ID_PHAROS	0xaaa0
+@@ -25,6 +26,7 @@
+ #define PL2303_PRODUCT_ID_MOTOROLA	0x0307
+ #define PL2303_PRODUCT_ID_ZTEK		0xe1f1
+ 
++
+ #define ATEN_VENDOR_ID		0x0557
+ #define ATEN_VENDOR_ID2		0x0547
+ #define ATEN_PRODUCT_ID		0x2008
+diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
+index 6d6acf2c07c3..511242111403 100644
+--- a/drivers/usb/serial/usb-serial-simple.c
++++ b/drivers/usb/serial/usb-serial-simple.c
+@@ -88,7 +88,8 @@ DEVICE(moto_modem, MOTO_IDS);
+ /* Motorola Tetra driver */
+ #define MOTOROLA_TETRA_IDS()			\
+ 	{ USB_DEVICE(0x0cad, 0x9011) },	/* Motorola Solutions TETRA PEI */ \
+-	{ USB_DEVICE(0x0cad, 0x9012) }	/* MTP6550 */
++	{ USB_DEVICE(0x0cad, 0x9012) },	/* MTP6550 */ \
++	{ USB_DEVICE(0x0cad, 0x9016) }	/* TPG2200 */
+ DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
+ 
+ /* Novatel Wireless GPS driver */
+diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
+index 353c93bc459b..681d0eade82f 100644
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -751,7 +751,8 @@ static void handle_rx(struct vhost_net *net)
+ 		vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
+ 					    headcount);
+ 		if (unlikely(vq_log))
+-			vhost_log_write(vq, vq_log, log, vhost_len);
++			vhost_log_write(vq, vq_log, log, vhost_len,
++					vq->iov, in);
+ 		total_len += vhost_len;
+ 		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
+ 			vhost_poll_queue(&vq->poll);
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index 53b1b3cfce84..dc387a974325 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -1646,13 +1646,87 @@ static int log_write(void __user *log_base,
+ 	return r;
+ }
+ 
++static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
++{
++	struct vhost_umem *umem = vq->umem;
++	struct vhost_umem_node *u;
++	u64 start, end, l, min;
++	int r;
++	bool hit = false;
++
++	while (len) {
++		min = len;
++		/* More than one GPA can be mapped into a single HVA. So
++		 * iterate all possible umems here to be safe.
++		 */
++		list_for_each_entry(u, &umem->umem_list, link) {
++			if (u->userspace_addr > hva - 1 + len ||
++			    u->userspace_addr - 1 + u->size < hva)
++				continue;
++			start = max(u->userspace_addr, hva);
++			end = min(u->userspace_addr - 1 + u->size,
++				  hva - 1 + len);
++			l = end - start + 1;
++			r = log_write(vq->log_base,
++				      u->start + start - u->userspace_addr,
++				      l);
++			if (r < 0)
++				return r;
++			hit = true;
++			min = min(l, min);
++		}
++
++		if (!hit)
++			return -EFAULT;
++
++		len -= min;
++		hva += min;
++	}
++
++	return 0;
++}
++
++static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
++{
++	struct iovec iov[64];
++	int i, ret;
++
++	if (!vq->iotlb)
++		return log_write(vq->log_base, vq->log_addr + used_offset, len);
++
++	ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
++			     len, iov, 64, VHOST_ACCESS_WO);
++	if (ret)
++		return ret;
++
++	for (i = 0; i < ret; i++) {
++		ret = log_write_hva(vq,	(uintptr_t)iov[i].iov_base,
++				    iov[i].iov_len);
++		if (ret)
++			return ret;
++	}
++
++	return 0;
++}
++
+ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
+-		    unsigned int log_num, u64 len)
++		    unsigned int log_num, u64 len, struct iovec *iov, int count)
+ {
+ 	int i, r;
+ 
+ 	/* Make sure data written is seen before log. */
+ 	smp_wmb();
++
++	if (vq->iotlb) {
++		for (i = 0; i < count; i++) {
++			r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
++					  iov[i].iov_len);
++			if (r < 0)
++				return r;
++		}
++		return 0;
++	}
++
+ 	for (i = 0; i < log_num; ++i) {
+ 		u64 l = min(log[i].len, len);
+ 		r = log_write(vq->log_base, log[i].addr, l);
+@@ -1682,9 +1756,8 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)
+ 		smp_wmb();
+ 		/* Log used flag write. */
+ 		used = &vq->used->flags;
+-		log_write(vq->log_base, vq->log_addr +
+-			  (used - (void __user *)vq->used),
+-			  sizeof vq->used->flags);
++		log_used(vq, (used - (void __user *)vq->used),
++			 sizeof vq->used->flags);
+ 		if (vq->log_ctx)
+ 			eventfd_signal(vq->log_ctx, 1);
+ 	}
+@@ -1702,9 +1775,8 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
+ 		smp_wmb();
+ 		/* Log avail event write */
+ 		used = vhost_avail_event(vq);
+-		log_write(vq->log_base, vq->log_addr +
+-			  (used - (void __user *)vq->used),
+-			  sizeof *vhost_avail_event(vq));
++		log_used(vq, (used - (void __user *)vq->used),
++			 sizeof *vhost_avail_event(vq));
+ 		if (vq->log_ctx)
+ 			eventfd_signal(vq->log_ctx, 1);
+ 	}
+@@ -2103,10 +2175,8 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
+ 		/* Make sure data is seen before log. */
+ 		smp_wmb();
+ 		/* Log used ring entry write. */
+-		log_write(vq->log_base,
+-			  vq->log_addr +
+-			   ((void __user *)used - (void __user *)vq->used),
+-			  count * sizeof *used);
++		log_used(vq, ((void __user *)used - (void __user *)vq->used),
++			 count * sizeof *used);
+ 	}
+ 	old = vq->last_used_idx;
+ 	new = (vq->last_used_idx += count);
+@@ -2148,9 +2218,8 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
+ 		/* Make sure used idx is seen before log. */
+ 		smp_wmb();
+ 		/* Log used index update. */
+-		log_write(vq->log_base,
+-			  vq->log_addr + offsetof(struct vring_used, idx),
+-			  sizeof vq->used->idx);
++		log_used(vq, offsetof(struct vring_used, idx),
++			 sizeof vq->used->idx);
+ 		if (vq->log_ctx)
+ 			eventfd_signal(vq->log_ctx, 1);
+ 	}
+diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
+index 78f3c5fc02e4..e8efe1af7487 100644
+--- a/drivers/vhost/vhost.h
++++ b/drivers/vhost/vhost.h
+@@ -199,7 +199,8 @@ bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
+ bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
+ 
+ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
+-		    unsigned int log_num, u64 len);
++		    unsigned int log_num, u64 len,
++		    struct iovec *iov, int count);
+ int vq_iotlb_prefetch(struct vhost_virtqueue *vq);
+ 
+ struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
+diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
+index b450adf65236..fb973cc0af66 100644
+--- a/fs/btrfs/dev-replace.c
++++ b/fs/btrfs/dev-replace.c
+@@ -350,6 +350,7 @@ int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name,
+ 		break;
+ 	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
+ 	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
++		ASSERT(0);
+ 		ret = BTRFS_IOCTL_DEV_REPLACE_RESULT_ALREADY_STARTED;
+ 		goto leave;
+ 	}
+@@ -394,6 +395,10 @@ int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name,
+ 	if (IS_ERR(trans)) {
+ 		ret = PTR_ERR(trans);
+ 		btrfs_dev_replace_lock(dev_replace, 1);
++		dev_replace->replace_state =
++			BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED;
++		dev_replace->srcdev = NULL;
++		dev_replace->tgtdev = NULL;
+ 		goto leave;
+ 	}
+ 
+@@ -415,8 +420,6 @@ int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name,
+ 	return ret;
+ 
+ leave:
+-	dev_replace->srcdev = NULL;
+-	dev_replace->tgtdev = NULL;
+ 	btrfs_dev_replace_unlock(dev_replace, 1);
+ 	btrfs_destroy_dev_replace_tgtdev(fs_info, tgt_device);
+ 	return ret;
+@@ -784,6 +787,8 @@ int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info)
+ 			   "cannot continue dev_replace, tgtdev is missing");
+ 		btrfs_info(fs_info,
+ 			   "you may cancel the operation after 'mount -o degraded'");
++		dev_replace->replace_state =
++					BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
+ 		btrfs_dev_replace_unlock(dev_replace, 1);
+ 		return 0;
+ 	}
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 08c1c86c2ad9..2db7968febfe 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -148,14 +148,14 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
+ 
+ 			scredits = server->credits;
+ 			/* can deadlock with reopen */
+-			if (scredits == 1) {
++			if (scredits <= 8) {
+ 				*num = SMB2_MAX_BUFFER_SIZE;
+ 				*credits = 0;
+ 				break;
+ 			}
+ 
+-			/* leave one credit for a possible reopen */
+-			scredits--;
++			/* leave some credits for reopen and other ops */
++			scredits -= 8;
+ 			*num = min_t(unsigned int, size,
+ 				     scredits * SMB2_MAX_BUFFER_SIZE);
+ 
+diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
+index f4fe54047fb7..d87c48e4a9ba 100644
+--- a/fs/f2fs/node.c
++++ b/fs/f2fs/node.c
+@@ -656,6 +656,7 @@ static void truncate_node(struct dnode_of_data *dn)
+ {
+ 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
+ 	struct node_info ni;
++	pgoff_t index;
+ 
+ 	get_node_info(sbi, dn->nid, &ni);
+ 	if (dn->inode->i_blocks == 0) {
+@@ -678,10 +679,11 @@ invalidate:
+ 	clear_node_page_dirty(dn->node_page);
+ 	set_sbi_flag(sbi, SBI_IS_DIRTY);
+ 
++	index = dn->node_page->index;
+ 	f2fs_put_page(dn->node_page, 1);
+ 
+ 	invalidate_mapping_pages(NODE_MAPPING(sbi),
+-			dn->node_page->index, dn->node_page->index);
++			index, index);
+ 
+ 	dn->node_page = NULL;
+ 	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
+diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
+index 21c88a7ac23b..697988be62df 100644
+--- a/include/linux/compiler-clang.h
++++ b/include/linux/compiler-clang.h
+@@ -23,3 +23,17 @@
+ #ifdef __noretpoline
+ #undef __noretpoline
+ #endif
++
++/*
++ * Not all versions of clang implement the type-generic versions
++ * of the builtin overflow checkers. Fortunately, clang implements
++ * __has_builtin allowing us to avoid awkward version
++ * checks. Unfortunately, we don't know which version of gcc clang
++ * pretends to be, so the macro may or may not be defined.
++ */
++#undef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
++#if __has_builtin(__builtin_mul_overflow) && \
++    __has_builtin(__builtin_add_overflow) && \
++    __has_builtin(__builtin_sub_overflow)
++#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
++#endif
+diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
+index 8e82e3373eaf..8e9b0cb8db41 100644
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -334,3 +334,7 @@
+  * code
+  */
+ #define uninitialized_var(x) x = x
++
++#if GCC_VERSION >= 50100
++#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
++#endif
+diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
+index d4c71132d07f..8c9897b1b953 100644
+--- a/include/linux/compiler-intel.h
++++ b/include/linux/compiler-intel.h
+@@ -43,3 +43,7 @@
+ #define __builtin_bswap16 _bswap16
+ #endif
+ 
++/*
++ * icc defines __GNUC__, but does not implement the builtin overflow checkers.
++ */
++#undef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
+diff --git a/include/linux/overflow.h b/include/linux/overflow.h
+new file mode 100644
+index 000000000000..c8890ec358a7
+--- /dev/null
++++ b/include/linux/overflow.h
+@@ -0,0 +1,205 @@
++/* SPDX-License-Identifier: GPL-2.0 OR MIT */
++#ifndef __LINUX_OVERFLOW_H
++#define __LINUX_OVERFLOW_H
++
++#include <linux/compiler.h>
++
++/*
++ * In the fallback code below, we need to compute the minimum and
++ * maximum values representable in a given type. These macros may also
++ * be useful elsewhere, so we provide them outside the
++ * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block.
++ *
++ * It would seem more obvious to do something like
++ *
++ * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0)
++ * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0)
++ *
++ * Unfortunately, the middle expressions, strictly speaking, have
++ * undefined behaviour, and at least some versions of gcc warn about
++ * the type_max expression (but not if -fsanitize=undefined is in
++ * effect; in that case, the warning is deferred to runtime...).
++ *
++ * The slightly excessive casting in type_min is to make sure the
++ * macros also produce sensible values for the exotic type _Bool. [The
++ * overflow checkers only almost work for _Bool, but that's
++ * a-feature-not-a-bug, since people shouldn't be doing arithmetic on
++ * _Bools. Besides, the gcc builtins don't allow _Bool* as third
++ * argument.]
++ *
++ * Idea stolen from
++ * https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html -
++ * credit to Christian Biere.
++ */
++#define is_signed_type(type)       (((type)(-1)) < (type)1)
++#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type)))
++#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
++#define type_min(T) ((T)((T)-type_max(T)-(T)1))
++
++
++#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
++/*
++ * For simplicity and code hygiene, the fallback code below insists on
++ * a, b and *d having the same type (similar to the min() and max()
++ * macros), whereas gcc's type-generic overflow checkers accept
++ * different types. Hence we don't just make check_add_overflow an
++ * alias for __builtin_add_overflow, but add type checks similar to
++ * below.
++ */
++#define check_add_overflow(a, b, d) ({		\
++	typeof(a) __a = (a);			\
++	typeof(b) __b = (b);			\
++	typeof(d) __d = (d);			\
++	(void) (&__a == &__b);			\
++	(void) (&__a == __d);			\
++	__builtin_add_overflow(__a, __b, __d);	\
++})
++
++#define check_sub_overflow(a, b, d) ({		\
++	typeof(a) __a = (a);			\
++	typeof(b) __b = (b);			\
++	typeof(d) __d = (d);			\
++	(void) (&__a == &__b);			\
++	(void) (&__a == __d);			\
++	__builtin_sub_overflow(__a, __b, __d);	\
++})
++
++#define check_mul_overflow(a, b, d) ({		\
++	typeof(a) __a = (a);			\
++	typeof(b) __b = (b);			\
++	typeof(d) __d = (d);			\
++	(void) (&__a == &__b);			\
++	(void) (&__a == __d);			\
++	__builtin_mul_overflow(__a, __b, __d);	\
++})
++
++#else
++
++
++/* Checking for unsigned overflow is relatively easy without causing UB. */
++#define __unsigned_add_overflow(a, b, d) ({	\
++	typeof(a) __a = (a);			\
++	typeof(b) __b = (b);			\
++	typeof(d) __d = (d);			\
++	(void) (&__a == &__b);			\
++	(void) (&__a == __d);			\
++	*__d = __a + __b;			\
++	*__d < __a;				\
++})
++#define __unsigned_sub_overflow(a, b, d) ({	\
++	typeof(a) __a = (a);			\
++	typeof(b) __b = (b);			\
++	typeof(d) __d = (d);			\
++	(void) (&__a == &__b);			\
++	(void) (&__a == __d);			\
++	*__d = __a - __b;			\
++	__a < __b;				\
++})
++/*
++ * If one of a or b is a compile-time constant, this avoids a division.
++ */
++#define __unsigned_mul_overflow(a, b, d) ({		\
++	typeof(a) __a = (a);				\
++	typeof(b) __b = (b);				\
++	typeof(d) __d = (d);				\
++	(void) (&__a == &__b);				\
++	(void) (&__a == __d);				\
++	*__d = __a * __b;				\
++	__builtin_constant_p(__b) ?			\
++	  __b > 0 && __a > type_max(typeof(__a)) / __b : \
++	  __a > 0 && __b > type_max(typeof(__b)) / __a;	 \
++})
++
++/*
++ * For signed types, detecting overflow is much harder, especially if
++ * we want to avoid UB. But the interface of these macros is such that
++ * we must provide a result in *d, and in fact we must produce the
++ * result promised by gcc's builtins, which is simply the possibly
++ * wrapped-around value. Fortunately, we can just formally do the
++ * operations in the widest relevant unsigned type (u64) and then
++ * truncate the result - gcc is smart enough to generate the same code
++ * with and without the (u64) casts.
++ */
++
++/*
++ * Adding two signed integers can overflow only if they have the same
++ * sign, and overflow has happened iff the result has the opposite
++ * sign.
++ */
++#define __signed_add_overflow(a, b, d) ({	\
++	typeof(a) __a = (a);			\
++	typeof(b) __b = (b);			\
++	typeof(d) __d = (d);			\
++	(void) (&__a == &__b);			\
++	(void) (&__a == __d);			\
++	*__d = (u64)__a + (u64)__b;		\
++	(((~(__a ^ __b)) & (*__d ^ __a))	\
++		& type_min(typeof(__a))) != 0;	\
++})
++
++/*
++ * Subtraction is similar, except that overflow can now happen only
++ * when the signs are opposite. In this case, overflow has happened if
++ * the result has the opposite sign of a.
++ */
++#define __signed_sub_overflow(a, b, d) ({	\
++	typeof(a) __a = (a);			\
++	typeof(b) __b = (b);			\
++	typeof(d) __d = (d);			\
++	(void) (&__a == &__b);			\
++	(void) (&__a == __d);			\
++	*__d = (u64)__a - (u64)__b;		\
++	((((__a ^ __b)) & (*__d ^ __a))		\
++		& type_min(typeof(__a))) != 0;	\
++})
++
++/*
++ * Signed multiplication is rather hard. gcc always follows C99, so
++ * division is truncated towards 0. This means that we can write the
++ * overflow check like this:
++ *
++ * (a > 0 && (b > MAX/a || b < MIN/a)) ||
++ * (a < -1 && (b > MIN/a || b < MAX/a) ||
++ * (a == -1 && b == MIN)
++ *
++ * The redundant casts of -1 are to silence an annoying -Wtype-limits
++ * (included in -Wextra) warning: When the type is u8 or u16, the
++ * __b_c_e in check_mul_overflow obviously selects
++ * __unsigned_mul_overflow, but unfortunately gcc still parses this
++ * code and warns about the limited range of __b.
++ */
++
++#define __signed_mul_overflow(a, b, d) ({				\
++	typeof(a) __a = (a);						\
++	typeof(b) __b = (b);						\
++	typeof(d) __d = (d);						\
++	typeof(a) __tmax = type_max(typeof(a));				\
++	typeof(a) __tmin = type_min(typeof(a));				\
++	(void) (&__a == &__b);						\
++	(void) (&__a == __d);						\
++	*__d = (u64)__a * (u64)__b;					\
++	(__b > 0   && (__a > __tmax/__b || __a < __tmin/__b)) ||	\
++	(__b < (typeof(__b))-1  && (__a > __tmin/__b || __a < __tmax/__b)) || \
++	(__b == (typeof(__b))-1 && __a == __tmin);			\
++})
++
++
++#define check_add_overflow(a, b, d)					\
++	__builtin_choose_expr(is_signed_type(typeof(a)),		\
++			__signed_add_overflow(a, b, d),			\
++			__unsigned_add_overflow(a, b, d))
++
++#define check_sub_overflow(a, b, d)					\
++	__builtin_choose_expr(is_signed_type(typeof(a)),		\
++			__signed_sub_overflow(a, b, d),			\
++			__unsigned_sub_overflow(a, b, d))
++
++#define check_mul_overflow(a, b, d)					\
++	__builtin_choose_expr(is_signed_type(typeof(a)),		\
++			__signed_mul_overflow(a, b, d),			\
++			__unsigned_mul_overflow(a, b, d))
++
++
++#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */
++
++#endif /* __LINUX_OVERFLOW_H */
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index e90fe6b83e00..ed329a39d621 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -2962,6 +2962,7 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
+  *
+  *	This is exactly the same as pskb_trim except that it ensures the
+  *	checksum of received packets are still valid after the operation.
++ *	It can change skb pointers.
+  */
+ 
+ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
+diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
+index a6446d72c5d9..a85028e06b1e 100644
+--- a/include/net/ip_fib.h
++++ b/include/net/ip_fib.h
+@@ -242,7 +242,7 @@ int fib_table_insert(struct net *, struct fib_table *, struct fib_config *);
+ int fib_table_delete(struct net *, struct fib_table *, struct fib_config *);
+ int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
+ 		   struct netlink_callback *cb);
+-int fib_table_flush(struct net *net, struct fib_table *table);
++int fib_table_flush(struct net *net, struct fib_table *table, bool flush_all);
+ struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
+ void fib_table_flush_external(struct fib_table *table);
+ void fib_free_table(struct fib_table *tb);
+diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
+index 8498e3503605..5b675695c661 100644
+--- a/net/bridge/br_forward.c
++++ b/net/bridge/br_forward.c
+@@ -35,10 +35,10 @@ static inline int should_deliver(const struct net_bridge_port *p,
+ 
+ int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
+ {
++	skb_push(skb, ETH_HLEN);
+ 	if (!is_skb_forwardable(skb->dev, skb))
+ 		goto drop;
+ 
+-	skb_push(skb, ETH_HLEN);
+ 	br_drop_fake_rtable(skb);
+ 
+ 	if (skb->ip_summed == CHECKSUM_PARTIAL &&
+@@ -96,12 +96,11 @@ static void __br_forward(const struct net_bridge_port *to,
+ 		net = dev_net(indev);
+ 	} else {
+ 		if (unlikely(netpoll_tx_running(to->br->dev))) {
+-			if (!is_skb_forwardable(skb->dev, skb)) {
++			skb_push(skb, ETH_HLEN);
++			if (!is_skb_forwardable(skb->dev, skb))
+ 				kfree_skb(skb);
+-			} else {
+-				skb_push(skb, ETH_HLEN);
++			else
+ 				br_netpoll_send_skb(to, skb);
+-			}
+ 			return;
+ 		}
+ 		br_hook = NF_BR_LOCAL_OUT;
+diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
+index 5989661c659f..a1b57cb07f1e 100644
+--- a/net/bridge/br_netfilter_ipv6.c
++++ b/net/bridge/br_netfilter_ipv6.c
+@@ -131,6 +131,7 @@ int br_validate_ipv6(struct net *net, struct sk_buff *skb)
+ 					IPSTATS_MIB_INDISCARDS);
+ 			goto drop;
+ 		}
++		hdr = ipv6_hdr(skb);
+ 	}
+ 	if (hdr->nexthdr == NEXTHDR_HOP && br_nf_check_hbh_len(skb))
+ 		goto drop;
+diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
+index 4b3df6b0e3b9..d94aaf7c7685 100644
+--- a/net/bridge/netfilter/nft_reject_bridge.c
++++ b/net/bridge/netfilter/nft_reject_bridge.c
+@@ -236,6 +236,7 @@ static bool reject6_br_csum_ok(struct sk_buff *skb, int hook)
+ 	    pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
+ 		return false;
+ 
++	ip6h = ipv6_hdr(skb);
+ 	thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
+ 	if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
+ 		return false;
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index e4f694dfcf83..c99e7c75eeee 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -67,6 +67,9 @@
+  */
+ #define MAX_NFRAMES 256
+ 
++/* limit timers to 400 days for sending/timeouts */
++#define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60)
++
+ /* use of last_frames[index].flags */
+ #define RX_RECV    0x40 /* received data for this element */
+ #define RX_THR     0x80 /* element not been sent due to throttle feature */
+@@ -142,6 +145,22 @@ static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv)
+ 	return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
+ }
+ 
++/* check limitations for timeval provided by user */
++static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head)
++{
++	if ((msg_head->ival1.tv_sec < 0) ||
++	    (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) ||
++	    (msg_head->ival1.tv_usec < 0) ||
++	    (msg_head->ival1.tv_usec >= USEC_PER_SEC) ||
++	    (msg_head->ival2.tv_sec < 0) ||
++	    (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) ||
++	    (msg_head->ival2.tv_usec < 0) ||
++	    (msg_head->ival2.tv_usec >= USEC_PER_SEC))
++		return true;
++
++	return false;
++}
++
+ #define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU)
+ #define OPSIZ sizeof(struct bcm_op)
+ #define MHSIZ sizeof(struct bcm_msg_head)
+@@ -884,6 +903,10 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
+ 	if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
+ 		return -EINVAL;
+ 
++	/* check timeval limitations */
++	if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
++		return -EINVAL;
++
+ 	/* check the given can_id */
+ 	op = bcm_find_op(&bo->tx_ops, msg_head, ifindex);
+ 	if (op) {
+@@ -1063,6 +1086,10 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
+ 	     (!(msg_head->can_id & CAN_RTR_FLAG))))
+ 		return -EINVAL;
+ 
++	/* check timeval limitations */
++	if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
++		return -EINVAL;
++
+ 	/* check the given can_id */
+ 	op = bcm_find_op(&bo->rx_ops, msg_head, ifindex);
+ 	if (op) {
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index 9364c39d0555..cbe3fdba4a2c 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -193,7 +193,7 @@ static void fib_flush(struct net *net)
+ 		struct fib_table *tb;
+ 
+ 		hlist_for_each_entry_safe(tb, tmp, head, tb_hlist)
+-			flushed += fib_table_flush(net, tb);
++			flushed += fib_table_flush(net, tb, false);
+ 	}
+ 
+ 	if (flushed)
+@@ -1277,7 +1277,7 @@ static void ip_fib_net_exit(struct net *net)
+ 
+ 		hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
+ 			hlist_del(&tb->tb_hlist);
+-			fib_table_flush(net, tb);
++			fib_table_flush(net, tb, true);
+ 			fib_free_table(tb);
+ 		}
+ 	}
+diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
+index ef40bb659a7a..36f0a8c581d0 100644
+--- a/net/ipv4/fib_trie.c
++++ b/net/ipv4/fib_trie.c
+@@ -1826,7 +1826,7 @@ void fib_table_flush_external(struct fib_table *tb)
+ }
+ 
+ /* Caller must hold RTNL. */
+-int fib_table_flush(struct net *net, struct fib_table *tb)
++int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all)
+ {
+ 	struct trie *t = (struct trie *)tb->tb_data;
+ 	struct key_vector *pn = t->kv;
+@@ -1874,7 +1874,17 @@ int fib_table_flush(struct net *net, struct fib_table *tb)
+ 		hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
+ 			struct fib_info *fi = fa->fa_info;
+ 
+-			if (!fi || !(fi->fib_flags & RTNH_F_DEAD)) {
++			if (!fi ||
++			    (!(fi->fib_flags & RTNH_F_DEAD) &&
++			     !fib_props[fa->fa_type].error)) {
++				slen = fa->fa_slen;
++				continue;
++			}
++
++			/* Do not flush error routes if network namespace is
++			 * not being dismantled
++			 */
++			if (!flush_all && fib_props[fa->fa_type].error) {
+ 				slen = fa->fa_slen;
+ 				continue;
+ 			}
+diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
+index 5a8c26c9872d..0fb49dedc9fb 100644
+--- a/net/ipv4/inet_fragment.c
++++ b/net/ipv4/inet_fragment.c
+@@ -90,7 +90,7 @@ static void inet_frags_free_cb(void *ptr, void *arg)
+ 
+ void inet_frags_exit_net(struct netns_frags *nf)
+ {
+-	nf->low_thresh = 0; /* prevent creation of new frags */
++	nf->high_thresh = 0; /* prevent creation of new frags */
+ 
+ 	rhashtable_free_and_destroy(&nf->rhashtable, inet_frags_free_cb, NULL);
+ }
+diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
+index d6feabb03516..bcadca26523b 100644
+--- a/net/ipv4/ip_input.c
++++ b/net/ipv4/ip_input.c
+@@ -475,6 +475,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
+ 		goto drop;
+ 	}
+ 
++	iph = ip_hdr(skb);
+ 	skb->transport_header = skb->network_header + iph->ihl*4;
+ 
+ 	/* Remove any debris in the socket control block */
+diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
+index 326945d9be5f..3bd4d5d0c346 100644
+--- a/net/openvswitch/flow_netlink.c
++++ b/net/openvswitch/flow_netlink.c
+@@ -409,7 +409,7 @@ static int __parse_flow_nlattrs(const struct nlattr *attr,
+ 			return -EINVAL;
+ 		}
+ 
+-		if (!nz || !is_all_zero(nla_data(nla), expected_len)) {
++		if (!nz || !is_all_zero(nla_data(nla), nla_len(nla))) {
+ 			attrs |= 1 << type;
+ 			a[type] = nla;
+ 		}
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index ea13df1be067..912ed9b901ac 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -1850,7 +1850,6 @@ done:
+ int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+ 		struct tcf_result *res, bool compat_mode)
+ {
+-	__be16 protocol = tc_skb_protocol(skb);
+ #ifdef CONFIG_NET_CLS_ACT
+ 	const struct tcf_proto *old_tp = tp;
+ 	int limit = 0;
+@@ -1858,6 +1857,7 @@ int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+ reclassify:
+ #endif
+ 	for (; tp; tp = rcu_dereference_bh(tp->next)) {
++		__be16 protocol = tc_skb_protocol(skb);
+ 		int err;
+ 
+ 		if (tp->protocol != protocol &&
+@@ -1884,7 +1884,6 @@ reset:
+ 	}
+ 
+ 	tp = old_tp;
+-	protocol = tc_skb_protocol(skb);
+ 	goto reclassify;
+ #endif
+ }
+diff --git a/sound/soc/codecs/rt5514-spi.c b/sound/soc/codecs/rt5514-spi.c
+index 09103aab0cb2..7d410e39d1a0 100644
+--- a/sound/soc/codecs/rt5514-spi.c
++++ b/sound/soc/codecs/rt5514-spi.c
+@@ -253,6 +253,8 @@ static int rt5514_spi_pcm_probe(struct snd_soc_platform *platform)
+ 
+ 	rt5514_dsp = devm_kzalloc(platform->dev, sizeof(*rt5514_dsp),
+ 			GFP_KERNEL);
++	if (!rt5514_dsp)
++		return -ENOMEM;
+ 
+ 	rt5514_dsp->dev = &rt5514_spi->dev;
+ 	mutex_init(&rt5514_dsp->dma_lock);
+diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+index f5a8050351b5..e83e314a76a5 100644
+--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
++++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+@@ -399,7 +399,13 @@ static int sst_media_hw_params(struct snd_pcm_substream *substream,
+ 				struct snd_pcm_hw_params *params,
+ 				struct snd_soc_dai *dai)
+ {
+-	snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
++	int ret;
++
++	ret =
++		snd_pcm_lib_malloc_pages(substream,
++				params_buffer_bytes(params));
++	if (ret)
++		return ret;
+ 	memset(substream->runtime->dma_area, 0, params_buffer_bytes(params));
+ 	return 0;
+ }
+diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
+index b46e1cf347e5..046a4850e3df 100644
+--- a/tools/perf/util/unwind-libdw.c
++++ b/tools/perf/util/unwind-libdw.c
+@@ -42,13 +42,13 @@ static int __report_module(struct addr_location *al, u64 ip,
+ 		Dwarf_Addr s;
+ 
+ 		dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL);
+-		if (s != al->map->start)
++		if (s != al->map->start - al->map->pgoff)
+ 			mod = 0;
+ 	}
+ 
+ 	if (!mod)
+ 		mod = dwfl_report_elf(ui->dwfl, dso->short_name,
+-				      dso->long_name, -1, al->map->start,
++				      (dso->symsrc_filename ? dso->symsrc_filename : dso->long_name), -1, al->map->start - al->map->pgoff,
+ 				      false);
+ 
+ 	return mod && dwfl_addrmodule(ui->dwfl, ip) == mod ? 0 : -1;
+diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c
+index 85a78eba0a93..874972ccfc95 100644
+--- a/tools/testing/selftests/x86/protection_keys.c
++++ b/tools/testing/selftests/x86/protection_keys.c
+@@ -1129,6 +1129,21 @@ void test_pkey_syscalls_bad_args(int *ptr, u16 pkey)
+ 	pkey_assert(err);
+ }
+ 
++void become_child(void)
++{
++	pid_t forkret;
++
++	forkret = fork();
++	pkey_assert(forkret >= 0);
++	dprintf3("[%d] fork() ret: %d\n", getpid(), forkret);
++
++	if (!forkret) {
++		/* in the child */
++		return;
++	}
++	exit(0);
++}
++
+ /* Assumes that all pkeys other than 'pkey' are unallocated */
+ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
+ {
+@@ -1139,7 +1154,7 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
+ 	int nr_allocated_pkeys = 0;
+ 	int i;
+ 
+-	for (i = 0; i < NR_PKEYS*2; i++) {
++	for (i = 0; i < NR_PKEYS*3; i++) {
+ 		int new_pkey;
+ 		dprintf1("%s() alloc loop: %d\n", __func__, i);
+ 		new_pkey = alloc_pkey();
+@@ -1150,20 +1165,26 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
+ 		if ((new_pkey == -1) && (errno == ENOSPC)) {
+ 			dprintf2("%s() failed to allocate pkey after %d tries\n",
+ 				__func__, nr_allocated_pkeys);
+-			break;
++		} else {
++			/*
++			 * Ensure the number of successes never
++			 * exceeds the number of keys supported
++			 * in the hardware.
++			 */
++			pkey_assert(nr_allocated_pkeys < NR_PKEYS);
++			allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
+ 		}
+-		pkey_assert(nr_allocated_pkeys < NR_PKEYS);
+-		allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
++
++		/*
++		 * Make sure that allocation state is properly
++		 * preserved across fork().
++		 */
++		if (i == NR_PKEYS*2)
++			become_child();
+ 	}
+ 
+ 	dprintf3("%s()::%d\n", __func__, __LINE__);
+ 
+-	/*
+-	 * ensure it did not reach the end of the loop without
+-	 * failure:
+-	 */
+-	pkey_assert(i < NR_PKEYS*2);
+-
+ 	/*
+ 	 * There are 16 pkeys supported in hardware.  One is taken
+ 	 * up for the default (0) and another can be taken up by