Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Wed, 03 Apr 2019 10:49:57
Message-Id: 1554288419.296b65bf1d5e66edd9c5be7cb619e8550f4e18c3.mpagano@gentoo
commit: 296b65bf1d5e66edd9c5be7cb619e8550f4e18c3
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Apr 3 10:46:59 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Apr 3 10:46:59 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=296b65bf

Linux patch 4.4.178

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 1177_linux-4.4.178.patch | 3818 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 3818 insertions(+)

15 diff --git a/1177_linux-4.4.178.patch b/1177_linux-4.4.178.patch
16 new file mode 100644
17 index 0000000..0cb5643
18 --- /dev/null
19 +++ b/1177_linux-4.4.178.patch
20 @@ -0,0 +1,3818 @@
21 +diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
22 +index df8ab4fc240a..496673adcb6b 100644
23 +--- a/Documentation/virtual/kvm/api.txt
24 ++++ b/Documentation/virtual/kvm/api.txt
25 +@@ -13,7 +13,7 @@ of a virtual machine. The ioctls belong to three classes
26 +
27 + - VM ioctls: These query and set attributes that affect an entire virtual
28 + machine, for example memory layout. In addition a VM ioctl is used to
29 +- create virtual cpus (vcpus).
30 ++ create virtual cpus (vcpus) and devices.
31 +
32 + Only run VM ioctls from the same process (address space) that was used
33 + to create the VM.
34 +@@ -24,6 +24,11 @@ of a virtual machine. The ioctls belong to three classes
35 + Only run vcpu ioctls from the same thread that was used to create the
36 + vcpu.
37 +
38 ++ - device ioctls: These query and set attributes that control the operation
39 ++ of a single device.
40 ++
41 ++ device ioctls must be issued from the same process (address space) that
42 ++ was used to create the VM.
43 +
44 + 2. File descriptors
45 + -------------------
46 +@@ -32,10 +37,11 @@ The kvm API is centered around file descriptors. An initial
47 + open("/dev/kvm") obtains a handle to the kvm subsystem; this handle
48 + can be used to issue system ioctls. A KVM_CREATE_VM ioctl on this
49 + handle will create a VM file descriptor which can be used to issue VM
50 +-ioctls. A KVM_CREATE_VCPU ioctl on a VM fd will create a virtual cpu
51 +-and return a file descriptor pointing to it. Finally, ioctls on a vcpu
52 +-fd can be used to control the vcpu, including the important task of
53 +-actually running guest code.
54 ++ioctls. A KVM_CREATE_VCPU or KVM_CREATE_DEVICE ioctl on a VM fd will
55 ++create a virtual cpu or device and return a file descriptor pointing to
56 ++the new resource. Finally, ioctls on a vcpu or device fd can be used
57 ++to control the vcpu or device. For vcpus, this includes the important
58 ++task of actually running guest code.
59 +
60 + In general file descriptors can be migrated among processes by means
61 + of fork() and the SCM_RIGHTS facility of unix domain socket. These
62 +diff --git a/Makefile b/Makefile
63 +index 1de443248119..35be7983ef2d 100644
64 +--- a/Makefile
65 ++++ b/Makefile
66 +@@ -1,6 +1,6 @@
67 + VERSION = 4
68 + PATCHLEVEL = 4
69 +-SUBLEVEL = 177
70 ++SUBLEVEL = 178
71 + EXTRAVERSION =
72 + NAME = Blurry Fish Butt
73 +
74 +diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
75 +index 3a0277c6c060..737c8b0dda84 100644
76 +--- a/arch/arm/Kconfig
77 ++++ b/arch/arm/Kconfig
78 +@@ -1422,8 +1422,7 @@ config BIG_LITTLE
79 +
80 + config BL_SWITCHER
81 + bool "big.LITTLE switcher support"
82 +- depends on BIG_LITTLE && MCPM && HOTPLUG_CPU
83 +- select ARM_CPU_SUSPEND
84 ++ depends on BIG_LITTLE && MCPM && HOTPLUG_CPU && ARM_GIC
85 + select CPU_PM
86 + help
87 + The big.LITTLE "switcher" provides the core functionality to
88 +@@ -2141,7 +2140,8 @@ config ARCH_SUSPEND_POSSIBLE
89 + def_bool y
90 +
91 + config ARM_CPU_SUSPEND
92 +- def_bool PM_SLEEP
93 ++ def_bool PM_SLEEP || BL_SWITCHER
94 ++ depends on ARCH_SUSPEND_POSSIBLE
95 +
96 + config ARCH_HIBERNATION_POSSIBLE
97 + bool
98 +diff --git a/arch/arm/mach-imx/cpuidle-imx6q.c b/arch/arm/mach-imx/cpuidle-imx6q.c
99 +index 353bb8774112..ec74c2812c1a 100644
100 +--- a/arch/arm/mach-imx/cpuidle-imx6q.c
101 ++++ b/arch/arm/mach-imx/cpuidle-imx6q.c
102 +@@ -14,30 +14,23 @@
103 + #include "cpuidle.h"
104 + #include "hardware.h"
105 +
106 +-static atomic_t master = ATOMIC_INIT(0);
107 +-static DEFINE_SPINLOCK(master_lock);
108 ++static int num_idle_cpus = 0;
109 ++static DEFINE_SPINLOCK(cpuidle_lock);
110 +
111 + static int imx6q_enter_wait(struct cpuidle_device *dev,
112 + struct cpuidle_driver *drv, int index)
113 + {
114 +- if (atomic_inc_return(&master) == num_online_cpus()) {
115 +- /*
116 +- * With this lock, we prevent other cpu to exit and enter
117 +- * this function again and become the master.
118 +- */
119 +- if (!spin_trylock(&master_lock))
120 +- goto idle;
121 ++ spin_lock(&cpuidle_lock);
122 ++ if (++num_idle_cpus == num_online_cpus())
123 + imx6_set_lpm(WAIT_UNCLOCKED);
124 +- cpu_do_idle();
125 +- imx6_set_lpm(WAIT_CLOCKED);
126 +- spin_unlock(&master_lock);
127 +- goto done;
128 +- }
129 ++ spin_unlock(&cpuidle_lock);
130 +
131 +-idle:
132 + cpu_do_idle();
133 +-done:
134 +- atomic_dec(&master);
135 ++
136 ++ spin_lock(&cpuidle_lock);
137 ++ if (num_idle_cpus-- == num_online_cpus())
138 ++ imx6_set_lpm(WAIT_CLOCKED);
139 ++ spin_unlock(&cpuidle_lock);
140 +
141 + return index;
142 + }
143 +diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
144 +index e47cffd25c6c..aead23f15213 100644
145 +--- a/arch/arm/mm/mmu.c
146 ++++ b/arch/arm/mm/mmu.c
147 +@@ -572,7 +572,7 @@ static void __init build_mem_type_table(void)
148 + * in the Short-descriptor translation table format descriptors.
149 + */
150 + if (cpu_arch == CPU_ARCH_ARMv7 &&
151 +- (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xF) == 4) {
152 ++ (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xF) >= 4) {
153 + user_pmd_table |= PMD_PXNTABLE;
154 + }
155 + #endif
156 +diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
157 +index 5b47218809e0..f18b8c26a959 100644
158 +--- a/arch/arm64/Kconfig
159 ++++ b/arch/arm64/Kconfig
160 +@@ -89,7 +89,6 @@ config ARM64
161 + select PERF_USE_VMALLOC
162 + select POWER_RESET
163 + select POWER_SUPPLY
164 +- select RTC_LIB
165 + select SPARSE_IRQ
166 + select SYSCTL_EXCEPTION_TRACE
167 + select HAVE_CONTEXT_TRACKING
168 +@@ -819,6 +818,10 @@ config SYSVIPC_COMPAT
169 + def_bool y
170 + depends on COMPAT && SYSVIPC
171 +
172 ++config KEYS_COMPAT
173 ++ def_bool y
174 ++ depends on COMPAT && KEYS
175 ++
176 + endmenu
177 +
178 + menu "Power management options"
179 +diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
180 +index 9b2f5a9d019d..fbafd0ad16df 100644
181 +--- a/arch/arm64/include/asm/page.h
182 ++++ b/arch/arm64/include/asm/page.h
183 +@@ -19,6 +19,8 @@
184 + #ifndef __ASM_PAGE_H
185 + #define __ASM_PAGE_H
186 +
187 ++#include <linux/const.h>
188 ++
189 + /* PAGE_SHIFT determines the page size */
190 + /* CONT_SHIFT determines the number of pages which can be tracked together */
191 + #ifdef CONFIG_ARM64_64K_PAGES
192 +diff --git a/arch/arm64/include/asm/shmparam.h b/arch/arm64/include/asm/shmparam.h
193 +index 4df608a8459e..e368a55ebd22 100644
194 +--- a/arch/arm64/include/asm/shmparam.h
195 ++++ b/arch/arm64/include/asm/shmparam.h
196 +@@ -21,7 +21,7 @@
197 + * alignment value. Since we don't have aliasing D-caches, the rest of
198 + * the time we can safely use PAGE_SIZE.
199 + */
200 +-#define COMPAT_SHMLBA 0x4000
201 ++#define COMPAT_SHMLBA (4 * PAGE_SIZE)
202 +
203 + #include <asm-generic/shmparam.h>
204 +
205 +diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
206 +index 3028d9b028c7..586326981769 100644
207 +--- a/arch/arm64/kernel/entry.S
208 ++++ b/arch/arm64/kernel/entry.S
209 +@@ -243,7 +243,7 @@ END(vectors)
210 + * Invalid mode handlers
211 + */
212 + .macro inv_entry, el, reason, regsize = 64
213 +- kernel_entry el, \regsize
214 ++ kernel_entry \el, \regsize
215 + mov x0, sp
216 + mov x1, #\reason
217 + mrs x2, esr_el1
218 +diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
219 +index bc2abb8b1599..999633bd7294 100644
220 +--- a/arch/arm64/kernel/image.h
221 ++++ b/arch/arm64/kernel/image.h
222 +@@ -64,6 +64,16 @@
223 +
224 + #ifdef CONFIG_EFI
225 +
226 ++/*
227 ++ * Prevent the symbol aliases below from being emitted into the kallsyms
228 ++ * table, by forcing them to be absolute symbols (which are conveniently
229 ++ * ignored by scripts/kallsyms) rather than section relative symbols.
230 ++ * The distinction is only relevant for partial linking, and only for symbols
231 ++ * that are defined within a section declaration (which is not the case for
232 ++ * the definitions below) so the resulting values will be identical.
233 ++ */
234 ++#define KALLSYMS_HIDE(sym) ABSOLUTE(sym)
235 ++
236 + /*
237 + * The EFI stub has its own symbol namespace prefixed by __efistub_, to
238 + * isolate it from the kernel proper. The following symbols are legally
239 +@@ -73,25 +83,25 @@
240 + * linked at. The routines below are all implemented in assembler in a
241 + * position independent manner
242 + */
243 +-__efistub_memcmp = __pi_memcmp;
244 +-__efistub_memchr = __pi_memchr;
245 +-__efistub_memcpy = __pi_memcpy;
246 +-__efistub_memmove = __pi_memmove;
247 +-__efistub_memset = __pi_memset;
248 +-__efistub_strlen = __pi_strlen;
249 +-__efistub_strcmp = __pi_strcmp;
250 +-__efistub_strncmp = __pi_strncmp;
251 +-__efistub___flush_dcache_area = __pi___flush_dcache_area;
252 ++__efistub_memcmp = KALLSYMS_HIDE(__pi_memcmp);
253 ++__efistub_memchr = KALLSYMS_HIDE(__pi_memchr);
254 ++__efistub_memcpy = KALLSYMS_HIDE(__pi_memcpy);
255 ++__efistub_memmove = KALLSYMS_HIDE(__pi_memmove);
256 ++__efistub_memset = KALLSYMS_HIDE(__pi_memset);
257 ++__efistub_strlen = KALLSYMS_HIDE(__pi_strlen);
258 ++__efistub_strcmp = KALLSYMS_HIDE(__pi_strcmp);
259 ++__efistub_strncmp = KALLSYMS_HIDE(__pi_strncmp);
260 ++__efistub___flush_dcache_area = KALLSYMS_HIDE(__pi___flush_dcache_area);
261 +
262 + #ifdef CONFIG_KASAN
263 +-__efistub___memcpy = __pi_memcpy;
264 +-__efistub___memmove = __pi_memmove;
265 +-__efistub___memset = __pi_memset;
266 ++__efistub___memcpy = KALLSYMS_HIDE(__pi_memcpy);
267 ++__efistub___memmove = KALLSYMS_HIDE(__pi_memmove);
268 ++__efistub___memset = KALLSYMS_HIDE(__pi_memset);
269 + #endif
270 +
271 +-__efistub__text = _text;
272 +-__efistub__end = _end;
273 +-__efistub__edata = _edata;
274 ++__efistub__text = KALLSYMS_HIDE(_text);
275 ++__efistub__end = KALLSYMS_HIDE(_end);
276 ++__efistub__edata = KALLSYMS_HIDE(_edata);
277 +
278 + #endif
279 +
280 +diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
281 +index 5d270ca76aec..6b4579e07aa2 100644
282 +--- a/arch/arm64/kernel/traps.c
283 ++++ b/arch/arm64/kernel/traps.c
284 +@@ -239,10 +239,12 @@ void die(const char *str, struct pt_regs *regs, int err)
285 + {
286 + struct thread_info *thread = current_thread_info();
287 + int ret;
288 ++ unsigned long flags;
289 ++
290 ++ raw_spin_lock_irqsave(&die_lock, flags);
291 +
292 + oops_enter();
293 +
294 +- raw_spin_lock_irq(&die_lock);
295 + console_verbose();
296 + bust_spinlocks(1);
297 + ret = __die(str, err, thread, regs);
298 +@@ -252,13 +254,15 @@ void die(const char *str, struct pt_regs *regs, int err)
299 +
300 + bust_spinlocks(0);
301 + add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
302 +- raw_spin_unlock_irq(&die_lock);
303 + oops_exit();
304 +
305 + if (in_interrupt())
306 + panic("Fatal exception in interrupt");
307 + if (panic_on_oops)
308 + panic("Fatal exception");
309 ++
310 ++ raw_spin_unlock_irqrestore(&die_lock, flags);
311 ++
312 + if (ret != NOTIFY_STOP)
313 + do_exit(SIGSEGV);
314 + }
315 +diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
316 +index be7f8416809f..04c4b88706d8 100644
317 +--- a/arch/arm64/mm/fault.c
318 ++++ b/arch/arm64/mm/fault.c
319 +@@ -595,20 +595,33 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
320 + {
321 + const struct fault_info *inf = debug_fault_info + DBG_ESR_EVT(esr);
322 + struct siginfo info;
323 ++ int rv;
324 +
325 +- if (!inf->fn(addr, esr, regs))
326 +- return 1;
327 ++ /*
328 ++ * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
329 ++ * already disabled to preserve the last enabled/disabled addresses.
330 ++ */
331 ++ if (interrupts_enabled(regs))
332 ++ trace_hardirqs_off();
333 +
334 +- pr_alert("Unhandled debug exception: %s (0x%08x) at 0x%016lx\n",
335 +- inf->name, esr, addr);
336 ++ if (!inf->fn(addr, esr, regs)) {
337 ++ rv = 1;
338 ++ } else {
339 ++ pr_alert("Unhandled debug exception: %s (0x%08x) at 0x%016lx\n",
340 ++ inf->name, esr, addr);
341 ++
342 ++ info.si_signo = inf->sig;
343 ++ info.si_errno = 0;
344 ++ info.si_code = inf->code;
345 ++ info.si_addr = (void __user *)addr;
346 ++ arm64_notify_die("", regs, &info, 0);
347 ++ rv = 0;
348 ++ }
349 +
350 +- info.si_signo = inf->sig;
351 +- info.si_errno = 0;
352 +- info.si_code = inf->code;
353 +- info.si_addr = (void __user *)addr;
354 +- arm64_notify_die("", regs, &info, 0);
355 ++ if (interrupts_enabled(regs))
356 ++ trace_hardirqs_on();
357 +
358 +- return 0;
359 ++ return rv;
360 + }
361 +
362 + #ifdef CONFIG_ARM64_PAN
363 +diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
364 +index e77672539e8e..e4456e450f94 100644
365 +--- a/arch/mips/include/asm/jump_label.h
366 ++++ b/arch/mips/include/asm/jump_label.h
367 +@@ -21,15 +21,15 @@
368 + #endif
369 +
370 + #ifdef CONFIG_CPU_MICROMIPS
371 +-#define NOP_INSN "nop32"
372 ++#define B_INSN "b32"
373 + #else
374 +-#define NOP_INSN "nop"
375 ++#define B_INSN "b"
376 + #endif
377 +
378 + static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
379 + {
380 +- asm_volatile_goto("1:\t" NOP_INSN "\n\t"
381 +- "nop\n\t"
382 ++ asm_volatile_goto("1:\t" B_INSN " 2f\n\t"
383 ++ "2:\tnop\n\t"
384 + ".pushsection __jump_table, \"aw\"\n\t"
385 + WORD_INSN " 1b, %l[l_yes], %0\n\t"
386 + ".popsection\n\t"
387 +diff --git a/arch/mips/loongson64/lemote-2f/irq.c b/arch/mips/loongson64/lemote-2f/irq.c
388 +index cab5f43e0e29..d371f0294cbb 100644
389 +--- a/arch/mips/loongson64/lemote-2f/irq.c
390 ++++ b/arch/mips/loongson64/lemote-2f/irq.c
391 +@@ -102,7 +102,7 @@ static struct irqaction ip6_irqaction = {
392 + static struct irqaction cascade_irqaction = {
393 + .handler = no_action,
394 + .name = "cascade",
395 +- .flags = IRQF_NO_THREAD,
396 ++ .flags = IRQF_NO_THREAD | IRQF_NO_SUSPEND,
397 + };
398 +
399 + void __init mach_init_irq(void)
400 +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
401 +index 9beee7f364ad..4598d087dec2 100644
402 +--- a/arch/x86/Kconfig
403 ++++ b/arch/x86/Kconfig
404 +@@ -1970,14 +1970,8 @@ config PHYSICAL_ALIGN
405 + Don't change this unless you know what you are doing.
406 +
407 + config HOTPLUG_CPU
408 +- bool "Support for hot-pluggable CPUs"
409 ++ def_bool y
410 + depends on SMP
411 +- ---help---
412 +- Say Y here to allow turning CPUs off and on. CPUs can be
413 +- controlled through /sys/devices/system/cpu.
414 +- ( Note: power management support will enable this option
415 +- automatically on SMP systems. )
416 +- Say N if you want to disable CPU hotplug.
417 +
418 + config BOOTPARAM_HOTPLUG_CPU0
419 + bool "Set default setting of cpu0_hotpluggable"
420 +diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c
421 +index 2b2fecffb1ad..c6a7c9ddf0ac 100644
422 +--- a/drivers/extcon/extcon-usb-gpio.c
423 ++++ b/drivers/extcon/extcon-usb-gpio.c
424 +@@ -192,6 +192,9 @@ static int usb_extcon_resume(struct device *dev)
425 + }
426 +
427 + enable_irq(info->id_irq);
428 ++ if (!device_may_wakeup(dev))
429 ++ queue_delayed_work(system_power_efficient_wq,
430 ++ &info->wq_detcable, 0);
431 +
432 + return ret;
433 + }
434 +diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
435 +index 88bd6829a358..edb45f72b34c 100644
436 +--- a/drivers/firmware/efi/libstub/Makefile
437 ++++ b/drivers/firmware/efi/libstub/Makefile
438 +@@ -8,7 +8,7 @@ cflags-$(CONFIG_X86_32) := -march=i386
439 + cflags-$(CONFIG_X86_64) := -mcmodel=small
440 + cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 \
441 + -fPIC -fno-strict-aliasing -mno-red-zone \
442 +- -mno-mmx -mno-sse -DDISABLE_BRANCH_PROFILING
443 ++ -mno-mmx -mno-sse
444 +
445 + cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) -fpie
446 + cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \
447 +@@ -16,7 +16,7 @@ cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \
448 +
449 + cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt
450 +
451 +-KBUILD_CFLAGS := $(cflags-y) \
452 ++KBUILD_CFLAGS := $(cflags-y) -DDISABLE_BRANCH_PROFILING \
453 + $(call cc-option,-ffreestanding) \
454 + $(call cc-option,-fno-stack-protector)
455 +
456 +diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
457 +index d3d0a90fe542..995b2be45982 100644
458 +--- a/drivers/gpio/gpio-adnp.c
459 ++++ b/drivers/gpio/gpio-adnp.c
460 +@@ -137,8 +137,10 @@ static int adnp_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
461 + if (err < 0)
462 + goto out;
463 +
464 +- if (err & BIT(pos))
465 +- err = -EACCES;
466 ++ if (value & BIT(pos)) {
467 ++ err = -EPERM;
468 ++ goto out;
469 ++ }
470 +
471 + err = 0;
472 +
473 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
474 +index aec6e9eef489..55884cb5a0fc 100644
475 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
476 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
477 +@@ -531,11 +531,9 @@ static int vmw_fb_set_par(struct fb_info *info)
478 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
479 + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
480 + };
481 +- struct drm_display_mode *old_mode;
482 + struct drm_display_mode *mode;
483 + int ret;
484 +
485 +- old_mode = par->set_mode;
486 + mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
487 + if (!mode) {
488 + DRM_ERROR("Could not create new fb mode.\n");
489 +@@ -546,11 +544,7 @@ static int vmw_fb_set_par(struct fb_info *info)
490 + mode->vdisplay = var->yres;
491 + vmw_guess_mode_timing(mode);
492 +
493 +- if (old_mode && drm_mode_equal(old_mode, mode)) {
494 +- drm_mode_destroy(vmw_priv->dev, mode);
495 +- mode = old_mode;
496 +- old_mode = NULL;
497 +- } else if (!vmw_kms_validate_mode_vram(vmw_priv,
498 ++ if (!vmw_kms_validate_mode_vram(vmw_priv,
499 + mode->hdisplay *
500 + DIV_ROUND_UP(var->bits_per_pixel, 8),
501 + mode->vdisplay)) {
502 +@@ -613,8 +607,8 @@ static int vmw_fb_set_par(struct fb_info *info)
503 + schedule_delayed_work(&par->local_work, 0);
504 +
505 + out_unlock:
506 +- if (old_mode)
507 +- drm_mode_destroy(vmw_priv->dev, old_mode);
508 ++ if (par->set_mode)
509 ++ drm_mode_destroy(vmw_priv->dev, par->set_mode);
510 + par->set_mode = mode;
511 +
512 + drm_modeset_unlock_all(vmw_priv->dev);
513 +diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
514 +index 92870cdb52d9..8efaa88329aa 100644
515 +--- a/drivers/hid/hid-sensor-hub.c
516 ++++ b/drivers/hid/hid-sensor-hub.c
517 +@@ -218,7 +218,8 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
518 + goto done_proc;
519 + }
520 +
521 +- remaining_bytes = do_div(buffer_size, sizeof(__s32));
522 ++ remaining_bytes = buffer_size % sizeof(__s32);
523 ++ buffer_size = buffer_size / sizeof(__s32);
524 + if (buffer_size) {
525 + for (i = 0; i < buffer_size; ++i) {
526 + hid_set_field(report->field[field_index], i,
527 +diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
528 +index 77d0f9c1118d..92969dae739d 100644
529 +--- a/drivers/hwtracing/coresight/coresight-etb10.c
530 ++++ b/drivers/hwtracing/coresight/coresight-etb10.c
531 +@@ -489,15 +489,6 @@ err_misc_register:
532 + return ret;
533 + }
534 +
535 +-static int etb_remove(struct amba_device *adev)
536 +-{
537 +- struct etb_drvdata *drvdata = amba_get_drvdata(adev);
538 +-
539 +- misc_deregister(&drvdata->miscdev);
540 +- coresight_unregister(drvdata->csdev);
541 +- return 0;
542 +-}
543 +-
544 + #ifdef CONFIG_PM
545 + static int etb_runtime_suspend(struct device *dev)
546 + {
547 +@@ -537,10 +528,10 @@ static struct amba_driver etb_driver = {
548 + .name = "coresight-etb10",
549 + .owner = THIS_MODULE,
550 + .pm = &etb_dev_pm_ops,
551 ++ .suppress_bind_attrs = true,
552 +
553 + },
554 + .probe = etb_probe,
555 +- .remove = etb_remove,
556 + .id_table = etb_ids,
557 + };
558 +
559 +diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
560 +index d630b7ece735..5981fcc69960 100644
561 +--- a/drivers/hwtracing/coresight/coresight-etm3x.c
562 ++++ b/drivers/hwtracing/coresight/coresight-etm3x.c
563 +@@ -1877,17 +1877,6 @@ err_arch_supported:
564 + return ret;
565 + }
566 +
567 +-static int etm_remove(struct amba_device *adev)
568 +-{
569 +- struct etm_drvdata *drvdata = amba_get_drvdata(adev);
570 +-
571 +- coresight_unregister(drvdata->csdev);
572 +- if (--etm_count == 0)
573 +- unregister_hotcpu_notifier(&etm_cpu_notifier);
574 +-
575 +- return 0;
576 +-}
577 +-
578 + #ifdef CONFIG_PM
579 + static int etm_runtime_suspend(struct device *dev)
580 + {
581 +@@ -1948,9 +1937,9 @@ static struct amba_driver etm_driver = {
582 + .name = "coresight-etm3x",
583 + .owner = THIS_MODULE,
584 + .pm = &etm_dev_pm_ops,
585 ++ .suppress_bind_attrs = true,
586 + },
587 + .probe = etm_probe,
588 +- .remove = etm_remove,
589 + .id_table = etm_ids,
590 + };
591 +
592 +diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
593 +index a6707642bb23..0edc10b44004 100644
594 +--- a/drivers/hwtracing/coresight/coresight-etm4x.c
595 ++++ b/drivers/hwtracing/coresight/coresight-etm4x.c
596 +@@ -2219,7 +2219,7 @@ static ssize_t name##_show(struct device *_dev, \
597 + return scnprintf(buf, PAGE_SIZE, "0x%x\n", \
598 + readl_relaxed(drvdata->base + offset)); \
599 + } \
600 +-DEVICE_ATTR_RO(name)
601 ++static DEVICE_ATTR_RO(name)
602 +
603 + coresight_simple_func(trcoslsr, TRCOSLSR);
604 + coresight_simple_func(trcpdcr, TRCPDCR);
605 +@@ -2684,17 +2684,6 @@ err_coresight_register:
606 + return ret;
607 + }
608 +
609 +-static int etm4_remove(struct amba_device *adev)
610 +-{
611 +- struct etmv4_drvdata *drvdata = amba_get_drvdata(adev);
612 +-
613 +- coresight_unregister(drvdata->csdev);
614 +- if (--etm4_count == 0)
615 +- unregister_hotcpu_notifier(&etm4_cpu_notifier);
616 +-
617 +- return 0;
618 +-}
619 +-
620 + static struct amba_id etm4_ids[] = {
621 + { /* ETM 4.0 - Qualcomm */
622 + .id = 0x0003b95d,
623 +@@ -2712,9 +2701,9 @@ static struct amba_id etm4_ids[] = {
624 + static struct amba_driver etm4x_driver = {
625 + .drv = {
626 + .name = "coresight-etm4x",
627 ++ .suppress_bind_attrs = true,
628 + },
629 + .probe = etm4_probe,
630 +- .remove = etm4_remove,
631 + .id_table = etm4_ids,
632 + };
633 +
634 +diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
635 +index 2e36bde7fcb4..25e8ea140a09 100644
636 +--- a/drivers/hwtracing/coresight/coresight-funnel.c
637 ++++ b/drivers/hwtracing/coresight/coresight-funnel.c
638 +@@ -226,14 +226,6 @@ static int funnel_probe(struct amba_device *adev, const struct amba_id *id)
639 + return 0;
640 + }
641 +
642 +-static int funnel_remove(struct amba_device *adev)
643 +-{
644 +- struct funnel_drvdata *drvdata = amba_get_drvdata(adev);
645 +-
646 +- coresight_unregister(drvdata->csdev);
647 +- return 0;
648 +-}
649 +-
650 + #ifdef CONFIG_PM
651 + static int funnel_runtime_suspend(struct device *dev)
652 + {
653 +@@ -273,9 +265,9 @@ static struct amba_driver funnel_driver = {
654 + .name = "coresight-funnel",
655 + .owner = THIS_MODULE,
656 + .pm = &funnel_dev_pm_ops,
657 ++ .suppress_bind_attrs = true,
658 + },
659 + .probe = funnel_probe,
660 +- .remove = funnel_remove,
661 + .id_table = funnel_ids,
662 + };
663 +
664 +diff --git a/drivers/hwtracing/coresight/coresight-replicator-qcom.c b/drivers/hwtracing/coresight/coresight-replicator-qcom.c
665 +index 584059e9e866..444815179460 100644
666 +--- a/drivers/hwtracing/coresight/coresight-replicator-qcom.c
667 ++++ b/drivers/hwtracing/coresight/coresight-replicator-qcom.c
668 +@@ -156,15 +156,6 @@ static int replicator_probe(struct amba_device *adev, const struct amba_id *id)
669 + return 0;
670 + }
671 +
672 +-static int replicator_remove(struct amba_device *adev)
673 +-{
674 +- struct replicator_state *drvdata = amba_get_drvdata(adev);
675 +-
676 +- pm_runtime_disable(&adev->dev);
677 +- coresight_unregister(drvdata->csdev);
678 +- return 0;
679 +-}
680 +-
681 + #ifdef CONFIG_PM
682 + static int replicator_runtime_suspend(struct device *dev)
683 + {
684 +@@ -206,9 +197,9 @@ static struct amba_driver replicator_driver = {
685 + .drv = {
686 + .name = "coresight-replicator-qcom",
687 + .pm = &replicator_dev_pm_ops,
688 ++ .suppress_bind_attrs = true,
689 + },
690 + .probe = replicator_probe,
691 +- .remove = replicator_remove,
692 + .id_table = replicator_ids,
693 + };
694 +
695 +diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
696 +index 963ac197c253..b77d700a3f0e 100644
697 +--- a/drivers/hwtracing/coresight/coresight-replicator.c
698 ++++ b/drivers/hwtracing/coresight/coresight-replicator.c
699 +@@ -127,20 +127,6 @@ out_disable_pm:
700 + return ret;
701 + }
702 +
703 +-static int replicator_remove(struct platform_device *pdev)
704 +-{
705 +- struct replicator_drvdata *drvdata = platform_get_drvdata(pdev);
706 +-
707 +- coresight_unregister(drvdata->csdev);
708 +- pm_runtime_get_sync(&pdev->dev);
709 +- if (!IS_ERR(drvdata->atclk))
710 +- clk_disable_unprepare(drvdata->atclk);
711 +- pm_runtime_put_noidle(&pdev->dev);
712 +- pm_runtime_disable(&pdev->dev);
713 +-
714 +- return 0;
715 +-}
716 +-
717 + #ifdef CONFIG_PM
718 + static int replicator_runtime_suspend(struct device *dev)
719 + {
720 +@@ -175,11 +161,11 @@ static const struct of_device_id replicator_match[] = {
721 +
722 + static struct platform_driver replicator_driver = {
723 + .probe = replicator_probe,
724 +- .remove = replicator_remove,
725 + .driver = {
726 + .name = "coresight-replicator",
727 + .of_match_table = replicator_match,
728 + .pm = &replicator_dev_pm_ops,
729 ++ .suppress_bind_attrs = true,
730 + },
731 + };
732 +
733 +diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
734 +index a57c7ec1661f..c4fa70ed14ce 100644
735 +--- a/drivers/hwtracing/coresight/coresight-tmc.c
736 ++++ b/drivers/hwtracing/coresight/coresight-tmc.c
737 +@@ -124,7 +124,7 @@ struct tmc_drvdata {
738 + bool reading;
739 + char *buf;
740 + dma_addr_t paddr;
741 +- void __iomem *vaddr;
742 ++ void *vaddr;
743 + u32 size;
744 + bool enable;
745 + enum tmc_config_type config_type;
746 +@@ -766,23 +766,10 @@ err_misc_register:
747 + err_devm_kzalloc:
748 + if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
749 + dma_free_coherent(dev, drvdata->size,
750 +- &drvdata->paddr, GFP_KERNEL);
751 ++ drvdata->vaddr, drvdata->paddr);
752 + return ret;
753 + }
754 +
755 +-static int tmc_remove(struct amba_device *adev)
756 +-{
757 +- struct tmc_drvdata *drvdata = amba_get_drvdata(adev);
758 +-
759 +- misc_deregister(&drvdata->miscdev);
760 +- coresight_unregister(drvdata->csdev);
761 +- if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
762 +- dma_free_coherent(drvdata->dev, drvdata->size,
763 +- &drvdata->paddr, GFP_KERNEL);
764 +-
765 +- return 0;
766 +-}
767 +-
768 + static struct amba_id tmc_ids[] = {
769 + {
770 + .id = 0x0003b961,
771 +@@ -795,9 +782,9 @@ static struct amba_driver tmc_driver = {
772 + .drv = {
773 + .name = "coresight-tmc",
774 + .owner = THIS_MODULE,
775 ++ .suppress_bind_attrs = true,
776 + },
777 + .probe = tmc_probe,
778 +- .remove = tmc_remove,
779 + .id_table = tmc_ids,
780 + };
781 +
782 +diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
783 +index fe3a2b19a5db..105c192eb2c1 100644
784 +--- a/drivers/hwtracing/coresight/coresight-tpiu.c
785 ++++ b/drivers/hwtracing/coresight/coresight-tpiu.c
786 +@@ -180,14 +180,6 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
787 + return 0;
788 + }
789 +
790 +-static int tpiu_remove(struct amba_device *adev)
791 +-{
792 +- struct tpiu_drvdata *drvdata = amba_get_drvdata(adev);
793 +-
794 +- coresight_unregister(drvdata->csdev);
795 +- return 0;
796 +-}
797 +-
798 + #ifdef CONFIG_PM
799 + static int tpiu_runtime_suspend(struct device *dev)
800 + {
801 +@@ -231,9 +223,9 @@ static struct amba_driver tpiu_driver = {
802 + .name = "coresight-tpiu",
803 + .owner = THIS_MODULE,
804 + .pm = &tpiu_dev_pm_ops,
805 ++ .suppress_bind_attrs = true,
806 + },
807 + .probe = tpiu_probe,
808 +- .remove = tpiu_remove,
809 + .id_table = tpiu_ids,
810 + };
811 +
812 +diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
813 +index 902ee6efd09c..c6aea4795d0b 100644
814 +--- a/drivers/hwtracing/coresight/coresight.c
815 ++++ b/drivers/hwtracing/coresight/coresight.c
816 +@@ -484,6 +484,8 @@ static void coresight_device_release(struct device *dev)
817 + {
818 + struct coresight_device *csdev = to_coresight_device(dev);
819 +
820 ++ kfree(csdev->conns);
821 ++ kfree(csdev->refcnt);
822 + kfree(csdev);
823 + }
824 +
825 +@@ -571,6 +573,8 @@ static void coresight_fixup_device_conns(struct coresight_device *csdev)
826 +
827 + if (dev) {
828 + conn->child_dev = to_coresight_device(dev);
829 ++ /* and put reference from 'bus_find_device()' */
830 ++ put_device(dev);
831 + } else {
832 + csdev->orphan = true;
833 + conn->child_dev = NULL;
834 +@@ -578,6 +582,50 @@ static void coresight_fixup_device_conns(struct coresight_device *csdev)
835 + }
836 + }
837 +
838 ++static int coresight_remove_match(struct device *dev, void *data)
839 ++{
840 ++ int i;
841 ++ struct coresight_device *csdev, *iterator;
842 ++ struct coresight_connection *conn;
843 ++
844 ++ csdev = data;
845 ++ iterator = to_coresight_device(dev);
846 ++
847 ++ /* No need to check oneself */
848 ++ if (csdev == iterator)
849 ++ return 0;
850 ++
851 ++ /*
852 ++ * Circle throuch all the connection of that component. If we find
853 ++ * a connection whose name matches @csdev, remove it.
854 ++ */
855 ++ for (i = 0; i < iterator->nr_outport; i++) {
856 ++ conn = &iterator->conns[i];
857 ++
858 ++ if (conn->child_dev == NULL)
859 ++ continue;
860 ++
861 ++ if (!strcmp(dev_name(&csdev->dev), conn->child_name)) {
862 ++ iterator->orphan = true;
863 ++ conn->child_dev = NULL;
864 ++ /* No need to continue */
865 ++ break;
866 ++ }
867 ++ }
868 ++
869 ++ /*
870 ++ * Returning '0' ensures that all known component on the
871 ++ * bus will be checked.
872 ++ */
873 ++ return 0;
874 ++}
875 ++
876 ++static void coresight_remove_conns(struct coresight_device *csdev)
877 ++{
878 ++ bus_for_each_dev(&coresight_bustype, NULL,
879 ++ csdev, coresight_remove_match);
880 ++}
881 ++
882 + /**
883 + * coresight_timeout - loop until a bit has changed to a specific state.
884 + * @addr: base address of the area of interest.
885 +@@ -716,12 +764,9 @@ EXPORT_SYMBOL_GPL(coresight_register);
886 +
887 + void coresight_unregister(struct coresight_device *csdev)
888 + {
889 +- mutex_lock(&coresight_mutex);
890 +-
891 +- kfree(csdev->conns);
892 ++ /* Remove references of that device in the topology */
893 ++ coresight_remove_conns(csdev);
894 + device_unregister(&csdev->dev);
895 +-
896 +- mutex_unlock(&coresight_mutex);
897 + }
898 + EXPORT_SYMBOL_GPL(coresight_unregister);
899 +
900 +diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
901 +index 7d2bb1549608..fb7597b1c66f 100644
902 +--- a/drivers/hwtracing/coresight/of_coresight.c
903 ++++ b/drivers/hwtracing/coresight/of_coresight.c
904 +@@ -86,7 +86,7 @@ static int of_coresight_alloc_memory(struct device *dev,
905 + return -ENOMEM;
906 +
907 + /* Children connected to this component via @outports */
908 +- pdata->child_names = devm_kzalloc(dev, pdata->nr_outport *
909 ++ pdata->child_names = devm_kzalloc(dev, pdata->nr_outport *
910 + sizeof(*pdata->child_names),
911 + GFP_KERNEL);
912 + if (!pdata->child_names)
913 +diff --git a/drivers/hwtracing/stm/Kconfig b/drivers/hwtracing/stm/Kconfig
914 +index e7a348807f0c..e0ac75395526 100644
915 +--- a/drivers/hwtracing/stm/Kconfig
916 ++++ b/drivers/hwtracing/stm/Kconfig
917 +@@ -9,6 +9,8 @@ config STM
918 +
919 + Say Y here to enable System Trace Module device support.
920 +
921 ++if STM
922 ++
923 + config STM_DUMMY
924 + tristate "Dummy STM driver"
925 + help
926 +@@ -25,3 +27,5 @@ config STM_SOURCE_CONSOLE
927 +
928 + If you want to send kernel console messages over STM devices,
929 + say Y.
930 ++
931 ++endif
932 +diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
933 +index 92ab51aa8a74..b6cc841de79d 100644
934 +--- a/drivers/hwtracing/stm/core.c
935 ++++ b/drivers/hwtracing/stm/core.c
936 +@@ -114,6 +114,7 @@ struct stm_device *stm_find_device(const char *buf)
937 +
938 + stm = to_stm_device(dev);
939 + if (!try_module_get(stm->owner)) {
940 ++ /* matches class_find_device() above */
941 + put_device(dev);
942 + return NULL;
943 + }
944 +@@ -126,7 +127,7 @@ struct stm_device *stm_find_device(const char *buf)
945 + * @stm: stm device, previously acquired by stm_find_device()
946 + *
947 + * This drops the module reference and device reference taken by
948 +- * stm_find_device().
949 ++ * stm_find_device() or stm_char_open().
950 + */
951 + void stm_put_device(struct stm_device *stm)
952 + {
953 +@@ -186,6 +187,9 @@ static void stm_output_claim(struct stm_device *stm, struct stm_output *output)
954 + {
955 + struct stp_master *master = stm_master(stm, output->master);
956 +
957 ++ lockdep_assert_held(&stm->mc_lock);
958 ++ lockdep_assert_held(&output->lock);
959 ++
960 + if (WARN_ON_ONCE(master->nr_free < output->nr_chans))
961 + return;
962 +
963 +@@ -200,6 +204,9 @@ stm_output_disclaim(struct stm_device *stm, struct stm_output *output)
964 + {
965 + struct stp_master *master = stm_master(stm, output->master);
966 +
967 ++ lockdep_assert_held(&stm->mc_lock);
968 ++ lockdep_assert_held(&output->lock);
969 ++
970 + bitmap_release_region(&master->chan_map[0], output->channel,
971 + ilog2(output->nr_chans));
972 +
973 +@@ -292,6 +299,7 @@ static int stm_output_assign(struct stm_device *stm, unsigned int width,
974 + }
975 +
976 + spin_lock(&stm->mc_lock);
977 ++ spin_lock(&output->lock);
978 + /* output is already assigned -- shouldn't happen */
979 + if (WARN_ON_ONCE(output->nr_chans))
980 + goto unlock;
981 +@@ -308,6 +316,7 @@ static int stm_output_assign(struct stm_device *stm, unsigned int width,
982 +
983 + ret = 0;
984 + unlock:
985 ++ spin_unlock(&output->lock);
986 + spin_unlock(&stm->mc_lock);
987 +
988 + return ret;
989 +@@ -316,11 +325,18 @@ unlock:
990 + static void stm_output_free(struct stm_device *stm, struct stm_output *output)
991 + {
992 + spin_lock(&stm->mc_lock);
993 ++ spin_lock(&output->lock);
994 + if (output->nr_chans)
995 + stm_output_disclaim(stm, output);
996 ++ spin_unlock(&output->lock);
997 + spin_unlock(&stm->mc_lock);
998 + }
999 +
1000 ++static void stm_output_init(struct stm_output *output)
1001 ++{
1002 ++ spin_lock_init(&output->lock);
1003 ++}
1004 ++
1005 + static int major_match(struct device *dev, const void *data)
1006 + {
1007 + unsigned int major = *(unsigned int *)data;
1008 +@@ -343,6 +359,7 @@ static int stm_char_open(struct inode *inode, struct file *file)
1009 + if (!stmf)
1010 + return -ENOMEM;
1011 +
1012 ++ stm_output_init(&stmf->output);
1013 + stmf->stm = to_stm_device(dev);
1014 +
1015 + if (!try_module_get(stmf->stm->owner))
1016 +@@ -353,6 +370,8 @@ static int stm_char_open(struct inode *inode, struct file *file)
1017 + return nonseekable_open(inode, file);
1018 +
1019 + err_free:
1020 ++ /* matches class_find_device() above */
1021 ++ put_device(dev);
1022 + kfree(stmf);
1023 +
1024 + return err;
1025 +@@ -363,6 +382,11 @@ static int stm_char_release(struct inode *inode, struct file *file)
1026 + struct stm_file *stmf = file->private_data;
1027 +
1028 + stm_output_free(stmf->stm, &stmf->output);
1029 ++
1030 ++ /*
1031 ++ * matches the stm_char_open()'s
1032 ++ * class_find_device() + try_module_get()
1033 ++ */
1034 + stm_put_device(stmf->stm);
1035 + kfree(stmf);
1036 +
1037 +@@ -410,6 +434,9 @@ static ssize_t stm_char_write(struct file *file, const char __user *buf,
1038 + char *kbuf;
1039 + int err;
1040 +
1041 ++ if (count + 1 > PAGE_SIZE)
1042 ++ count = PAGE_SIZE - 1;
1043 ++
1044 + /*
1045 + * if no m/c have been assigned to this writer up to this
1046 + * point, use "default" policy entry
1047 +@@ -521,10 +548,8 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
1048 + ret = stm->data->link(stm->data, stmf->output.master,
1049 + stmf->output.channel);
1050 +
1051 +- if (ret) {
1052 ++ if (ret)
1053 + stm_output_free(stmf->stm, &stmf->output);
1054 +- stm_put_device(stmf->stm);
1055 +- }
1056 +
1057 + err_free:
1058 + kfree(id);
1059 +@@ -639,17 +664,11 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
1060 + stm->dev.parent = parent;
1061 + stm->dev.release = stm_device_release;
1062 +
1063 +- err = kobject_set_name(&stm->dev.kobj, "%s", stm_data->name);
1064 +- if (err)
1065 +- goto err_device;
1066 +-
1067 +- err = device_add(&stm->dev);
1068 +- if (err)
1069 +- goto err_device;
1070 +-
1071 ++ mutex_init(&stm->link_mutex);
1072 + spin_lock_init(&stm->link_lock);
1073 + INIT_LIST_HEAD(&stm->link_list);
1074 +
1075 ++ /* initialize the object before it is accessible via sysfs */
1076 + spin_lock_init(&stm->mc_lock);
1077 + mutex_init(&stm->policy_mutex);
1078 + stm->sw_nmasters = nmasters;
1079 +@@ -657,9 +676,20 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
1080 + stm->data = stm_data;
1081 + stm_data->stm = stm;
1082 +
1083 ++ err = kobject_set_name(&stm->dev.kobj, "%s", stm_data->name);
1084 ++ if (err)
1085 ++ goto err_device;
1086 ++
1087 ++ err = device_add(&stm->dev);
1088 ++ if (err)
1089 ++ goto err_device;
1090 ++
1091 + return 0;
1092 +
1093 + err_device:
1094 ++ unregister_chrdev(stm->major, stm_data->name);
1095 ++
1096 ++ /* matches device_initialize() above */
1097 + put_device(&stm->dev);
1098 + err_free:
1099 + vfree(stm);
1100 +@@ -668,20 +698,28 @@ err_free:
1101 + }
1102 + EXPORT_SYMBOL_GPL(stm_register_device);
1103 +
1104 +-static void __stm_source_link_drop(struct stm_source_device *src,
1105 +- struct stm_device *stm);
1106 ++static int __stm_source_link_drop(struct stm_source_device *src,
1107 ++ struct stm_device *stm);
1108 +
1109 + void stm_unregister_device(struct stm_data *stm_data)
1110 + {
1111 + struct stm_device *stm = stm_data->stm;
1112 + struct stm_source_device *src, *iter;
1113 +- int i;
1114 ++ int i, ret;
1115 +
1116 +- spin_lock(&stm->link_lock);
1117 ++ mutex_lock(&stm->link_mutex);
1118 + list_for_each_entry_safe(src, iter, &stm->link_list, link_entry) {
1119 +- __stm_source_link_drop(src, stm);
1120 ++ ret = __stm_source_link_drop(src, stm);
1121 ++ /*
1122 ++ * src <-> stm link must not change under the same
1123 ++ * stm::link_mutex, so complain loudly if it has;
1124 ++ * also in this situation ret!=0 means this src is
1125 ++ * not connected to this stm and it should be otherwise
1126 ++ * safe to proceed with the tear-down of stm.
1127 ++ */
1128 ++ WARN_ON_ONCE(ret);
1129 + }
1130 +- spin_unlock(&stm->link_lock);
1131 ++ mutex_unlock(&stm->link_mutex);
1132 +
1133 + synchronize_srcu(&stm_source_srcu);
1134 +
1135 +@@ -700,6 +738,17 @@ void stm_unregister_device(struct stm_data *stm_data)
1136 + }
1137 + EXPORT_SYMBOL_GPL(stm_unregister_device);
1138 +
1139 ++/*
1140 ++ * stm::link_list access serialization uses a spinlock and a mutex; holding
1141 ++ * either of them guarantees that the list is stable; modification requires
1142 ++ * holding both of them.
1143 ++ *
1144 ++ * Lock ordering is as follows:
1145 ++ * stm::link_mutex
1146 ++ * stm::link_lock
1147 ++ * src::link_lock
1148 ++ */
1149 ++
1150 + /**
1151 + * stm_source_link_add() - connect an stm_source device to an stm device
1152 + * @src: stm_source device
1153 +@@ -716,6 +765,7 @@ static int stm_source_link_add(struct stm_source_device *src,
1154 + char *id;
1155 + int err;
1156 +
1157 ++ mutex_lock(&stm->link_mutex);
1158 + spin_lock(&stm->link_lock);
1159 + spin_lock(&src->link_lock);
1160 +
1161 +@@ -725,6 +775,7 @@ static int stm_source_link_add(struct stm_source_device *src,
1162 +
1163 + spin_unlock(&src->link_lock);
1164 + spin_unlock(&stm->link_lock);
1165 ++ mutex_unlock(&stm->link_mutex);
1166 +
1167 + id = kstrdup(src->data->name, GFP_KERNEL);
1168 + if (id) {
1169 +@@ -759,9 +810,9 @@ static int stm_source_link_add(struct stm_source_device *src,
1170 +
1171 + fail_free_output:
1172 + stm_output_free(stm, &src->output);
1173 +- stm_put_device(stm);
1174 +
1175 + fail_detach:
1176 ++ mutex_lock(&stm->link_mutex);
1177 + spin_lock(&stm->link_lock);
1178 + spin_lock(&src->link_lock);
1179 +
1180 +@@ -770,6 +821,7 @@ fail_detach:
1181 +
1182 + spin_unlock(&src->link_lock);
1183 + spin_unlock(&stm->link_lock);
1184 ++ mutex_unlock(&stm->link_mutex);
1185 +
1186 + return err;
1187 + }
1188 +@@ -782,28 +834,45 @@ fail_detach:
1189 + * If @stm is @src::link, disconnect them from one another and put the
1190 + * reference on the @stm device.
1191 + *
1192 +- * Caller must hold stm::link_lock.
1193 ++ * Caller must hold stm::link_mutex.
1194 + */
1195 +-static void __stm_source_link_drop(struct stm_source_device *src,
1196 +- struct stm_device *stm)
1197 ++static int __stm_source_link_drop(struct stm_source_device *src,
1198 ++ struct stm_device *stm)
1199 + {
1200 + struct stm_device *link;
1201 ++ int ret = 0;
1202 +
1203 ++ lockdep_assert_held(&stm->link_mutex);
1204 ++
1205 ++ /* for stm::link_list modification, we hold both mutex and spinlock */
1206 ++ spin_lock(&stm->link_lock);
1207 + spin_lock(&src->link_lock);
1208 + link = srcu_dereference_check(src->link, &stm_source_srcu, 1);
1209 +- if (WARN_ON_ONCE(link != stm)) {
1210 +- spin_unlock(&src->link_lock);
1211 +- return;
1212 ++
1213 ++ /*
1214 ++ * The linked device may have changed since we last looked, because
1215 ++ * we weren't holding the src::link_lock back then; if this is the
1216 ++ * case, tell the caller to retry.
1217 ++ */
1218 ++ if (link != stm) {
1219 ++ ret = -EAGAIN;
1220 ++ goto unlock;
1221 + }
1222 +
1223 + stm_output_free(link, &src->output);
1224 +- /* caller must hold stm::link_lock */
1225 + list_del_init(&src->link_entry);
1226 + /* matches stm_find_device() from stm_source_link_store() */
1227 + stm_put_device(link);
1228 + rcu_assign_pointer(src->link, NULL);
1229 +
1230 ++unlock:
1231 + spin_unlock(&src->link_lock);
1232 ++ spin_unlock(&stm->link_lock);
1233 ++
1234 ++ if (!ret && src->data->unlink)
1235 ++ src->data->unlink(src->data);
1236 ++
1237 ++ return ret;
1238 + }
1239 +
1240 + /**
1241 +@@ -819,21 +888,29 @@ static void __stm_source_link_drop(struct stm_source_device *src,
1242 + static void stm_source_link_drop(struct stm_source_device *src)
1243 + {
1244 + struct stm_device *stm;
1245 +- int idx;
1246 ++ int idx, ret;
1247 +
1248 ++retry:
1249 + idx = srcu_read_lock(&stm_source_srcu);
1250 ++ /*
1251 ++ * The stm device will be valid for the duration of this
1252 ++ * read section, but the link may change before we grab
1253 ++ * the src::link_lock in __stm_source_link_drop().
1254 ++ */
1255 + stm = srcu_dereference(src->link, &stm_source_srcu);
1256 +
1257 ++ ret = 0;
1258 + if (stm) {
1259 +- if (src->data->unlink)
1260 +- src->data->unlink(src->data);
1261 +-
1262 +- spin_lock(&stm->link_lock);
1263 +- __stm_source_link_drop(src, stm);
1264 +- spin_unlock(&stm->link_lock);
1265 ++ mutex_lock(&stm->link_mutex);
1266 ++ ret = __stm_source_link_drop(src, stm);
1267 ++ mutex_unlock(&stm->link_mutex);
1268 + }
1269 +
1270 + srcu_read_unlock(&stm_source_srcu, idx);
1271 ++
1272 ++ /* if it did change, retry */
1273 ++ if (ret == -EAGAIN)
1274 ++ goto retry;
1275 + }
1276 +
1277 + static ssize_t stm_source_link_show(struct device *dev,
1278 +@@ -868,8 +945,10 @@ static ssize_t stm_source_link_store(struct device *dev,
1279 + return -EINVAL;
1280 +
1281 + err = stm_source_link_add(src, link);
1282 +- if (err)
1283 ++ if (err) {
1284 ++ /* matches the stm_find_device() above */
1285 + stm_put_device(link);
1286 ++ }
1287 +
1288 + return err ? : count;
1289 + }
1290 +@@ -931,6 +1010,7 @@ int stm_source_register_device(struct device *parent,
1291 + if (err)
1292 + goto err;
1293 +
1294 ++ stm_output_init(&src->output);
1295 + spin_lock_init(&src->link_lock);
1296 + INIT_LIST_HEAD(&src->link_entry);
1297 + src->data = data;
1298 +diff --git a/drivers/hwtracing/stm/policy.c b/drivers/hwtracing/stm/policy.c
1299 +index 11ab6d01adf6..1db189657b2b 100644
1300 +--- a/drivers/hwtracing/stm/policy.c
1301 ++++ b/drivers/hwtracing/stm/policy.c
1302 +@@ -272,13 +272,17 @@ void stp_policy_unbind(struct stp_policy *policy)
1303 + {
1304 + struct stm_device *stm = policy->stm;
1305 +
1306 ++ /*
1307 ++ * stp_policy_release() will not call here if the policy is already
1308 ++ * unbound; other users should not either, as no link exists between
1309 ++ * this policy and anything else in that case
1310 ++ */
1311 + if (WARN_ON_ONCE(!policy->stm))
1312 + return;
1313 +
1314 +- mutex_lock(&stm->policy_mutex);
1315 +- stm->policy = NULL;
1316 +- mutex_unlock(&stm->policy_mutex);
1317 ++ lockdep_assert_held(&stm->policy_mutex);
1318 +
1319 ++ stm->policy = NULL;
1320 + policy->stm = NULL;
1321 +
1322 + stm_put_device(stm);
1323 +@@ -287,8 +291,16 @@ void stp_policy_unbind(struct stp_policy *policy)
1324 + static void stp_policy_release(struct config_item *item)
1325 + {
1326 + struct stp_policy *policy = to_stp_policy(item);
1327 ++ struct stm_device *stm = policy->stm;
1328 +
1329 ++ /* a policy *can* be unbound and still exist in configfs tree */
1330 ++ if (!stm)
1331 ++ return;
1332 ++
1333 ++ mutex_lock(&stm->policy_mutex);
1334 + stp_policy_unbind(policy);
1335 ++ mutex_unlock(&stm->policy_mutex);
1336 ++
1337 + kfree(policy);
1338 + }
1339 +
1340 +@@ -320,10 +332,11 @@ stp_policies_make(struct config_group *group, const char *name)
1341 +
1342 + /*
1343 + * node must look like <device_name>.<policy_name>, where
1344 +- * <device_name> is the name of an existing stm device and
1345 +- * <policy_name> is an arbitrary string
1346 ++ * <device_name> is the name of an existing stm device; may
1347 ++ * contain dots;
1348 ++ * <policy_name> is an arbitrary string; may not contain dots
1349 + */
1350 +- p = strchr(devname, '.');
1351 ++ p = strrchr(devname, '.');
1352 + if (!p) {
1353 + kfree(devname);
1354 + return ERR_PTR(-EINVAL);
1355 +diff --git a/drivers/hwtracing/stm/stm.h b/drivers/hwtracing/stm/stm.h
1356 +index 95ece0292c99..4e8c6926260f 100644
1357 +--- a/drivers/hwtracing/stm/stm.h
1358 ++++ b/drivers/hwtracing/stm/stm.h
1359 +@@ -45,6 +45,7 @@ struct stm_device {
1360 + int major;
1361 + unsigned int sw_nmasters;
1362 + struct stm_data *data;
1363 ++ struct mutex link_mutex;
1364 + spinlock_t link_lock;
1365 + struct list_head link_list;
1366 + /* master allocation */
1367 +@@ -56,6 +57,7 @@ struct stm_device {
1368 + container_of((_d), struct stm_device, dev)
1369 +
1370 + struct stm_output {
1371 ++ spinlock_t lock;
1372 + unsigned int master;
1373 + unsigned int channel;
1374 + unsigned int nr_chans;
1375 +diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
1376 +index 28543d795188..9a27809bdaf2 100644
1377 +--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
1378 ++++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
1379 +@@ -4370,7 +4370,8 @@ setup_pci(struct hfc_multi *hc, struct pci_dev *pdev,
1380 + if (m->clock2)
1381 + test_and_set_bit(HFC_CHIP_CLOCK2, &hc->chip);
1382 +
1383 +- if (ent->device == 0xB410) {
1384 ++ if (ent->vendor == PCI_VENDOR_ID_DIGIUM &&
1385 ++ ent->device == PCI_DEVICE_ID_DIGIUM_HFC4S) {
1386 + test_and_set_bit(HFC_CHIP_B410P, &hc->chip);
1387 + test_and_set_bit(HFC_CHIP_PCM_MASTER, &hc->chip);
1388 + test_and_clear_bit(HFC_CHIP_PCM_SLAVE, &hc->chip);
1389 +diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
1390 +index 618e4e2b4207..fea09a33c6c8 100644
1391 +--- a/drivers/media/usb/uvc/uvc_ctrl.c
1392 ++++ b/drivers/media/usb/uvc/uvc_ctrl.c
1393 +@@ -1202,7 +1202,7 @@ static void uvc_ctrl_fill_event(struct uvc_video_chain *chain,
1394 +
1395 + __uvc_query_v4l2_ctrl(chain, ctrl, mapping, &v4l2_ctrl);
1396 +
1397 +- memset(ev->reserved, 0, sizeof(ev->reserved));
1398 ++ memset(ev, 0, sizeof(*ev));
1399 + ev->type = V4L2_EVENT_CTRL;
1400 + ev->id = v4l2_ctrl.id;
1401 + ev->u.ctrl.value = value;
1402 +diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
1403 +index 523758e71fe6..70097cc3a35d 100644
1404 +--- a/drivers/media/v4l2-core/v4l2-ctrls.c
1405 ++++ b/drivers/media/v4l2-core/v4l2-ctrls.c
1406 +@@ -1212,7 +1212,7 @@ static u32 user_flags(const struct v4l2_ctrl *ctrl)
1407 +
1408 + static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 changes)
1409 + {
1410 +- memset(ev->reserved, 0, sizeof(ev->reserved));
1411 ++ memset(ev, 0, sizeof(*ev));
1412 + ev->type = V4L2_EVENT_CTRL;
1413 + ev->id = ctrl->id;
1414 + ev->u.ctrl.changes = changes;
1415 +diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
1416 +index f2b733275a0a..f600bdcaf5b4 100644
1417 +--- a/drivers/mmc/card/block.c
1418 ++++ b/drivers/mmc/card/block.c
1419 +@@ -47,13 +47,10 @@
1420 + #include "queue.h"
1421 +
1422 + MODULE_ALIAS("mmc:block");
1423 +-
1424 +-#ifdef KERNEL
1425 + #ifdef MODULE_PARAM_PREFIX
1426 + #undef MODULE_PARAM_PREFIX
1427 + #endif
1428 + #define MODULE_PARAM_PREFIX "mmcblk."
1429 +-#endif
1430 +
1431 + #define INAND_CMD38_ARG_EXT_CSD 113
1432 + #define INAND_CMD38_ARG_ERASE 0x00
1433 +@@ -171,11 +168,7 @@ static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
1434 +
1435 + static inline int mmc_get_devidx(struct gendisk *disk)
1436 + {
1437 +- int devmaj = MAJOR(disk_devt(disk));
1438 +- int devidx = MINOR(disk_devt(disk)) / perdev_minors;
1439 +-
1440 +- if (!devmaj)
1441 +- devidx = disk->first_minor / perdev_minors;
1442 ++ int devidx = disk->first_minor / perdev_minors;
1443 + return devidx;
1444 + }
1445 +
1446 +@@ -2252,6 +2245,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
1447 + md->disk->queue = md->queue.queue;
1448 + md->disk->driverfs_dev = parent;
1449 + set_disk_ro(md->disk, md->read_only || default_ro);
1450 ++ md->disk->flags = GENHD_FL_EXT_DEVT;
1451 + if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
1452 + md->disk->flags |= GENHD_FL_NO_PART_SCAN;
1453 +
1454 +diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
1455 +index 299a83f1ad38..df074f8c7cb7 100644
1456 +--- a/drivers/mmc/core/core.c
1457 ++++ b/drivers/mmc/core/core.c
1458 +@@ -1039,7 +1039,7 @@ static inline void mmc_set_ios(struct mmc_host *host)
1459 + "width %u timing %u\n",
1460 + mmc_hostname(host), ios->clock, ios->bus_mode,
1461 + ios->power_mode, ios->chip_select, ios->vdd,
1462 +- ios->bus_width, ios->timing);
1463 ++ 1 << ios->bus_width, ios->timing);
1464 +
1465 + host->ops->set_ios(host, ios);
1466 + }
1467 +@@ -1220,8 +1220,12 @@ int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
1468 +
1469 + voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
1470 + num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
1471 +- if (!voltage_ranges || !num_ranges) {
1472 +- pr_info("%s: voltage-ranges unspecified\n", np->full_name);
1473 ++ if (!voltage_ranges) {
1474 ++ pr_debug("%s: voltage-ranges unspecified\n", np->full_name);
1475 ++ return -EINVAL;
1476 ++ }
1477 ++ if (!num_ranges) {
1478 ++ pr_err("%s: voltage-ranges empty\n", np->full_name);
1479 + return -EINVAL;
1480 + }
1481 +
1482 +diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
1483 +index 154aced0b91b..705586dcd9fa 100644
1484 +--- a/drivers/mmc/core/debugfs.c
1485 ++++ b/drivers/mmc/core/debugfs.c
1486 +@@ -220,7 +220,7 @@ static int mmc_clock_opt_set(void *data, u64 val)
1487 + struct mmc_host *host = data;
1488 +
1489 + /* We need this check due to input value is u64 */
1490 +- if (val > host->f_max)
1491 ++ if (val != 0 && (val > host->f_max || val < host->f_min))
1492 + return -EINVAL;
1493 +
1494 + mmc_claim_host(host);
1495 +diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
1496 +index a31789be0840..7844baecf306 100644
1497 +--- a/drivers/mmc/core/mmc.c
1498 ++++ b/drivers/mmc/core/mmc.c
1499 +@@ -508,7 +508,7 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
1500 + card->ext_csd.raw_bkops_status =
1501 + ext_csd[EXT_CSD_BKOPS_STATUS];
1502 + if (!card->ext_csd.man_bkops_en)
1503 +- pr_info("%s: MAN_BKOPS_EN bit is not set\n",
1504 ++ pr_debug("%s: MAN_BKOPS_EN bit is not set\n",
1505 + mmc_hostname(card->host));
1506 + }
1507 +
1508 +@@ -952,7 +952,7 @@ static int mmc_select_bus_width(struct mmc_card *card)
1509 + break;
1510 + } else {
1511 + pr_warn("%s: switch to bus width %d failed\n",
1512 +- mmc_hostname(host), ext_csd_bits[idx]);
1513 ++ mmc_hostname(host), 1 << bus_width);
1514 + }
1515 + }
1516 +
1517 +@@ -1251,10 +1251,11 @@ static int mmc_select_hs200(struct mmc_card *card)
1518 + {
1519 + struct mmc_host *host = card->host;
1520 + bool send_status = true;
1521 +- unsigned int old_timing;
1522 ++ unsigned int old_timing, old_signal_voltage;
1523 + int err = -EINVAL;
1524 + u8 val;
1525 +
1526 ++ old_signal_voltage = host->ios.signal_voltage;
1527 + if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_2V)
1528 + err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
1529 +
1530 +@@ -1263,7 +1264,7 @@ static int mmc_select_hs200(struct mmc_card *card)
1531 +
1532 + /* If fails try again during next card power cycle */
1533 + if (err)
1534 +- goto err;
1535 ++ return err;
1536 +
1537 + mmc_select_driver_type(card);
1538 +
1539 +@@ -1297,9 +1298,14 @@ static int mmc_select_hs200(struct mmc_card *card)
1540 + }
1541 + }
1542 + err:
1543 +- if (err)
1544 ++ if (err) {
1545 ++ /* fall back to the old signal voltage, if fails report error */
1546 ++ if (__mmc_set_signal_voltage(host, old_signal_voltage))
1547 ++ err = -EIO;
1548 ++
1549 + pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
1550 + __func__, err);
1551 ++ }
1552 + return err;
1553 + }
1554 +
1555 +diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
1556 +index 1f444269ebbe..76b49b9772d0 100644
1557 +--- a/drivers/mmc/core/mmc_ops.c
1558 ++++ b/drivers/mmc/core/mmc_ops.c
1559 +@@ -542,7 +542,7 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
1560 + timeout_ms = MMC_OPS_TIMEOUT_MS;
1561 +
1562 + /* Must check status to be sure of no errors. */
1563 +- timeout = jiffies + msecs_to_jiffies(timeout_ms);
1564 ++ timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
1565 + do {
1566 + if (send_status) {
1567 + err = __mmc_send_status(card, &status, ignore_crc);
1568 +diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c
1569 +index d10538bb5e07..96f45caea109 100644
1570 +--- a/drivers/mmc/core/pwrseq_simple.c
1571 ++++ b/drivers/mmc/core/pwrseq_simple.c
1572 +@@ -29,15 +29,18 @@ struct mmc_pwrseq_simple {
1573 + static void mmc_pwrseq_simple_set_gpios_value(struct mmc_pwrseq_simple *pwrseq,
1574 + int value)
1575 + {
1576 +- int i;
1577 + struct gpio_descs *reset_gpios = pwrseq->reset_gpios;
1578 +- int values[reset_gpios->ndescs];
1579 +
1580 +- for (i = 0; i < reset_gpios->ndescs; i++)
1581 +- values[i] = value;
1582 ++ if (!IS_ERR(reset_gpios)) {
1583 ++ int i;
1584 ++ int values[reset_gpios->ndescs];
1585 +
1586 +- gpiod_set_array_value_cansleep(reset_gpios->ndescs, reset_gpios->desc,
1587 +- values);
1588 ++ for (i = 0; i < reset_gpios->ndescs; i++)
1589 ++ values[i] = value;
1590 ++
1591 ++ gpiod_set_array_value_cansleep(
1592 ++ reset_gpios->ndescs, reset_gpios->desc, values);
1593 ++ }
1594 + }
1595 +
1596 + static void mmc_pwrseq_simple_pre_power_on(struct mmc_host *host)
1597 +@@ -79,7 +82,8 @@ static void mmc_pwrseq_simple_free(struct mmc_host *host)
1598 + struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
1599 + struct mmc_pwrseq_simple, pwrseq);
1600 +
1601 +- gpiod_put_array(pwrseq->reset_gpios);
1602 ++ if (!IS_ERR(pwrseq->reset_gpios))
1603 ++ gpiod_put_array(pwrseq->reset_gpios);
1604 +
1605 + if (!IS_ERR(pwrseq->ext_clk))
1606 + clk_put(pwrseq->ext_clk);
1607 +@@ -112,7 +116,9 @@ struct mmc_pwrseq *mmc_pwrseq_simple_alloc(struct mmc_host *host,
1608 + }
1609 +
1610 + pwrseq->reset_gpios = gpiod_get_array(dev, "reset", GPIOD_OUT_HIGH);
1611 +- if (IS_ERR(pwrseq->reset_gpios)) {
1612 ++ if (IS_ERR(pwrseq->reset_gpios) &&
1613 ++ PTR_ERR(pwrseq->reset_gpios) != -ENOENT &&
1614 ++ PTR_ERR(pwrseq->reset_gpios) != -ENOSYS) {
1615 + ret = PTR_ERR(pwrseq->reset_gpios);
1616 + goto clk_put;
1617 + }
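The pwrseq_simple hunks above treat a missing "reset" GPIO array as optional: gpiod_get_array() may return an ERR_PTR, only errors other than -ENOENT/-ENOSYS abort the probe, and every later user re-checks IS_ERR() before touching the descriptors. Below is a minimal userspace sketch of that convention, with simplified stand-ins for the kernel's ERR_PTR/IS_ERR/PTR_ERR helpers (the real ones live in <linux/err.h>); the lookup function and resource are hypothetical.

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for the kernel's ERR_PTR/IS_ERR/PTR_ERR helpers. */
    #define MAX_ERRNO 4095
    static inline void *ERR_PTR(long err) { return (void *)err; }
    static inline long PTR_ERR(const void *p) { return (long)p; }
    static inline int IS_ERR(const void *p)
    {
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }

    /* Hypothetical lookup: pretend the "reset" line is simply not described. */
    static void *get_reset_gpios(void)
    {
        return ERR_PTR(-ENOENT);
    }

    int main(void)
    {
        void *reset = get_reset_gpios();

        /* An absent resource is not an error; anything else aborts "probe". */
        if (IS_ERR(reset) &&
            PTR_ERR(reset) != -ENOENT && PTR_ERR(reset) != -ENOSYS) {
            fprintf(stderr, "probe failed: %ld\n", PTR_ERR(reset));
            return 1;
        }

        /* Later users must re-check before dereferencing. */
        if (!IS_ERR(reset))
            puts("toggling reset GPIOs");
        else
            puts("no reset GPIOs described, skipping");
        return 0;
    }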
1618 +diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
1619 +index 72bbb12fb938..1d57c12b191c 100644
1620 +--- a/drivers/mmc/host/pxamci.c
1621 ++++ b/drivers/mmc/host/pxamci.c
1622 +@@ -181,7 +181,7 @@ static void pxamci_dma_irq(void *param);
1623 + static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
1624 + {
1625 + struct dma_async_tx_descriptor *tx;
1626 +- enum dma_data_direction direction;
1627 ++ enum dma_transfer_direction direction;
1628 + struct dma_slave_config config;
1629 + struct dma_chan *chan;
1630 + unsigned int nob = data->blocks;
1631 +diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
1632 +index a10fde40b6c3..3c7c3a1c8f4f 100644
1633 +--- a/drivers/mmc/host/tmio_mmc_pio.c
1634 ++++ b/drivers/mmc/host/tmio_mmc_pio.c
1635 +@@ -716,7 +716,7 @@ irqreturn_t tmio_mmc_sdio_irq(int irq, void *devid)
1636 + unsigned int sdio_status;
1637 +
1638 + if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
1639 +- return IRQ_HANDLED;
1640 ++ return IRQ_NONE;
1641 +
1642 + status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
1643 + ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdcard_irq_mask;
1644 +@@ -730,7 +730,7 @@ irqreturn_t tmio_mmc_sdio_irq(int irq, void *devid)
1645 + if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
1646 + mmc_signal_sdio_irq(mmc);
1647 +
1648 +- return IRQ_HANDLED;
1649 ++ return IRQ_RETVAL(ireg);
1650 + }
1651 + EXPORT_SYMBOL(tmio_mmc_sdio_irq);
1652 +
1653 +@@ -747,9 +747,7 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid)
1654 + if (__tmio_mmc_sdcard_irq(host, ireg, status))
1655 + return IRQ_HANDLED;
1656 +
1657 +- tmio_mmc_sdio_irq(irq, devid);
1658 +-
1659 +- return IRQ_HANDLED;
1660 ++ return tmio_mmc_sdio_irq(irq, devid);
1661 + }
1662 + EXPORT_SYMBOL(tmio_mmc_irq);
1663 +
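The tmio change above stops unconditionally reporting IRQ_HANDLED from the SDIO path: it returns IRQ_NONE when SDIO interrupts are not enabled and otherwise uses IRQ_RETVAL() on the serviced status bits, so the core can detect spurious interrupts. A hedged userspace sketch of that pattern follows; the status values are made up, and IRQ_HANDLED/IRQ_NONE/IRQ_RETVAL are redefined here only for the demo (in the kernel they come from <linux/irqreturn.h>).

    #include <stdio.h>

    /* Userspace stand-ins for <linux/irqreturn.h>. */
    typedef enum { IRQ_NONE = 0, IRQ_HANDLED = 1 } irqreturn_t;
    #define IRQ_RETVAL(x) ((x) ? IRQ_HANDLED : IRQ_NONE)

    /* Hypothetical handler: claim the interrupt only if something was serviced. */
    static irqreturn_t demo_sdio_irq(unsigned int status, unsigned int mask)
    {
        unsigned int serviced = status & mask;  /* bits we actually handle */

        if (!mask)                  /* SDIO irqs not enabled for this host */
            return IRQ_NONE;

        /* ... acknowledge the `serviced` bits in hardware here ... */
        return IRQ_RETVAL(serviced);
    }

    int main(void)
    {
        printf("serviced: %d\n", demo_sdio_irq(0x5, 0x4)); /* 1 = IRQ_HANDLED */
        printf("disabled: %d\n", demo_sdio_irq(0x5, 0x0)); /* 0 = IRQ_NONE    */
        printf("spurious: %d\n", demo_sdio_irq(0x1, 0x4)); /* 0 = IRQ_NONE    */
        return 0;
    }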
1664 +diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
1665 +index b9283901136e..0fdc9ad32a2e 100644
1666 +--- a/drivers/net/ethernet/8390/mac8390.c
1667 ++++ b/drivers/net/ethernet/8390/mac8390.c
1668 +@@ -156,8 +156,6 @@ static void dayna_block_output(struct net_device *dev, int count,
1669 + #define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
1670 + #define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c))
1671 +
1672 +-#define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c))
1673 +-
1674 + /* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
1675 + static void slow_sane_get_8390_hdr(struct net_device *dev,
1676 + struct e8390_pkt_hdr *hdr, int ring_page);
1677 +@@ -237,19 +235,26 @@ static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
1678 +
1679 + static enum mac8390_access __init mac8390_testio(volatile unsigned long membase)
1680 + {
1681 +- unsigned long outdata = 0xA5A0B5B0;
1682 +- unsigned long indata = 0x00000000;
1683 ++ u32 outdata = 0xA5A0B5B0;
1684 ++ u32 indata = 0;
1685 ++
1686 + /* Try writing 32 bits */
1687 +- memcpy_toio(membase, &outdata, 4);
1688 +- /* Now compare them */
1689 +- if (memcmp_withio(&outdata, membase, 4) == 0)
1690 ++ nubus_writel(outdata, membase);
1691 ++ /* Now read it back */
1692 ++ indata = nubus_readl(membase);
1693 ++ if (outdata == indata)
1694 + return ACCESS_32;
1695 ++
1696 ++ outdata = 0xC5C0D5D0;
1697 ++ indata = 0;
1698 ++
1699 + /* Write 16 bit output */
1700 + word_memcpy_tocard(membase, &outdata, 4);
1701 + /* Now read it back */
1702 + word_memcpy_fromcard(&indata, membase, 4);
1703 + if (outdata == indata)
1704 + return ACCESS_16;
1705 ++
1706 + return ACCESS_UNKNOWN;
1707 + }
1708 +
1709 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1710 +index 7bba30f24135..059113dce6e0 100644
1711 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1712 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1713 +@@ -2529,6 +2529,20 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1714 + return ret;
1715 + }
1716 +
1717 ++static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
1718 ++{
1719 ++ struct stmmac_priv *priv = netdev_priv(ndev);
1720 ++ int ret = 0;
1721 ++
1722 ++ ret = eth_mac_addr(ndev, addr);
1723 ++ if (ret)
1724 ++ return ret;
1725 ++
1726 ++ priv->hw->mac->set_umac_addr(priv->hw, ndev->dev_addr, 0);
1727 ++
1728 ++ return ret;
1729 ++}
1730 ++
1731 + #ifdef CONFIG_DEBUG_FS
1732 + static struct dentry *stmmac_fs_dir;
1733 +
1734 +@@ -2730,7 +2744,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
1735 + #ifdef CONFIG_NET_POLL_CONTROLLER
1736 + .ndo_poll_controller = stmmac_poll_controller,
1737 + #endif
1738 +- .ndo_set_mac_address = eth_mac_addr,
1739 ++ .ndo_set_mac_address = stmmac_set_mac_address,
1740 + };
1741 +
1742 + /**
1743 +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
1744 +index 5dadfc508ade..835129152fc4 100644
1745 +--- a/drivers/net/vxlan.c
1746 ++++ b/drivers/net/vxlan.c
1747 +@@ -3276,10 +3276,8 @@ static void __net_exit vxlan_exit_net(struct net *net)
1748 + /* If vxlan->dev is in the same netns, it has already been added
1749 + * to the list by the previous loop.
1750 + */
1751 +- if (!net_eq(dev_net(vxlan->dev), net)) {
1752 +- gro_cells_destroy(&vxlan->gro_cells);
1753 ++ if (!net_eq(dev_net(vxlan->dev), net))
1754 + unregister_netdevice_queue(vxlan->dev, &list);
1755 +- }
1756 + }
1757 +
1758 + unregister_netdevice_many(&list);
1759 +diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
1760 +index f201e50447d8..b867875aa6e6 100644
1761 +--- a/drivers/net/wireless/ath/ath10k/wmi.c
1762 ++++ b/drivers/net/wireless/ath/ath10k/wmi.c
1763 +@@ -4065,7 +4065,7 @@ static void ath10k_tpc_config_disp_tables(struct ath10k *ar,
1764 + rate_code[i],
1765 + type);
1766 + snprintf(buff, sizeof(buff), "%8d ", tpc[j]);
1767 +- strncat(tpc_value, buff, strlen(buff));
1768 ++ strlcat(tpc_value, buff, sizeof(tpc_value));
1769 + }
1770 + tpc_stats->tpc_table[type].pream_idx[i] = pream_idx;
1771 + tpc_stats->tpc_table[type].rate_code[i] = rate_code[i];
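The ath10k hunk replaces strncat(tpc_value, buff, strlen(buff)) with strlcat(tpc_value, buff, sizeof(tpc_value)): the strncat bound only limits how much of the source is copied, not how large the destination may grow, so appending many fields that way can overflow the destination. strlcat() is not in glibc, so this standalone sketch uses snprintf into the remaining space to get the same destination-bounded behavior; the buffer sizes and field format are arbitrary.

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char value[32] = "";
        size_t used = 0;

        /* Append many fixed-width fields without ever writing past
         * sizeof(value). strncat(value, field, strlen(field)) would only
         * bound the source and could run off the end of `value`. */
        for (int i = 0; i < 20; i++) {
            char field[16];

            snprintf(field, sizeof(field), "%8d ", i);
            used += snprintf(value + used, sizeof(value) - used, "%s", field);
            if (used >= sizeof(value) - 1) {    /* destination full, stop */
                used = sizeof(value) - 1;
                break;
            }
        }
        printf("\"%s\" (%zu bytes used)\n", value, used);
        return 0;
    }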
1772 +diff --git a/drivers/rtc/rtc-lib.c b/drivers/rtc/rtc-lib.c
1773 +index e6bfb9c42a10..5b136bdc03d4 100644
1774 +--- a/drivers/rtc/rtc-lib.c
1775 ++++ b/drivers/rtc/rtc-lib.c
1776 +@@ -52,13 +52,11 @@ EXPORT_SYMBOL(rtc_year_days);
1777 + */
1778 + void rtc_time64_to_tm(time64_t time, struct rtc_time *tm)
1779 + {
1780 +- unsigned int month, year;
1781 +- unsigned long secs;
1782 ++ unsigned int month, year, secs;
1783 + int days;
1784 +
1785 + /* time must be positive */
1786 +- days = div_s64(time, 86400);
1787 +- secs = time - (unsigned int) days * 86400;
1788 ++ days = div_s64_rem(time, 86400, &secs);
1789 +
1790 + /* day of the week, 1970-01-01 was a Thursday */
1791 + tm->tm_wday = (days + 4) % 7;
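The rtc-lib hunk computes the day count and the leftover seconds with a single div_s64_rem() instead of a divide followed by a multiply-and-subtract, avoiding a second 64-bit operation and the unsigned/signed truncation in the old expression. A userspace sketch of the same split using plain C division and modulo; time64_t is stood in by int64_t and the sample timestamp is arbitrary.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t time = 1554288419;      /* seconds since the epoch */

        /* One division yields both parts; compilers typically emit a single
         * divide for the / and % pair, much like div_s64_rem() does. */
        int64_t days = time / 86400;
        uint32_t secs = (uint32_t)(time % 86400);

        /* 1970-01-01 was a Thursday, hence the +4. */
        int wday = (int)((days + 4) % 7);

        printf("days=%" PRId64 " secs=%u wday=%d\n", days, secs, wday);
        return 0;
    }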
1792 +diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
1793 +index 2abcd331b05d..abe460eac712 100644
1794 +--- a/drivers/s390/scsi/zfcp_erp.c
1795 ++++ b/drivers/s390/scsi/zfcp_erp.c
1796 +@@ -652,6 +652,20 @@ static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
1797 + add_timer(&erp_action->timer);
1798 + }
1799 +
1800 ++void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
1801 ++ int clear, char *dbftag)
1802 ++{
1803 ++ unsigned long flags;
1804 ++ struct zfcp_port *port;
1805 ++
1806 ++ write_lock_irqsave(&adapter->erp_lock, flags);
1807 ++ read_lock(&adapter->port_list_lock);
1808 ++ list_for_each_entry(port, &adapter->port_list, list)
1809 ++ _zfcp_erp_port_forced_reopen(port, clear, dbftag);
1810 ++ read_unlock(&adapter->port_list_lock);
1811 ++ write_unlock_irqrestore(&adapter->erp_lock, flags);
1812 ++}
1813 ++
1814 + static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
1815 + int clear, char *id)
1816 + {
1817 +@@ -1306,6 +1320,9 @@ static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
1818 + struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
1819 + int lun_status;
1820 +
1821 ++ if (sdev->sdev_state == SDEV_DEL ||
1822 ++ sdev->sdev_state == SDEV_CANCEL)
1823 ++ continue;
1824 + if (zsdev->port != port)
1825 + continue;
1826 + /* LUN under port of interest */
1827 +diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
1828 +index b326f05c7f89..a39a74500e23 100644
1829 +--- a/drivers/s390/scsi/zfcp_ext.h
1830 ++++ b/drivers/s390/scsi/zfcp_ext.h
1831 +@@ -68,6 +68,8 @@ extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32);
1832 + extern int zfcp_erp_port_reopen(struct zfcp_port *, int, char *);
1833 + extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
1834 + extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
1835 ++extern void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
1836 ++ int clear, char *dbftag);
1837 + extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
1838 + extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32);
1839 + extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *);
1840 +diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
1841 +index 3afb200b2829..bdb257eaa2e5 100644
1842 +--- a/drivers/s390/scsi/zfcp_scsi.c
1843 ++++ b/drivers/s390/scsi/zfcp_scsi.c
1844 +@@ -326,6 +326,10 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
1845 + struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
1846 + int ret = SUCCESS, fc_ret;
1847 +
1848 ++ if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) {
1849 ++ zfcp_erp_port_forced_reopen_all(adapter, 0, "schrh_p");
1850 ++ zfcp_erp_wait(adapter);
1851 ++ }
1852 + zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
1853 + zfcp_erp_wait(adapter);
1854 + fc_ret = fc_block_scsi_eh(scpnt);
1855 +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
1856 +index ec80a0077ace..62adaca8fb97 100644
1857 +--- a/drivers/scsi/sd.c
1858 ++++ b/drivers/scsi/sd.c
1859 +@@ -1276,11 +1276,6 @@ static void sd_release(struct gendisk *disk, fmode_t mode)
1860 + scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
1861 + }
1862 +
1863 +- /*
1864 +- * XXX and what if there are packets in flight and this close()
1865 +- * XXX is followed by a "rmmod sd_mod"?
1866 +- */
1867 +-
1868 + scsi_disk_put(sdkp);
1869 + }
1870 +
1871 +@@ -3227,11 +3222,23 @@ static void scsi_disk_release(struct device *dev)
1872 + {
1873 + struct scsi_disk *sdkp = to_scsi_disk(dev);
1874 + struct gendisk *disk = sdkp->disk;
1875 +-
1876 ++ struct request_queue *q = disk->queue;
1877 ++
1878 + spin_lock(&sd_index_lock);
1879 + ida_remove(&sd_index_ida, sdkp->index);
1880 + spin_unlock(&sd_index_lock);
1881 +
1882 ++ /*
1883 ++ * Wait until all requests that are in progress have completed.
1884 ++ * This is necessary to avoid that e.g. scsi_end_request() crashes
1885 ++ * due to clearing the disk->private_data pointer. Wait from inside
1886 ++ * scsi_disk_release() instead of from sd_release() to avoid that
1887 ++ * freezing and unfreezing the request queue affects user space I/O
1888 ++ * in case multiple processes open a /dev/sd... node concurrently.
1889 ++ */
1890 ++ blk_mq_freeze_queue(q);
1891 ++ blk_mq_unfreeze_queue(q);
1892 ++
1893 + disk->private_data = NULL;
1894 + put_disk(disk);
1895 + put_device(&sdkp->device->sdev_gendev);
1896 +diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
1897 +index e9c74c41aece..b4c425383f99 100644
1898 +--- a/drivers/staging/android/ashmem.c
1899 ++++ b/drivers/staging/android/ashmem.c
1900 +@@ -447,7 +447,9 @@ ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1901 + if (!(sc->gfp_mask & __GFP_FS))
1902 + return SHRINK_STOP;
1903 +
1904 +- mutex_lock(&ashmem_mutex);
1905 ++ if (!mutex_trylock(&ashmem_mutex))
1906 ++ return -1;
1907 ++
1908 + list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
1909 + loff_t start = range->pgstart * PAGE_SIZE;
1910 + loff_t end = (range->pgend + 1) * PAGE_SIZE;
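The ashmem hunk stops the shrinker from sleeping on ashmem_mutex: if the lock is contended, the scan bails out immediately instead of blocking reclaim (and risking a deadlock when the lock holder is itself allocating memory). A small pthread sketch of the same "try the lock, otherwise skip the work" shape; the cache, counters, and function names are hypothetical.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long cached_objects = 128;

    /* Called from a reclaim-like context: must not block on cache_lock. */
    static long shrink_cache(unsigned long nr_to_scan)
    {
        long freed = 0;

        if (pthread_mutex_trylock(&cache_lock) != 0)
            return -1;                  /* contended: give up, try later */

        while (nr_to_scan-- && cached_objects) {
            cached_objects--;           /* pretend to free one object */
            freed++;
        }
        pthread_mutex_unlock(&cache_lock);
        return freed;
    }

    int main(void)                      /* build with: cc -pthread */
    {
        long freed = shrink_cache(32);
        printf("freed %ld, %lu left\n", freed, cached_objects);

        pthread_mutex_lock(&cache_lock);    /* simulate a busy writer */
        printf("contended scan returns %ld\n", shrink_cache(32));
        pthread_mutex_unlock(&cache_lock);
        return 0;
    }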
1911 +diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c
1912 +index 9156d8238c97..e702ce6461fc 100644
1913 +--- a/drivers/staging/android/ion/ion_carveout_heap.c
1914 ++++ b/drivers/staging/android/ion/ion_carveout_heap.c
1915 +@@ -167,7 +167,7 @@ struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
1916 + if (!carveout_heap)
1917 + return ERR_PTR(-ENOMEM);
1918 +
1919 +- carveout_heap->pool = gen_pool_create(12, -1);
1920 ++ carveout_heap->pool = gen_pool_create(PAGE_SHIFT, -1);
1921 + if (!carveout_heap->pool) {
1922 + kfree(carveout_heap);
1923 + return ERR_PTR(-ENOMEM);
1924 +diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
1925 +index f83e00c78051..50a9945da27e 100644
1926 +--- a/drivers/staging/android/sync.c
1927 ++++ b/drivers/staging/android/sync.c
1928 +@@ -519,12 +519,10 @@ static const struct fence_ops android_fence_ops = {
1929 + static void sync_fence_free(struct kref *kref)
1930 + {
1931 + struct sync_fence *fence = container_of(kref, struct sync_fence, kref);
1932 +- int i, status = atomic_read(&fence->status);
1933 ++ int i;
1934 +
1935 + for (i = 0; i < fence->num_fences; ++i) {
1936 +- if (status)
1937 +- fence_remove_callback(fence->cbs[i].sync_pt,
1938 +- &fence->cbs[i].cb);
1939 ++ fence_remove_callback(fence->cbs[i].sync_pt, &fence->cbs[i].cb);
1940 + fence_put(fence->cbs[i].sync_pt);
1941 + }
1942 +
1943 +diff --git a/drivers/staging/android/uapi/ashmem.h b/drivers/staging/android/uapi/ashmem.h
1944 +index ba4743c71d6b..13df42d200b7 100644
1945 +--- a/drivers/staging/android/uapi/ashmem.h
1946 ++++ b/drivers/staging/android/uapi/ashmem.h
1947 +@@ -13,6 +13,7 @@
1948 + #define _UAPI_LINUX_ASHMEM_H
1949 +
1950 + #include <linux/ioctl.h>
1951 ++#include <linux/types.h>
1952 +
1953 + #define ASHMEM_NAME_LEN 256
1954 +
1955 +diff --git a/drivers/staging/goldfish/goldfish_audio.c b/drivers/staging/goldfish/goldfish_audio.c
1956 +index b0927e49d0a8..6ca288bf4059 100644
1957 +--- a/drivers/staging/goldfish/goldfish_audio.c
1958 ++++ b/drivers/staging/goldfish/goldfish_audio.c
1959 +@@ -26,6 +26,7 @@
1960 + #include <linux/sched.h>
1961 + #include <linux/dma-mapping.h>
1962 + #include <linux/uaccess.h>
1963 ++#include <linux/slab.h>
1964 + #include <linux/goldfish.h>
1965 +
1966 + MODULE_AUTHOR("Google, Inc.");
1967 +diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
1968 +index 8fd8f3a2d1bf..58b6403458b7 100644
1969 +--- a/drivers/staging/vt6655/device_main.c
1970 ++++ b/drivers/staging/vt6655/device_main.c
1971 +@@ -972,8 +972,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
1972 + return;
1973 + }
1974 +
1975 +- MACvIntDisable(priv->PortOffset);
1976 +-
1977 + spin_lock_irqsave(&priv->lock, flags);
1978 +
1979 + /* Read low level stats */
1980 +@@ -1062,8 +1060,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
1981 + }
1982 +
1983 + spin_unlock_irqrestore(&priv->lock, flags);
1984 +-
1985 +- MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
1986 + }
1987 +
1988 + static void vnt_interrupt_work(struct work_struct *work)
1989 +@@ -1073,14 +1069,17 @@ static void vnt_interrupt_work(struct work_struct *work)
1990 +
1991 + if (priv->vif)
1992 + vnt_interrupt_process(priv);
1993 ++
1994 ++ MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
1995 + }
1996 +
1997 + static irqreturn_t vnt_interrupt(int irq, void *arg)
1998 + {
1999 + struct vnt_private *priv = arg;
2000 +
2001 +- if (priv->vif)
2002 +- schedule_work(&priv->interrupt_work);
2003 ++ schedule_work(&priv->interrupt_work);
2004 ++
2005 ++ MACvIntDisable(priv->PortOffset);
2006 +
2007 + return IRQ_HANDLED;
2008 + }
2009 +diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
2010 +index e0277cf0bf58..f5c4e92b5172 100644
2011 +--- a/drivers/tty/serial/atmel_serial.c
2012 ++++ b/drivers/tty/serial/atmel_serial.c
2013 +@@ -1167,6 +1167,10 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
2014 + sg_dma_len(&atmel_port->sg_rx)/2,
2015 + DMA_DEV_TO_MEM,
2016 + DMA_PREP_INTERRUPT);
2017 ++ if (!desc) {
2018 ++ dev_err(port->dev, "Preparing DMA cyclic failed\n");
2019 ++ goto chan_err;
2020 ++ }
2021 + desc->callback = atmel_complete_rx_dma;
2022 + desc->callback_param = port;
2023 + atmel_port->desc_rx = desc;
2024 +diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
2025 +index f2b0d8cee8ef..0314e78e31ff 100644
2026 +--- a/drivers/tty/serial/kgdboc.c
2027 ++++ b/drivers/tty/serial/kgdboc.c
2028 +@@ -148,8 +148,10 @@ static int configure_kgdboc(void)
2029 + char *cptr = config;
2030 + struct console *cons;
2031 +
2032 +- if (!strlen(config) || isspace(config[0]))
2033 ++ if (!strlen(config) || isspace(config[0])) {
2034 ++ err = 0;
2035 + goto noconfig;
2036 ++ }
2037 +
2038 + kgdboc_io_ops.is_console = 0;
2039 + kgdb_tty_driver = NULL;
2040 +diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
2041 +index d45133056f51..be55fb6def89 100644
2042 +--- a/drivers/tty/serial/max310x.c
2043 ++++ b/drivers/tty/serial/max310x.c
2044 +@@ -1306,6 +1306,8 @@ static int max310x_spi_probe(struct spi_device *spi)
2045 + if (spi->dev.of_node) {
2046 + const struct of_device_id *of_id =
2047 + of_match_device(max310x_dt_ids, &spi->dev);
2048 ++ if (!of_id)
2049 ++ return -ENODEV;
2050 +
2051 + devtype = (struct max310x_devtype *)of_id->data;
2052 + } else {
2053 +diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
2054 +index b63920481b1d..669134e27ed9 100644
2055 +--- a/drivers/tty/serial/sh-sci.c
2056 ++++ b/drivers/tty/serial/sh-sci.c
2057 +@@ -746,19 +746,9 @@ static void sci_transmit_chars(struct uart_port *port)
2058 +
2059 + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
2060 + uart_write_wakeup(port);
2061 +- if (uart_circ_empty(xmit)) {
2062 ++ if (uart_circ_empty(xmit))
2063 + sci_stop_tx(port);
2064 +- } else {
2065 +- ctrl = serial_port_in(port, SCSCR);
2066 +-
2067 +- if (port->type != PORT_SCI) {
2068 +- serial_port_in(port, SCxSR); /* Dummy read */
2069 +- sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port));
2070 +- }
2071 +
2072 +- ctrl |= SCSCR_TIE;
2073 +- serial_port_out(port, SCSCR, ctrl);
2074 +- }
2075 + }
2076 +
2077 + /* On SH3, SCIF may read end-of-break as a space->mark char */
2078 +diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
2079 +index 1e302caaa450..c894eca57e73 100644
2080 +--- a/drivers/tty/serial/sprd_serial.c
2081 ++++ b/drivers/tty/serial/sprd_serial.c
2082 +@@ -36,7 +36,7 @@
2083 + #define SPRD_FIFO_SIZE 128
2084 + #define SPRD_DEF_RATE 26000000
2085 + #define SPRD_BAUD_IO_LIMIT 3000000
2086 +-#define SPRD_TIMEOUT 256
2087 ++#define SPRD_TIMEOUT 256000
2088 +
2089 + /* the offset of serial registers and BITs for them */
2090 + /* data registers */
2091 +@@ -63,6 +63,7 @@
2092 +
2093 + /* interrupt clear register */
2094 + #define SPRD_ICLR 0x0014
2095 ++#define SPRD_ICLR_TIMEOUT BIT(13)
2096 +
2097 + /* line control register */
2098 + #define SPRD_LCR 0x0018
2099 +@@ -298,7 +299,8 @@ static irqreturn_t sprd_handle_irq(int irq, void *dev_id)
2100 + return IRQ_NONE;
2101 + }
2102 +
2103 +- serial_out(port, SPRD_ICLR, ~0);
2104 ++ if (ims & SPRD_IMSR_TIMEOUT)
2105 ++ serial_out(port, SPRD_ICLR, SPRD_ICLR_TIMEOUT);
2106 +
2107 + if (ims & (SPRD_IMSR_RX_FIFO_FULL |
2108 + SPRD_IMSR_BREAK_DETECT | SPRD_IMSR_TIMEOUT))
2109 +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
2110 +index 557f08adf644..5e015631413c 100644
2111 +--- a/drivers/usb/dwc3/gadget.c
2112 ++++ b/drivers/usb/dwc3/gadget.c
2113 +@@ -2894,6 +2894,9 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
2114 +
2115 + int dwc3_gadget_suspend(struct dwc3 *dwc)
2116 + {
2117 ++ if (!dwc->gadget_driver)
2118 ++ return 0;
2119 ++
2120 + if (dwc->pullups_connected) {
2121 + dwc3_gadget_disable_irq(dwc);
2122 + dwc3_gadget_run_stop(dwc, true, true);
2123 +@@ -2912,6 +2915,9 @@ int dwc3_gadget_resume(struct dwc3 *dwc)
2124 + struct dwc3_ep *dep;
2125 + int ret;
2126 +
2127 ++ if (!dwc->gadget_driver)
2128 ++ return 0;
2129 ++
2130 + /* Start with SuperSpeed Default */
2131 + dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2132 +
2133 +diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
2134 +index 58f5fbdb6959..8bf54477f472 100644
2135 +--- a/drivers/usb/gadget/composite.c
2136 ++++ b/drivers/usb/gadget/composite.c
2137 +@@ -1819,6 +1819,8 @@ unknown:
2138 + break;
2139 +
2140 + case USB_RECIP_ENDPOINT:
2141 ++ if (!cdev->config)
2142 ++ break;
2143 + endp = ((w_index & 0x80) >> 3) | (w_index & 0x0f);
2144 + list_for_each_entry(f, &cdev->config->functions, list) {
2145 + if (test_bit(endp, f->endpoints))
2146 +diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
2147 +index 6abb6a10ee82..d412e234f336 100644
2148 +--- a/drivers/usb/gadget/configfs.c
2149 ++++ b/drivers/usb/gadget/configfs.c
2150 +@@ -1496,7 +1496,9 @@ void unregister_gadget_item(struct config_item *item)
2151 + {
2152 + struct gadget_info *gi = to_gadget_info(item);
2153 +
2154 ++ mutex_lock(&gi->lock);
2155 + unregister_gadget(gi);
2156 ++ mutex_unlock(&gi->lock);
2157 + }
2158 + EXPORT_SYMBOL_GPL(unregister_gadget_item);
2159 +
2160 +diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
2161 +index 70d3917cc003..2582db38d6a6 100644
2162 +--- a/drivers/usb/gadget/function/rndis.c
2163 ++++ b/drivers/usb/gadget/function/rndis.c
2164 +@@ -680,6 +680,12 @@ static int rndis_reset_response(struct rndis_params *params,
2165 + {
2166 + rndis_reset_cmplt_type *resp;
2167 + rndis_resp_t *r;
2168 ++ u8 *xbuf;
2169 ++ u32 length;
2170 ++
2171 ++ /* drain the response queue */
2172 ++ while ((xbuf = rndis_get_next_response(params, &length)))
2173 ++ rndis_free_response(params, xbuf);
2174 +
2175 + r = rndis_add_response(params, sizeof(rndis_reset_cmplt_type));
2176 + if (!r)
2177 +diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
2178 +index 4ea44f7122ee..d73618475664 100644
2179 +--- a/drivers/usb/gadget/function/u_serial.c
2180 ++++ b/drivers/usb/gadget/function/u_serial.c
2181 +@@ -361,10 +361,15 @@ __acquires(&port->port_lock)
2182 + */
2183 + {
2184 + struct list_head *pool = &port->write_pool;
2185 +- struct usb_ep *in = port->port_usb->in;
2186 ++ struct usb_ep *in;
2187 + int status = 0;
2188 + bool do_tty_wake = false;
2189 +
2190 ++ if (!port->port_usb)
2191 ++ return status;
2192 ++
2193 ++ in = port->port_usb->in;
2194 ++
2195 + while (!port->write_busy && !list_empty(pool)) {
2196 + struct usb_request *req;
2197 + int len;
2198 +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
2199 +index 845fa426fa0d..80192698df87 100644
2200 +--- a/drivers/usb/host/xhci-ring.c
2201 ++++ b/drivers/usb/host/xhci-ring.c
2202 +@@ -1642,10 +1642,13 @@ static void handle_port_status(struct xhci_hcd *xhci,
2203 + }
2204 + }
2205 +
2206 +- if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_U0 &&
2207 +- DEV_SUPERSPEED_ANY(temp)) {
2208 ++ if ((temp & PORT_PLC) &&
2209 ++ DEV_SUPERSPEED_ANY(temp) &&
2210 ++ ((temp & PORT_PLS_MASK) == XDEV_U0 ||
2211 ++ (temp & PORT_PLS_MASK) == XDEV_U1 ||
2212 ++ (temp & PORT_PLS_MASK) == XDEV_U2)) {
2213 + xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
2214 +- /* We've just brought the device into U0 through either the
2215 ++ /* We've just brought the device into U0/1/2 through either the
2216 + * Resume state after a device remote wakeup, or through the
2217 + * U3Exit state after a host-initiated resume. If it's a device
2218 + * initiated remote wake, don't pass up the link state change,
2219 +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
2220 +index 0635cea42e6f..b57bee70cdef 100644
2221 +--- a/drivers/usb/host/xhci.h
2222 ++++ b/drivers/usb/host/xhci.h
2223 +@@ -309,6 +309,7 @@ struct xhci_op_regs {
2224 + */
2225 + #define PORT_PLS_MASK (0xf << 5)
2226 + #define XDEV_U0 (0x0 << 5)
2227 ++#define XDEV_U1 (0x1 << 5)
2228 + #define XDEV_U2 (0x2 << 5)
2229 + #define XDEV_U3 (0x3 << 5)
2230 + #define XDEV_INACTIVE (0x6 << 5)
2231 +diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
2232 +index 8647d2c2a8c4..c5553028e616 100644
2233 +--- a/drivers/usb/renesas_usbhs/mod_gadget.c
2234 ++++ b/drivers/usb/renesas_usbhs/mod_gadget.c
2235 +@@ -641,14 +641,11 @@ static int usbhsg_ep_disable(struct usb_ep *ep)
2236 + struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
2237 + struct usbhs_pipe *pipe;
2238 + unsigned long flags;
2239 +- int ret = 0;
2240 +
2241 + spin_lock_irqsave(&uep->lock, flags);
2242 + pipe = usbhsg_uep_to_pipe(uep);
2243 +- if (!pipe) {
2244 +- ret = -EINVAL;
2245 ++ if (!pipe)
2246 + goto out;
2247 +- }
2248 +
2249 + usbhsg_pipe_disable(uep);
2250 + usbhs_pipe_free(pipe);
2251 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
2252 +index b317594a6342..e3ea0fdd3913 100644
2253 +--- a/drivers/usb/serial/cp210x.c
2254 ++++ b/drivers/usb/serial/cp210x.c
2255 +@@ -76,6 +76,7 @@ static const struct usb_device_id id_table[] = {
2256 + { USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME build-in converter */
2257 + { USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */
2258 + { USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */
2259 ++ { USB_DEVICE(0x10C4, 0x8056) }, /* Lorenz Messtechnik devices */
2260 + { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
2261 + { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
2262 + { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
2263 +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
2264 +index 4287e2b1c175..af258bb632dd 100644
2265 +--- a/drivers/usb/serial/ftdi_sio.c
2266 ++++ b/drivers/usb/serial/ftdi_sio.c
2267 +@@ -604,6 +604,8 @@ static const struct usb_device_id id_table_combined[] = {
2268 + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
2269 + { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
2270 + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
2271 ++ { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLX_PLUS_PID) },
2272 ++ { USB_DEVICE(FTDI_VID, FTDI_NT_ORION_IO_PID) },
2273 + { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
2274 + { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) },
2275 + { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) },
2276 +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
2277 +index ddf5ab983dc9..15d220eaf6e6 100644
2278 +--- a/drivers/usb/serial/ftdi_sio_ids.h
2279 ++++ b/drivers/usb/serial/ftdi_sio_ids.h
2280 +@@ -566,7 +566,9 @@
2281 + /*
2282 + * NovaTech product ids (FTDI_VID)
2283 + */
2284 +-#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
2285 ++#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
2286 ++#define FTDI_NT_ORIONLX_PLUS_PID 0x7c91 /* OrionLX+ Substation Automation Platform */
2287 ++#define FTDI_NT_ORION_IO_PID 0x7c92 /* Orion I/O */
2288 +
2289 + /*
2290 + * Synapse Wireless product ids (FTDI_VID)
2291 +diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
2292 +index 4581fa1dec98..286b43c79d38 100644
2293 +--- a/drivers/usb/serial/mos7720.c
2294 ++++ b/drivers/usb/serial/mos7720.c
2295 +@@ -368,8 +368,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
2296 + if (!urbtrack)
2297 + return -ENOMEM;
2298 +
2299 +- kref_get(&mos_parport->ref_count);
2300 +- urbtrack->mos_parport = mos_parport;
2301 + urbtrack->urb = usb_alloc_urb(0, GFP_ATOMIC);
2302 + if (!urbtrack->urb) {
2303 + kfree(urbtrack);
2304 +@@ -390,6 +388,8 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
2305 + usb_sndctrlpipe(usbdev, 0),
2306 + (unsigned char *)urbtrack->setup,
2307 + NULL, 0, async_complete, urbtrack);
2308 ++ kref_get(&mos_parport->ref_count);
2309 ++ urbtrack->mos_parport = mos_parport;
2310 + kref_init(&urbtrack->ref_count);
2311 + INIT_LIST_HEAD(&urbtrack->urblist_entry);
2312 +
2313 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
2314 +index b2b7c12e5c86..9f96dd274370 100644
2315 +--- a/drivers/usb/serial/option.c
2316 ++++ b/drivers/usb/serial/option.c
2317 +@@ -1066,7 +1066,8 @@ static const struct usb_device_id option_ids[] = {
2318 + .driver_info = RSVD(3) },
2319 + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
2320 + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
2321 +- { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
2322 ++ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000), /* SIMCom SIM5218 */
2323 ++ .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | NCTRL(3) | RSVD(4) },
2324 + /* Quectel products using Qualcomm vendor ID */
2325 + { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
2326 + { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
2327 +@@ -1941,10 +1942,12 @@ static const struct usb_device_id option_ids[] = {
2328 + .driver_info = RSVD(4) },
2329 + { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
2330 + .driver_info = RSVD(4) },
2331 +- { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
2332 +- { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
2333 +- { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
2334 +- { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
2335 ++ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
2336 ++ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
2337 ++ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
2338 ++ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 */
2339 ++ .driver_info = RSVD(4) },
2340 ++ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
2341 + { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
2342 + { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
2343 + { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
2344 +diff --git a/drivers/video/fbdev/goldfishfb.c b/drivers/video/fbdev/goldfishfb.c
2345 +index 14a93cb21310..66d58e93bc32 100644
2346 +--- a/drivers/video/fbdev/goldfishfb.c
2347 ++++ b/drivers/video/fbdev/goldfishfb.c
2348 +@@ -234,7 +234,7 @@ static int goldfish_fb_probe(struct platform_device *pdev)
2349 + fb->fb.var.activate = FB_ACTIVATE_NOW;
2350 + fb->fb.var.height = readl(fb->reg_base + FB_GET_PHYS_HEIGHT);
2351 + fb->fb.var.width = readl(fb->reg_base + FB_GET_PHYS_WIDTH);
2352 +- fb->fb.var.pixclock = 10000;
2353 ++ fb->fb.var.pixclock = 0;
2354 +
2355 + fb->fb.var.red.offset = 11;
2356 + fb->fb.var.red.length = 5;
2357 +diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
2358 +index b9fa99577bf7..2d2a76906786 100644
2359 +--- a/fs/btrfs/raid56.c
2360 ++++ b/fs/btrfs/raid56.c
2361 +@@ -2420,8 +2420,9 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
2362 + bitmap_clear(rbio->dbitmap, pagenr, 1);
2363 + kunmap(p);
2364 +
2365 +- for (stripe = 0; stripe < rbio->real_stripes; stripe++)
2366 ++ for (stripe = 0; stripe < nr_data; stripe++)
2367 + kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
2368 ++ kunmap(p_page);
2369 + }
2370 +
2371 + __free_page(p_page);
2372 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
2373 +index 63f59f17c97e..c7190f322576 100644
2374 +--- a/fs/btrfs/tree-log.c
2375 ++++ b/fs/btrfs/tree-log.c
2376 +@@ -3321,9 +3321,16 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
2377 + }
2378 + btrfs_release_path(path);
2379 +
2380 +- /* find the first key from this transaction again */
2381 ++ /*
2382 ++ * Find the first key from this transaction again. See the note for
2383 ++ * log_new_dir_dentries, if we're logging a directory recursively we
2384 ++ * won't be holding its i_mutex, which means we can modify the directory
2385 ++ * while we're logging it. If we remove an entry between our first
2386 ++ * search and this search we'll not find the key again and can just
2387 ++ * bail.
2388 ++ */
2389 + ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
2390 +- if (WARN_ON(ret != 0))
2391 ++ if (ret != 0)
2392 + goto done;
2393 +
2394 + /*
2395 +diff --git a/fs/dcache.c b/fs/dcache.c
2396 +index 9ffe60702299..cb554e406545 100644
2397 +--- a/fs/dcache.c
2398 ++++ b/fs/dcache.c
2399 +@@ -1510,7 +1510,7 @@ static void check_and_drop(void *_data)
2400 + {
2401 + struct detach_data *data = _data;
2402 +
2403 +- if (!data->mountpoint && !data->select.found)
2404 ++ if (!data->mountpoint && list_empty(&data->select.dispose))
2405 + __d_drop(data->select.start);
2406 + }
2407 +
2408 +@@ -1552,17 +1552,15 @@ void d_invalidate(struct dentry *dentry)
2409 +
2410 + d_walk(dentry, &data, detach_and_collect, check_and_drop);
2411 +
2412 +- if (data.select.found)
2413 ++ if (!list_empty(&data.select.dispose))
2414 + shrink_dentry_list(&data.select.dispose);
2415 ++ else if (!data.mountpoint)
2416 ++ return;
2417 +
2418 + if (data.mountpoint) {
2419 + detach_mounts(data.mountpoint);
2420 + dput(data.mountpoint);
2421 + }
2422 +-
2423 +- if (!data.mountpoint && !data.select.found)
2424 +- break;
2425 +-
2426 + cond_resched();
2427 + }
2428 + }
2429 +diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
2430 +index f817ed58f5ad..b40e75dbf48c 100644
2431 +--- a/fs/ext4/ext4_jbd2.h
2432 ++++ b/fs/ext4/ext4_jbd2.h
2433 +@@ -372,7 +372,7 @@ static inline void ext4_update_inode_fsync_trans(handle_t *handle,
2434 + {
2435 + struct ext4_inode_info *ei = EXT4_I(inode);
2436 +
2437 +- if (ext4_handle_valid(handle)) {
2438 ++ if (ext4_handle_valid(handle) && !is_handle_aborted(handle)) {
2439 + ei->i_sync_tid = handle->h_transaction->t_tid;
2440 + if (datasync)
2441 + ei->i_datasync_tid = handle->h_transaction->t_tid;
2442 +diff --git a/fs/ext4/file.c b/fs/ext4/file.c
2443 +index debf0707789d..2e5ae183a18a 100644
2444 +--- a/fs/ext4/file.c
2445 ++++ b/fs/ext4/file.c
2446 +@@ -79,7 +79,7 @@ ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
2447 + struct super_block *sb = inode->i_sb;
2448 + int blockmask = sb->s_blocksize - 1;
2449 +
2450 +- if (pos >= i_size_read(inode))
2451 ++ if (pos >= ALIGN(i_size_read(inode), sb->s_blocksize))
2452 + return 0;
2453 +
2454 + if ((pos | iov_iter_alignment(from)) & blockmask)
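The ext4_unaligned_aio() fix compares the write position against i_size rounded up to the filesystem block size, so a write starting inside the last, partially filled block is still treated as unaligned. The kernel's ALIGN() rounds up to a power-of-two boundary; below is a standalone sketch of that rounding and the check, with made-up sizes (ALIGN_UP is a local stand-in, not the kernel macro).

    #include <stdio.h>

    /* Round x up to the next multiple of a (a must be a power of two),
     * matching what the kernel's ALIGN() does for these types. */
    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((unsigned long long)(a) - 1))

    int main(void)
    {
        unsigned long long blocksize = 4096;
        unsigned long long i_size = 10000;  /* EOF lands inside block 2 */
        unsigned long long pos = 11000;     /* write offset to test */

        unsigned long long eof_block_end = ALIGN_UP(i_size, blocksize); /* 12288 */

        /* Old check: pos >= i_size would already call this "beyond EOF".
         * New check: only positions past the end of the EOF block qualify. */
        printf("beyond EOF block: %s\n",
               pos >= eof_block_end ? "yes" : "no (still unaligned territory)");
        return 0;
    }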
2455 +diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
2456 +index 355ef9c36c87..8f3e78eb0bbd 100644
2457 +--- a/fs/ext4/indirect.c
2458 ++++ b/fs/ext4/indirect.c
2459 +@@ -1491,10 +1491,14 @@ end_range:
2460 + partial->p + 1,
2461 + partial2->p,
2462 + (chain+n-1) - partial);
2463 +- BUFFER_TRACE(partial->bh, "call brelse");
2464 +- brelse(partial->bh);
2465 +- BUFFER_TRACE(partial2->bh, "call brelse");
2466 +- brelse(partial2->bh);
2467 ++ while (partial > chain) {
2468 ++ BUFFER_TRACE(partial->bh, "call brelse");
2469 ++ brelse(partial->bh);
2470 ++ }
2471 ++ while (partial2 > chain2) {
2472 ++ BUFFER_TRACE(partial2->bh, "call brelse");
2473 ++ brelse(partial2->bh);
2474 ++ }
2475 + return 0;
2476 + }
2477 +
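The ext4 indirect-truncate hunk releases every buffer head left on the partial chains rather than just the topmost one. A generic userspace sketch of walking such a chain back down toward its base and dropping one reference per level; note that the cursor must step toward the base on every iteration or the loop never terminates. The chain layout and put_buffer() are stand-ins, not the ext4 structures.

    #include <stdio.h>

    struct level {
        int bh_refcount;        /* stand-in for the buffer_head reference */
    };

    static void put_buffer(struct level *l)
    {
        l->bh_refcount--;       /* brelse()-like: drop one reference */
    }

    int main(void)
    {
        struct level chain[4] = { {1}, {1}, {1}, {1} };
        struct level *partial = &chain[3];      /* deepest level reached */

        /* Release every level above the base, stepping down each time. */
        while (partial > chain) {
            put_buffer(partial);
            partial--;          /* without this the loop spins forever */
        }

        for (int i = 0; i < 4; i++)
            printf("level %d refcount %d\n", i, chain[i].bh_refcount);
        return 0;
    }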
2478 +diff --git a/fs/inode.c b/fs/inode.c
2479 +index a39c2724d8a0..b5c3a6473aaa 100644
2480 +--- a/fs/inode.c
2481 ++++ b/fs/inode.c
2482 +@@ -154,6 +154,12 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
2483 + inode->i_rdev = 0;
2484 + inode->dirtied_when = 0;
2485 +
2486 ++#ifdef CONFIG_CGROUP_WRITEBACK
2487 ++ inode->i_wb_frn_winner = 0;
2488 ++ inode->i_wb_frn_avg_time = 0;
2489 ++ inode->i_wb_frn_history = 0;
2490 ++#endif
2491 ++
2492 + if (security_inode_alloc(inode))
2493 + goto out;
2494 + spin_lock_init(&inode->i_lock);
2495 +diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
2496 +index 5e1054f028af..c7e32a891502 100644
2497 +--- a/fs/proc/proc_sysctl.c
2498 ++++ b/fs/proc/proc_sysctl.c
2499 +@@ -1550,7 +1550,8 @@ static void drop_sysctl_table(struct ctl_table_header *header)
2500 + if (--header->nreg)
2501 + return;
2502 +
2503 +- put_links(header);
2504 ++ if (parent)
2505 ++ put_links(header);
2506 + start_unregistering(header);
2507 + if (!--header->count)
2508 + kfree_rcu(header, rcu);
2509 +diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c
2510 +index 42b8c57795cb..c6ce7503a329 100644
2511 +--- a/fs/udf/truncate.c
2512 ++++ b/fs/udf/truncate.c
2513 +@@ -260,6 +260,9 @@ void udf_truncate_extents(struct inode *inode)
2514 + epos.block = eloc;
2515 + epos.bh = udf_tread(sb,
2516 + udf_get_lb_pblock(sb, &eloc, 0));
2517 ++ /* Error reading indirect block? */
2518 ++ if (!epos.bh)
2519 ++ return;
2520 + if (elen)
2521 + indirect_ext_len =
2522 + (elen + sb->s_blocksize - 1) >>
2523 +diff --git a/include/asm-generic/fixmap.h b/include/asm-generic/fixmap.h
2524 +index 1cbb8338edf3..827e4d3bbc7a 100644
2525 +--- a/include/asm-generic/fixmap.h
2526 ++++ b/include/asm-generic/fixmap.h
2527 +@@ -70,12 +70,12 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
2528 + #endif
2529 +
2530 + /* Return a pointer with offset calculated */
2531 +-#define __set_fixmap_offset(idx, phys, flags) \
2532 +-({ \
2533 +- unsigned long addr; \
2534 +- __set_fixmap(idx, phys, flags); \
2535 +- addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1)); \
2536 +- addr; \
2537 ++#define __set_fixmap_offset(idx, phys, flags) \
2538 ++({ \
2539 ++ unsigned long ________addr; \
2540 ++ __set_fixmap(idx, phys, flags); \
2541 ++ ________addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1)); \
2542 ++ ________addr; \
2543 + })
2544 +
2545 + #define set_fixmap_offset(idx, phys) \
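The __set_fixmap_offset() change renames the macro's temporary to an improbable identifier so it can no longer shadow a caller variable named addr that appears in the `phys` or `flags` argument. A small GCC statement-expression demo of the shadowing problem the rename avoids; OFFSET_BAD/OFFSET_GOOD are hypothetical macro names, not the kernel ones, and the offsets are arbitrary.

    #include <stdio.h>

    /* Statement-expression macro with a local that can shadow the caller's
     * variable: if the argument mentions `addr`, it sees the inner local. */
    #define OFFSET_BAD(phys) \
    ({ \
        unsigned long addr = 0; \
        addr += 0x1000 + ((phys) & 0xfff); \
        addr; \
    })

    /* Same macro with an improbable local name, as the fixmap patch does. */
    #define OFFSET_GOOD(phys) \
    ({ \
        unsigned long ________addr; \
        ________addr = 0x1000 + ((phys) & 0xfff); \
        ________addr; \
    })

    int main(void)
    {
        unsigned long addr = 0x2345;    /* caller's variable, same name */

        printf("good: %#lx\n", OFFSET_GOOD(addr)); /* 0x1345 as expected */
        printf("bad:  %#lx\n", OFFSET_BAD(addr));  /* 0x1000: argument was
                                                      captured by the inner,
                                                      shadowing `addr` */
        return 0;
    }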
2546 +diff --git a/include/linux/rculist.h b/include/linux/rculist.h
2547 +index 5ed540986019..a579240c64e9 100644
2548 +--- a/include/linux/rculist.h
2549 ++++ b/include/linux/rculist.h
2550 +@@ -401,6 +401,42 @@ static inline void hlist_add_head_rcu(struct hlist_node *n,
2551 + first->pprev = &n->next;
2552 + }
2553 +
2554 ++/**
2555 ++ * hlist_add_tail_rcu
2556 ++ * @n: the element to add to the hash list.
2557 ++ * @h: the list to add to.
2558 ++ *
2559 ++ * Description:
2560 ++ * Adds the specified element to the specified hlist,
2561 ++ * while permitting racing traversals.
2562 ++ *
2563 ++ * The caller must take whatever precautions are necessary
2564 ++ * (such as holding appropriate locks) to avoid racing
2565 ++ * with another list-mutation primitive, such as hlist_add_head_rcu()
2566 ++ * or hlist_del_rcu(), running on this same list.
2567 ++ * However, it is perfectly legal to run concurrently with
2568 ++ * the _rcu list-traversal primitives, such as
2569 ++ * hlist_for_each_entry_rcu(), used to prevent memory-consistency
2570 ++ * problems on Alpha CPUs. Regardless of the type of CPU, the
2571 ++ * list-traversal primitive must be guarded by rcu_read_lock().
2572 ++ */
2573 ++static inline void hlist_add_tail_rcu(struct hlist_node *n,
2574 ++ struct hlist_head *h)
2575 ++{
2576 ++ struct hlist_node *i, *last = NULL;
2577 ++
2578 ++ for (i = hlist_first_rcu(h); i; i = hlist_next_rcu(i))
2579 ++ last = i;
2580 ++
2581 ++ if (last) {
2582 ++ n->next = last->next;
2583 ++ n->pprev = &last->next;
2584 ++ rcu_assign_pointer(hlist_next_rcu(last), n);
2585 ++ } else {
2586 ++ hlist_add_head_rcu(n, h);
2587 ++ }
2588 ++}
2589 ++
2590 + /**
2591 + * hlist_add_before_rcu
2592 + * @n: the new element to add to the hash list.
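The new hlist_add_tail_rcu() above walks to the last node and links the new element after it, publishing the link with rcu_assign_pointer() so concurrent readers see either the old or the new tail. Below is a plain, non-RCU userspace sketch of the same hlist tail insertion (the head holds only a `first` pointer; each node's `pprev` points at the pointer that points to it). The memory-ordering guarantees of the real primitive are deliberately omitted, and the type names are local stand-ins.

    #include <stdio.h>

    struct hnode {
        struct hnode *next;
        struct hnode **pprev;   /* address of the pointer that points to us */
        int val;
    };

    struct hhead {
        struct hnode *first;
    };

    /* Tail insert, mirroring hlist_add_tail_rcu() minus the RCU publication. */
    static void hlist_add_tail(struct hnode *n, struct hhead *h)
    {
        struct hnode *i, *last = NULL;

        for (i = h->first; i; i = i->next)
            last = i;

        if (last) {
            n->next = last->next;   /* always NULL for the true tail */
            n->pprev = &last->next;
            last->next = n;         /* rcu_assign_pointer() in the kernel */
        } else {
            n->next = h->first;
            n->pprev = &h->first;
            h->first = n;
        }
    }

    int main(void)
    {
        struct hhead head = { NULL };
        struct hnode nodes[3] = { { .val = 1 }, { .val = 2 }, { .val = 3 } };

        for (int i = 0; i < 3; i++)
            hlist_add_tail(&nodes[i], &head);

        for (struct hnode *n = head.first; n; n = n->next)
            printf("%d ", n->val);  /* prints 1 2 3: insertion order kept */
        printf("\n");
        return 0;
    }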
2593 +diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
2594 +index 3e5d9075960f..73fae8c4a5fb 100644
2595 +--- a/include/linux/vmstat.h
2596 ++++ b/include/linux/vmstat.h
2597 +@@ -189,6 +189,7 @@ extern void __inc_zone_state(struct zone *, enum zone_stat_item);
2598 + extern void dec_zone_state(struct zone *, enum zone_stat_item);
2599 + extern void __dec_zone_state(struct zone *, enum zone_stat_item);
2600 +
2601 ++void quiet_vmstat(void);
2602 + void cpu_vm_stats_fold(int cpu);
2603 + void refresh_zone_stat_thresholds(void);
2604 +
2605 +@@ -249,6 +250,7 @@ static inline void __dec_zone_page_state(struct page *page,
2606 +
2607 + static inline void refresh_zone_stat_thresholds(void) { }
2608 + static inline void cpu_vm_stats_fold(int cpu) { }
2609 ++static inline void quiet_vmstat(void) { }
2610 +
2611 + static inline void drain_zonestat(struct zone *zone,
2612 + struct per_cpu_pageset *pset) { }
2613 +diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
2614 +index 49dcad4fe99e..72599bbc8255 100644
2615 +--- a/include/net/inet_connection_sock.h
2616 ++++ b/include/net/inet_connection_sock.h
2617 +@@ -289,11 +289,6 @@ static inline int inet_csk_reqsk_queue_len(const struct sock *sk)
2618 + return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue);
2619 + }
2620 +
2621 +-static inline int inet_csk_reqsk_queue_young(const struct sock *sk)
2622 +-{
2623 +- return reqsk_queue_len_young(&inet_csk(sk)->icsk_accept_queue);
2624 +-}
2625 +-
2626 + static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
2627 + {
2628 + return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog;
2629 +diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
2630 +index 4a5b9a306c69..803fc26ef0ba 100644
2631 +--- a/include/net/sctp/checksum.h
2632 ++++ b/include/net/sctp/checksum.h
2633 +@@ -60,7 +60,7 @@ static inline __wsum sctp_csum_combine(__wsum csum, __wsum csum2,
2634 + static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
2635 + unsigned int offset)
2636 + {
2637 +- struct sctphdr *sh = sctp_hdr(skb);
2638 ++ struct sctphdr *sh = (struct sctphdr *)(skb->data + offset);
2639 + __le32 ret, old = sh->checksum;
2640 + const struct skb_checksum_ops ops = {
2641 + .update = sctp_csum_update,
2642 +diff --git a/include/net/sock.h b/include/net/sock.h
2643 +index 7420299c31f5..0aadd3b03ced 100644
2644 +--- a/include/net/sock.h
2645 ++++ b/include/net/sock.h
2646 +@@ -651,6 +651,12 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
2647 + hlist_add_head_rcu(&sk->sk_node, list);
2648 + }
2649 +
2650 ++static inline void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list)
2651 ++{
2652 ++ sock_hold(sk);
2653 ++ hlist_add_tail_rcu(&sk->sk_node, list);
2654 ++}
2655 ++
2656 + static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
2657 + {
2658 + hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
2659 +diff --git a/kernel/cpu.c b/kernel/cpu.c
2660 +index 40d20bf5de28..42ce0b0ae5c5 100644
2661 +--- a/kernel/cpu.c
2662 ++++ b/kernel/cpu.c
2663 +@@ -183,10 +183,17 @@ void cpu_hotplug_disable(void)
2664 + }
2665 + EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
2666 +
2667 ++static void __cpu_hotplug_enable(void)
2668 ++{
2669 ++ if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
2670 ++ return;
2671 ++ cpu_hotplug_disabled--;
2672 ++}
2673 ++
2674 + void cpu_hotplug_enable(void)
2675 + {
2676 + cpu_maps_update_begin();
2677 +- WARN_ON(--cpu_hotplug_disabled < 0);
2678 ++ __cpu_hotplug_enable();
2679 + cpu_maps_update_done();
2680 + }
2681 + EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
2682 +@@ -626,7 +633,7 @@ void enable_nonboot_cpus(void)
2683 +
2684 + /* Allow everyone to use the CPU hotplug again */
2685 + cpu_maps_update_begin();
2686 +- WARN_ON(--cpu_hotplug_disabled < 0);
2687 ++ __cpu_hotplug_enable();
2688 + if (cpumask_empty(frozen_cpus))
2689 + goto out;
2690 +
2691 +diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
2692 +index 358bb53c1e74..7324d83d6bd8 100644
2693 +--- a/kernel/events/ring_buffer.c
2694 ++++ b/kernel/events/ring_buffer.c
2695 +@@ -288,6 +288,13 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
2696 + if (!rb_has_aux(rb) || !atomic_inc_not_zero(&rb->aux_refcount))
2697 + goto err;
2698 +
2699 ++ /*
2700 ++ * If rb::aux_mmap_count is zero (and rb_has_aux() above went through),
2701 ++ * the aux buffer is in perf_mmap_close(), about to get freed.
2702 ++ */
2703 ++ if (!atomic_read(&rb->aux_mmap_count))
2704 ++ goto err;
2705 ++
2706 + /*
2707 + * Nesting is not supported for AUX area, make sure nested
2708 + * writers are caught early
2709 +@@ -468,6 +475,25 @@ static void rb_free_aux_page(struct ring_buffer *rb, int idx)
2710 + __free_page(page);
2711 + }
2712 +
2713 ++static void __rb_free_aux(struct ring_buffer *rb)
2714 ++{
2715 ++ int pg;
2716 ++
2717 ++ if (rb->aux_priv) {
2718 ++ rb->free_aux(rb->aux_priv);
2719 ++ rb->free_aux = NULL;
2720 ++ rb->aux_priv = NULL;
2721 ++ }
2722 ++
2723 ++ if (rb->aux_nr_pages) {
2724 ++ for (pg = 0; pg < rb->aux_nr_pages; pg++)
2725 ++ rb_free_aux_page(rb, pg);
2726 ++
2727 ++ kfree(rb->aux_pages);
2728 ++ rb->aux_nr_pages = 0;
2729 ++ }
2730 ++}
2731 ++
2732 + int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
2733 + pgoff_t pgoff, int nr_pages, long watermark, int flags)
2734 + {
2735 +@@ -556,30 +582,11 @@ out:
2736 + if (!ret)
2737 + rb->aux_pgoff = pgoff;
2738 + else
2739 +- rb_free_aux(rb);
2740 ++ __rb_free_aux(rb);
2741 +
2742 + return ret;
2743 + }
2744 +
2745 +-static void __rb_free_aux(struct ring_buffer *rb)
2746 +-{
2747 +- int pg;
2748 +-
2749 +- if (rb->aux_priv) {
2750 +- rb->free_aux(rb->aux_priv);
2751 +- rb->free_aux = NULL;
2752 +- rb->aux_priv = NULL;
2753 +- }
2754 +-
2755 +- if (rb->aux_nr_pages) {
2756 +- for (pg = 0; pg < rb->aux_nr_pages; pg++)
2757 +- rb_free_aux_page(rb, pg);
2758 +-
2759 +- kfree(rb->aux_pages);
2760 +- rb->aux_nr_pages = 0;
2761 +- }
2762 +-}
2763 +-
2764 + void rb_free_aux(struct ring_buffer *rb)
2765 + {
2766 + if (atomic_dec_and_test(&rb->aux_refcount))
2767 +diff --git a/kernel/futex.c b/kernel/futex.c
2768 +index 0c92c8d34ffa..ec9df5ba040b 100644
2769 +--- a/kernel/futex.c
2770 ++++ b/kernel/futex.c
2771 +@@ -3067,6 +3067,10 @@ int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
2772 + {
2773 + u32 uval, uninitialized_var(nval), mval;
2774 +
2775 ++ /* Futex address must be 32bit aligned */
2776 ++ if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
2777 ++ return -1;
2778 ++
2779 + retry:
2780 + if (get_user(uval, uaddr))
2781 + return -1;
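handle_futex_death() now rejects futex addresses that are not 32-bit aligned before dereferencing them, since an unaligned robust-list entry is corrupt by definition. A tiny standalone sketch of that alignment test on a user pointer; the function name and address values are arbitrary.

    #include <stdint.h>
    #include <stdio.h>

    /* Return 0 if uaddr is suitably aligned for a 32-bit futex word. */
    static int check_futex_alignment(const void *uaddr)
    {
        if (((uintptr_t)uaddr % sizeof(uint32_t)) != 0)
            return -1;      /* corrupt robust-list entry: refuse to touch it */
        return 0;
    }

    int main(void)
    {
        uint32_t word;
        char *base = (char *)&word;

        printf("aligned:   %d\n", check_futex_alignment(base));
        printf("unaligned: %d\n", check_futex_alignment(base + 1));
        return 0;
    }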
2782 +diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
2783 +index 774ab79d3ec7..a49c565529a0 100644
2784 +--- a/kernel/locking/lockdep.c
2785 ++++ b/kernel/locking/lockdep.c
2786 +@@ -3314,6 +3314,9 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
2787 + unsigned int depth;
2788 + int i;
2789 +
2790 ++ if (unlikely(!debug_locks))
2791 ++ return 0;
2792 ++
2793 + depth = curr->lockdep_depth;
2794 + /*
2795 + * This function is about (re)setting the class of a held lock,
2796 +diff --git a/kernel/power/swap.c b/kernel/power/swap.c
2797 +index 12cd989dadf6..160e1006640d 100644
2798 +--- a/kernel/power/swap.c
2799 ++++ b/kernel/power/swap.c
2800 +@@ -36,6 +36,14 @@
2801 +
2802 + #define HIBERNATE_SIG "S1SUSPEND"
2803 +
2804 ++/*
2805 ++ * When reading an {un,}compressed image, we may restore pages in place,
2806 ++ * in which case some architectures need these pages cleaning before they
2807 ++ * can be executed. We don't know which pages these may be, so clean the lot.
2808 ++ */
2809 ++static bool clean_pages_on_read;
2810 ++static bool clean_pages_on_decompress;
2811 ++
2812 + /*
2813 + * The swap map is a data structure used for keeping track of each page
2814 + * written to a swap partition. It consists of many swap_map_page
2815 +@@ -241,6 +249,9 @@ static void hib_end_io(struct bio *bio)
2816 +
2817 + if (bio_data_dir(bio) == WRITE)
2818 + put_page(page);
2819 ++ else if (clean_pages_on_read)
2820 ++ flush_icache_range((unsigned long)page_address(page),
2821 ++ (unsigned long)page_address(page) + PAGE_SIZE);
2822 +
2823 + if (bio->bi_error && !hb->error)
2824 + hb->error = bio->bi_error;
2825 +@@ -1049,6 +1060,7 @@ static int load_image(struct swap_map_handle *handle,
2826 +
2827 + hib_init_batch(&hb);
2828 +
2829 ++ clean_pages_on_read = true;
2830 + printk(KERN_INFO "PM: Loading image data pages (%u pages)...\n",
2831 + nr_to_read);
2832 + m = nr_to_read / 10;
2833 +@@ -1124,6 +1136,10 @@ static int lzo_decompress_threadfn(void *data)
2834 + d->unc_len = LZO_UNC_SIZE;
2835 + d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
2836 + d->unc, &d->unc_len);
2837 ++ if (clean_pages_on_decompress)
2838 ++ flush_icache_range((unsigned long)d->unc,
2839 ++ (unsigned long)d->unc + d->unc_len);
2840 ++
2841 + atomic_set(&d->stop, 1);
2842 + wake_up(&d->done);
2843 + }
2844 +@@ -1189,6 +1205,8 @@ static int load_image_lzo(struct swap_map_handle *handle,
2845 + }
2846 + memset(crc, 0, offsetof(struct crc_data, go));
2847 +
2848 ++ clean_pages_on_decompress = true;
2849 ++
2850 + /*
2851 + * Start the decompression threads.
2852 + */
2853 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
2854 +index c2af250547bb..6051007918ad 100644
2855 +--- a/kernel/sched/fair.c
2856 ++++ b/kernel/sched/fair.c
2857 +@@ -2841,27 +2841,45 @@ dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
2858 + max_t(s64, cfs_rq->runnable_load_sum - se->avg.load_sum, 0);
2859 + }
2860 +
2861 +-/*
2862 +- * Task first catches up with cfs_rq, and then subtract
2863 +- * itself from the cfs_rq (task must be off the queue now).
2864 +- */
2865 +-void remove_entity_load_avg(struct sched_entity *se)
2866 +-{
2867 +- struct cfs_rq *cfs_rq = cfs_rq_of(se);
2868 +- u64 last_update_time;
2869 +-
2870 + #ifndef CONFIG_64BIT
2871 ++static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
2872 ++{
2873 + u64 last_update_time_copy;
2874 ++ u64 last_update_time;
2875 +
2876 + do {
2877 + last_update_time_copy = cfs_rq->load_last_update_time_copy;
2878 + smp_rmb();
2879 + last_update_time = cfs_rq->avg.last_update_time;
2880 + } while (last_update_time != last_update_time_copy);
2881 ++
2882 ++ return last_update_time;
2883 ++}
2884 + #else
2885 +- last_update_time = cfs_rq->avg.last_update_time;
2886 ++static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
2887 ++{
2888 ++ return cfs_rq->avg.last_update_time;
2889 ++}
2890 + #endif
2891 +
2892 ++/*
2893 ++ * Task first catches up with cfs_rq, and then subtract
2894 ++ * itself from the cfs_rq (task must be off the queue now).
2895 ++ */
2896 ++void remove_entity_load_avg(struct sched_entity *se)
2897 ++{
2898 ++ struct cfs_rq *cfs_rq = cfs_rq_of(se);
2899 ++ u64 last_update_time;
2900 ++
2901 ++ /*
2902 ++ * Newly created task or never used group entity should not be removed
2903 ++ * from its (source) cfs_rq
2904 ++ */
2905 ++ if (se->avg.last_update_time == 0)
2906 ++ return;
2907 ++
2908 ++ last_update_time = cfs_rq_last_update_time(cfs_rq);
2909 ++
2910 + __update_load_avg(last_update_time, cpu_of(rq_of(cfs_rq)), &se->avg, 0, 0, NULL);
2911 + atomic_long_add(se->avg.load_avg, &cfs_rq->removed_load_avg);
2912 + atomic_long_add(se->avg.util_avg, &cfs_rq->removed_util_avg);
2913 +diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
2914 +index bfd573122e0d..306a859b36f0 100644
2915 +--- a/kernel/sched/idle.c
2916 ++++ b/kernel/sched/idle.c
2917 +@@ -219,6 +219,7 @@ static void cpu_idle_loop(void)
2918 + */
2919 +
2920 + __current_set_polling();
2921 ++ quiet_vmstat();
2922 + tick_nohz_idle_enter();
2923 +
2924 + while (!need_resched()) {
2925 +diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c
2926 +index 1ef4cc344977..1afb545a37c5 100644
2927 +--- a/lib/int_sqrt.c
2928 ++++ b/lib/int_sqrt.c
2929 +@@ -22,6 +22,9 @@ unsigned long int_sqrt(unsigned long x)
2930 + return x;
2931 +
2932 + m = 1UL << (BITS_PER_LONG - 2);
2933 ++ while (m > x)
2934 ++ m >>= 2;
2935 ++
2936 + while (m != 0) {
2937 + b = y + m;
2938 + y >>= 1;
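The lib/int_sqrt.c hunk skips over leading zero bits: instead of always starting the digit-by-digit loop at the top bit pair, it first shifts the probe bit down until it is no larger than the input, which shortens the loop for small arguments without changing the result. A self-contained copy of the algorithm with that early loop, under a local name so it does not clash with the kernel symbol.

    #include <limits.h>
    #include <stdio.h>

    static unsigned long int_sqrt_demo(unsigned long x)
    {
        unsigned long b, m, y = 0;

        if (x <= 1)
            return x;

        m = 1UL << (sizeof(unsigned long) * CHAR_BIT - 2);
        while (m > x)           /* skip the leading zero bit pairs */
            m >>= 2;

        while (m != 0) {        /* classic digit-by-digit square root */
            b = y + m;
            y >>= 1;
            if (x >= b) {
                x -= b;
                y += m;
            }
            m >>= 2;
        }
        return y;
    }

    int main(void)
    {
        unsigned long samples[] = { 0, 1, 2, 15, 16, 17, 1000000, 4294967295UL };

        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
            printf("isqrt(%lu) = %lu\n", samples[i], int_sqrt_demo(samples[i]));
        return 0;
    }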
2939 +diff --git a/mm/rmap.c b/mm/rmap.c
2940 +index 488dda209431..cf733fab230f 100644
2941 +--- a/mm/rmap.c
2942 ++++ b/mm/rmap.c
2943 +@@ -408,7 +408,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
2944 + list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
2945 + struct anon_vma *anon_vma = avc->anon_vma;
2946 +
2947 +- BUG_ON(anon_vma->degree);
2948 ++ VM_WARN_ON(anon_vma->degree);
2949 + put_anon_vma(anon_vma);
2950 +
2951 + list_del(&avc->same_vma);
2952 +diff --git a/mm/vmstat.c b/mm/vmstat.c
2953 +index a2d70ef74db7..6af9bbad94c7 100644
2954 +--- a/mm/vmstat.c
2955 ++++ b/mm/vmstat.c
2956 +@@ -460,7 +460,7 @@ static int fold_diff(int *diff)
2957 + *
2958 + * The function returns the number of global counters updated.
2959 + */
2960 +-static int refresh_cpu_vm_stats(void)
2961 ++static int refresh_cpu_vm_stats(bool do_pagesets)
2962 + {
2963 + struct zone *zone;
2964 + int i;
2965 +@@ -484,33 +484,35 @@ static int refresh_cpu_vm_stats(void)
2966 + #endif
2967 + }
2968 + }
2969 +- cond_resched();
2970 + #ifdef CONFIG_NUMA
2971 +- /*
2972 +- * Deal with draining the remote pageset of this
2973 +- * processor
2974 +- *
2975 +- * Check if there are pages remaining in this pageset
2976 +- * if not then there is nothing to expire.
2977 +- */
2978 +- if (!__this_cpu_read(p->expire) ||
2979 ++ if (do_pagesets) {
2980 ++ cond_resched();
2981 ++ /*
2982 ++ * Deal with draining the remote pageset of this
2983 ++ * processor
2984 ++ *
2985 ++ * Check if there are pages remaining in this pageset
2986 ++ * if not then there is nothing to expire.
2987 ++ */
2988 ++ if (!__this_cpu_read(p->expire) ||
2989 + !__this_cpu_read(p->pcp.count))
2990 +- continue;
2991 ++ continue;
2992 +
2993 +- /*
2994 +- * We never drain zones local to this processor.
2995 +- */
2996 +- if (zone_to_nid(zone) == numa_node_id()) {
2997 +- __this_cpu_write(p->expire, 0);
2998 +- continue;
2999 +- }
3000 ++ /*
3001 ++ * We never drain zones local to this processor.
3002 ++ */
3003 ++ if (zone_to_nid(zone) == numa_node_id()) {
3004 ++ __this_cpu_write(p->expire, 0);
3005 ++ continue;
3006 ++ }
3007 +
3008 +- if (__this_cpu_dec_return(p->expire))
3009 +- continue;
3010 ++ if (__this_cpu_dec_return(p->expire))
3011 ++ continue;
3012 +
3013 +- if (__this_cpu_read(p->pcp.count)) {
3014 +- drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
3015 +- changes++;
3016 ++ if (__this_cpu_read(p->pcp.count)) {
3017 ++ drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
3018 ++ changes++;
3019 ++ }
3020 + }
3021 + #endif
3022 + }
3023 +@@ -1393,7 +1395,7 @@ static cpumask_var_t cpu_stat_off;
3024 +
3025 + static void vmstat_update(struct work_struct *w)
3026 + {
3027 +- if (refresh_cpu_vm_stats()) {
3028 ++ if (refresh_cpu_vm_stats(true)) {
3029 + /*
3030 + * Counters were updated so we expect more updates
3031 + * to occur in the future. Keep on running the
3032 +@@ -1424,6 +1426,23 @@ static void vmstat_update(struct work_struct *w)
3033 + }
3034 + }
3035 +
3036 ++/*
3037 ++ * Switch off vmstat processing and then fold all the remaining differentials
3038 ++ * until the diffs stay at zero. The function is used by NOHZ and can only be
3039 ++ * invoked when tick processing is not active.
3040 ++ */
3041 ++void quiet_vmstat(void)
3042 ++{
3043 ++ if (system_state != SYSTEM_RUNNING)
3044 ++ return;
3045 ++
3046 ++ do {
3047 ++ if (!cpumask_test_and_set_cpu(smp_processor_id(), cpu_stat_off))
3048 ++ cancel_delayed_work(this_cpu_ptr(&vmstat_work));
3049 ++
3050 ++ } while (refresh_cpu_vm_stats(false));
3051 ++}
3052 ++
3053 + /*
3054 + * Check if the diffs for a certain cpu indicate that
3055 + * an update is needed.
3056 +@@ -1456,7 +1475,7 @@ static bool need_update(int cpu)
3057 + */
3058 + static void vmstat_shepherd(struct work_struct *w);
3059 +
3060 +-static DECLARE_DELAYED_WORK(shepherd, vmstat_shepherd);
3061 ++static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);
3062 +
3063 + static void vmstat_shepherd(struct work_struct *w)
3064 + {
3065 +diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
3066 +index af68674690af..f76e9c1e9f17 100644
3067 +--- a/net/bluetooth/l2cap_core.c
3068 ++++ b/net/bluetooth/l2cap_core.c
3069 +@@ -3315,16 +3315,22 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
3070 +
3071 + while (len >= L2CAP_CONF_OPT_SIZE) {
3072 + len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3073 ++ if (len < 0)
3074 ++ break;
3075 +
3076 + hint = type & L2CAP_CONF_HINT;
3077 + type &= L2CAP_CONF_MASK;
3078 +
3079 + switch (type) {
3080 + case L2CAP_CONF_MTU:
3081 ++ if (olen != 2)
3082 ++ break;
3083 + mtu = val;
3084 + break;
3085 +
3086 + case L2CAP_CONF_FLUSH_TO:
3087 ++ if (olen != 2)
3088 ++ break;
3089 + chan->flush_to = val;
3090 + break;
3091 +
3092 +@@ -3332,26 +3338,30 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
3093 + break;
3094 +
3095 + case L2CAP_CONF_RFC:
3096 +- if (olen == sizeof(rfc))
3097 +- memcpy(&rfc, (void *) val, olen);
3098 ++ if (olen != sizeof(rfc))
3099 ++ break;
3100 ++ memcpy(&rfc, (void *) val, olen);
3101 + break;
3102 +
3103 + case L2CAP_CONF_FCS:
3104 ++ if (olen != 1)
3105 ++ break;
3106 + if (val == L2CAP_FCS_NONE)
3107 + set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3108 + break;
3109 +
3110 + case L2CAP_CONF_EFS:
3111 +- if (olen == sizeof(efs)) {
3112 +- remote_efs = 1;
3113 +- memcpy(&efs, (void *) val, olen);
3114 +- }
3115 ++ if (olen != sizeof(efs))
3116 ++ break;
3117 ++ remote_efs = 1;
3118 ++ memcpy(&efs, (void *) val, olen);
3119 + break;
3120 +
3121 + case L2CAP_CONF_EWS:
3122 ++ if (olen != 2)
3123 ++ break;
3124 + if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3125 + return -ECONNREFUSED;
3126 +-
3127 + set_bit(FLAG_EXT_CTRL, &chan->flags);
3128 + set_bit(CONF_EWS_RECV, &chan->conf_state);
3129 + chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3130 +@@ -3361,7 +3371,6 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
3131 + default:
3132 + if (hint)
3133 + break;
3134 +-
3135 + result = L2CAP_CONF_UNKNOWN;
3136 + *((u8 *) ptr++) = type;
3137 + break;
3138 +@@ -3526,58 +3535,65 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3139 +
3140 + while (len >= L2CAP_CONF_OPT_SIZE) {
3141 + len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3142 ++ if (len < 0)
3143 ++ break;
3144 +
3145 + switch (type) {
3146 + case L2CAP_CONF_MTU:
3147 ++ if (olen != 2)
3148 ++ break;
3149 + if (val < L2CAP_DEFAULT_MIN_MTU) {
3150 + *result = L2CAP_CONF_UNACCEPT;
3151 + chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3152 + } else
3153 + chan->imtu = val;
3154 +- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
3155 ++ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3156 ++ endptr - ptr);
3157 + break;
3158 +
3159 + case L2CAP_CONF_FLUSH_TO:
3160 ++ if (olen != 2)
3161 ++ break;
3162 + chan->flush_to = val;
3163 +- l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3164 +- 2, chan->flush_to, endptr - ptr);
3165 ++ l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3166 ++ chan->flush_to, endptr - ptr);
3167 + break;
3168 +
3169 + case L2CAP_CONF_RFC:
3170 +- if (olen == sizeof(rfc))
3171 +- memcpy(&rfc, (void *)val, olen);
3172 +-
3173 ++ if (olen != sizeof(rfc))
3174 ++ break;
3175 ++ memcpy(&rfc, (void *)val, olen);
3176 + if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3177 + rfc.mode != chan->mode)
3178 + return -ECONNREFUSED;
3179 +-
3180 + chan->fcs = 0;
3181 +-
3182 +- l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3183 +- sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3184 ++ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3185 ++ (unsigned long) &rfc, endptr - ptr);
3186 + break;
3187 +
3188 + case L2CAP_CONF_EWS:
3189 ++ if (olen != 2)
3190 ++ break;
3191 + chan->ack_win = min_t(u16, val, chan->ack_win);
3192 + l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3193 + chan->tx_win, endptr - ptr);
3194 + break;
3195 +
3196 + case L2CAP_CONF_EFS:
3197 +- if (olen == sizeof(efs)) {
3198 +- memcpy(&efs, (void *)val, olen);
3199 +-
3200 +- if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3201 +- efs.stype != L2CAP_SERV_NOTRAFIC &&
3202 +- efs.stype != chan->local_stype)
3203 +- return -ECONNREFUSED;
3204 +-
3205 +- l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3206 +- (unsigned long) &efs, endptr - ptr);
3207 +- }
3208 ++ if (olen != sizeof(efs))
3209 ++ break;
3210 ++ memcpy(&efs, (void *)val, olen);
3211 ++ if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3212 ++ efs.stype != L2CAP_SERV_NOTRAFIC &&
3213 ++ efs.stype != chan->local_stype)
3214 ++ return -ECONNREFUSED;
3215 ++ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3216 ++ (unsigned long) &efs, endptr - ptr);
3217 + break;
3218 +
3219 + case L2CAP_CONF_FCS:
3220 ++ if (olen != 1)
3221 ++ break;
3222 + if (*result == L2CAP_CONF_PENDING)
3223 + if (val == L2CAP_FCS_NONE)
3224 + set_bit(CONF_RECV_NO_FCS,
3225 +@@ -3706,13 +3722,18 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3226 +
3227 + while (len >= L2CAP_CONF_OPT_SIZE) {
3228 + len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3229 ++ if (len < 0)
3230 ++ break;
3231 +
3232 + switch (type) {
3233 + case L2CAP_CONF_RFC:
3234 +- if (olen == sizeof(rfc))
3235 +- memcpy(&rfc, (void *)val, olen);
3236 ++ if (olen != sizeof(rfc))
3237 ++ break;
3238 ++ memcpy(&rfc, (void *)val, olen);
3239 + break;
3240 + case L2CAP_CONF_EWS:
3241 ++ if (olen != 2)
3242 ++ break;
3243 + txwin_ext = val;
3244 + break;
3245 + }
3246 +diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
3247 +index 45fd82e61e79..b0a577a79a6a 100644
3248 +--- a/net/dccp/ipv4.c
3249 ++++ b/net/dccp/ipv4.c
3250 +@@ -592,13 +592,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
3251 + if (inet_csk_reqsk_queue_is_full(sk))
3252 + goto drop;
3253 +
3254 +- /*
3255 +- * Accept backlog is full. If we have already queued enough
3256 +- * of warm entries in syn queue, drop request. It is better than
3257 +- * clogging syn queue with openreqs with exponentially increasing
3258 +- * timeout.
3259 +- */
3260 +- if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
3261 ++ if (sk_acceptq_is_full(sk))
3262 + goto drop;
3263 +
3264 + req = inet_reqsk_alloc(&dccp_request_sock_ops, sk, true);
3265 +diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
3266 +index 0bf41faeffc4..d2caa4d69159 100644
3267 +--- a/net/dccp/ipv6.c
3268 ++++ b/net/dccp/ipv6.c
3269 +@@ -324,7 +324,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
3270 + if (inet_csk_reqsk_queue_is_full(sk))
3271 + goto drop;
3272 +
3273 +- if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
3274 ++ if (sk_acceptq_is_full(sk))
3275 + goto drop;
3276 +
3277 + req = inet_reqsk_alloc(&dccp6_request_sock_ops, sk, true);
3278 +@@ -427,8 +427,8 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
3279 + newnp->ipv6_mc_list = NULL;
3280 + newnp->ipv6_ac_list = NULL;
3281 + newnp->ipv6_fl_list = NULL;
3282 +- newnp->mcast_oif = inet6_iif(skb);
3283 +- newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
3284 ++ newnp->mcast_oif = inet_iif(skb);
3285 ++ newnp->mcast_hops = ip_hdr(skb)->ttl;
3286 +
3287 + /*
3288 + * No need to charge this sock to the relevant IPv6 refcnt debug socks count
3289 +diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
3290 +index ab9f8a66615d..386443e780da 100644
3291 +--- a/net/ipv4/inet_diag.c
3292 ++++ b/net/ipv4/inet_diag.c
3293 +@@ -366,13 +366,20 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
3294 + req->id.idiag_dport, req->id.idiag_src[0],
3295 + req->id.idiag_sport, req->id.idiag_if);
3296 + #if IS_ENABLED(CONFIG_IPV6)
3297 +- else if (req->sdiag_family == AF_INET6)
3298 +- sk = inet6_lookup(net, hashinfo,
3299 +- (struct in6_addr *)req->id.idiag_dst,
3300 +- req->id.idiag_dport,
3301 +- (struct in6_addr *)req->id.idiag_src,
3302 +- req->id.idiag_sport,
3303 +- req->id.idiag_if);
3304 ++ else if (req->sdiag_family == AF_INET6) {
3305 ++ if (ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_dst) &&
3306 ++ ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_src))
3307 ++ sk = inet_lookup(net, hashinfo, req->id.idiag_dst[3],
3308 ++ req->id.idiag_dport, req->id.idiag_src[3],
3309 ++ req->id.idiag_sport, req->id.idiag_if);
3310 ++ else
3311 ++ sk = inet6_lookup(net, hashinfo,
3312 ++ (struct in6_addr *)req->id.idiag_dst,
3313 ++ req->id.idiag_dport,
3314 ++ (struct in6_addr *)req->id.idiag_src,
3315 ++ req->id.idiag_sport,
3316 ++ req->id.idiag_if);
3317 ++ }
3318 + #endif
3319 + else
3320 + goto out_nosk;
3321 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
3322 +index 561f568e8938..aff90b0ddb63 100644
3323 +--- a/net/ipv4/tcp_input.c
3324 ++++ b/net/ipv4/tcp_input.c
3325 +@@ -6305,13 +6305,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
3326 + goto drop;
3327 + }
3328 +
3329 +-
3330 +- /* Accept backlog is full. If we have already queued enough
3331 +- * of warm entries in syn queue, drop request. It is better than
3332 +- * clogging syn queue with openreqs with exponentially increasing
3333 +- * timeout.
3334 +- */
3335 +- if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
3336 ++ if (sk_acceptq_is_full(sk)) {
3337 + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
3338 + goto drop;
3339 + }
3340 +diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
3341 +index 0a37ddc7af51..3697cd08c515 100644
3342 +--- a/net/ipv6/icmp.c
3343 ++++ b/net/ipv6/icmp.c
3344 +@@ -98,7 +98,7 @@ static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
3345 +
3346 + if (!(type & ICMPV6_INFOMSG_MASK))
3347 + if (icmp6->icmp6_type == ICMPV6_ECHO_REQUEST)
3348 +- ping_err(skb, offset, info);
3349 ++ ping_err(skb, offset, ntohl(info));
3350 + }
3351 +
3352 + static int icmpv6_rcv(struct sk_buff *skb);
3353 +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
3354 +index d6c191158e07..6e7f99569bdf 100644
3355 +--- a/net/ipv6/tcp_ipv6.c
3356 ++++ b/net/ipv6/tcp_ipv6.c
3357 +@@ -1043,11 +1043,11 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
3358 + newnp->ipv6_fl_list = NULL;
3359 + newnp->pktoptions = NULL;
3360 + newnp->opt = NULL;
3361 +- newnp->mcast_oif = tcp_v6_iif(skb);
3362 +- newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
3363 +- newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
3364 ++ newnp->mcast_oif = inet_iif(skb);
3365 ++ newnp->mcast_hops = ip_hdr(skb)->ttl;
3366 ++ newnp->rcv_flowinfo = 0;
3367 + if (np->repflow)
3368 +- newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
3369 ++ newnp->flow_label = 0;
3370 +
3371 + /*
3372 + * No need to charge this sock to the relevant IPv6 refcnt debug socks count
3373 +diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
3374 +index 33d5271a9e32..466922f09d04 100644
3375 +--- a/net/mac80211/mesh_hwmp.c
3376 ++++ b/net/mac80211/mesh_hwmp.c
3377 +@@ -530,7 +530,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
3378 + const u8 *target_addr, *orig_addr;
3379 + const u8 *da;
3380 + u8 target_flags, ttl, flags;
3381 +- u32 orig_sn, target_sn, lifetime, target_metric;
3382 ++ u32 orig_sn, target_sn, lifetime, target_metric = 0;
3383 + bool reply = false;
3384 + bool forward = true;
3385 + bool root_is_gate;
3386 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
3387 +index d517dd7f4ac7..7d93228ba1e1 100644
3388 +--- a/net/packet/af_packet.c
3389 ++++ b/net/packet/af_packet.c
3390 +@@ -3155,7 +3155,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
3391 + }
3392 +
3393 + mutex_lock(&net->packet.sklist_lock);
3394 +- sk_add_node_rcu(sk, &net->packet.sklist);
3395 ++ sk_add_node_tail_rcu(sk, &net->packet.sklist);
3396 + mutex_unlock(&net->packet.sklist_lock);
3397 +
3398 + preempt_disable();
3399 +@@ -4130,7 +4130,7 @@ static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
3400 + struct pgv *pg_vec;
3401 + int i;
3402 +
3403 +- pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
3404 ++ pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
3405 + if (unlikely(!pg_vec))
3406 + goto out;
3407 +
3408 +diff --git a/net/rose/rose_subr.c b/net/rose/rose_subr.c
3409 +index 7ca57741b2fb..7849f286bb93 100644
3410 +--- a/net/rose/rose_subr.c
3411 ++++ b/net/rose/rose_subr.c
3412 +@@ -105,16 +105,17 @@ void rose_write_internal(struct sock *sk, int frametype)
3413 + struct sk_buff *skb;
3414 + unsigned char *dptr;
3415 + unsigned char lci1, lci2;
3416 +- char buffer[100];
3417 +- int len, faclen = 0;
3418 ++ int maxfaclen = 0;
3419 ++ int len, faclen;
3420 ++ int reserve;
3421 +
3422 +- len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 1;
3423 ++ reserve = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1;
3424 ++ len = ROSE_MIN_LEN;
3425 +
3426 + switch (frametype) {
3427 + case ROSE_CALL_REQUEST:
3428 + len += 1 + ROSE_ADDR_LEN + ROSE_ADDR_LEN;
3429 +- faclen = rose_create_facilities(buffer, rose);
3430 +- len += faclen;
3431 ++ maxfaclen = 256;
3432 + break;
3433 + case ROSE_CALL_ACCEPTED:
3434 + case ROSE_CLEAR_REQUEST:
3435 +@@ -123,15 +124,16 @@ void rose_write_internal(struct sock *sk, int frametype)
3436 + break;
3437 + }
3438 +
3439 +- if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
3440 ++ skb = alloc_skb(reserve + len + maxfaclen, GFP_ATOMIC);
3441 ++ if (!skb)
3442 + return;
3443 +
3444 + /*
3445 + * Space for AX.25 header and PID.
3446 + */
3447 +- skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1);
3448 ++ skb_reserve(skb, reserve);
3449 +
3450 +- dptr = skb_put(skb, skb_tailroom(skb));
3451 ++ dptr = skb_put(skb, len);
3452 +
3453 + lci1 = (rose->lci >> 8) & 0x0F;
3454 + lci2 = (rose->lci >> 0) & 0xFF;
3455 +@@ -146,7 +148,8 @@ void rose_write_internal(struct sock *sk, int frametype)
3456 + dptr += ROSE_ADDR_LEN;
3457 + memcpy(dptr, &rose->source_addr, ROSE_ADDR_LEN);
3458 + dptr += ROSE_ADDR_LEN;
3459 +- memcpy(dptr, buffer, faclen);
3460 ++ faclen = rose_create_facilities(dptr, rose);
3461 ++ skb_put(skb, faclen);
3462 + dptr += faclen;
3463 + break;
3464 +
3465 +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
3466 +index 642a78079ae1..81013490a99f 100644
3467 +--- a/net/wireless/nl80211.c
3468 ++++ b/net/wireless/nl80211.c
3469 +@@ -11721,7 +11721,7 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
3470 + struct sk_buff *msg;
3471 + void *hdr;
3472 +
3473 +- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
3474 ++ msg = nlmsg_new(100 + len, gfp);
3475 + if (!msg)
3476 + return;
3477 +
3478 +@@ -11873,7 +11873,7 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
3479 + struct sk_buff *msg;
3480 + void *hdr;
3481 +
3482 +- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
3483 ++ msg = nlmsg_new(100 + req_ie_len + resp_ie_len, gfp);
3484 + if (!msg)
3485 + return;
3486 +
3487 +@@ -11913,7 +11913,7 @@ void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
3488 + struct sk_buff *msg;
3489 + void *hdr;
3490 +
3491 +- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
3492 ++ msg = nlmsg_new(100 + req_ie_len + resp_ie_len, gfp);
3493 + if (!msg)
3494 + return;
3495 +
3496 +@@ -11951,7 +11951,7 @@ void nl80211_send_disconnected(struct cfg80211_registered_device *rdev,
3497 + struct sk_buff *msg;
3498 + void *hdr;
3499 +
3500 +- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
3501 ++ msg = nlmsg_new(100 + ie_len, GFP_KERNEL);
3502 + if (!msg)
3503 + return;
3504 +
3505 +@@ -12028,7 +12028,7 @@ void cfg80211_notify_new_peer_candidate(struct net_device *dev, const u8 *addr,
3506 +
3507 + trace_cfg80211_notify_new_peer_candidate(dev, addr);
3508 +
3509 +- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
3510 ++ msg = nlmsg_new(100 + ie_len, gfp);
3511 + if (!msg)
3512 + return;
3513 +
3514 +@@ -12397,7 +12397,7 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
3515 + struct sk_buff *msg;
3516 + void *hdr;
3517 +
3518 +- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
3519 ++ msg = nlmsg_new(100 + len, gfp);
3520 + if (!msg)
3521 + return -ENOMEM;
3522 +
3523 +@@ -12440,7 +12440,7 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
3524 +
3525 + trace_cfg80211_mgmt_tx_status(wdev, cookie, ack);
3526 +
3527 +- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
3528 ++ msg = nlmsg_new(100 + len, gfp);
3529 + if (!msg)
3530 + return;
3531 +
3532 +@@ -13244,7 +13244,7 @@ void cfg80211_ft_event(struct net_device *netdev,
3533 + if (!ft_event->target_ap)
3534 + return;
3535 +
3536 +- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
3537 ++ msg = nlmsg_new(100 + ft_event->ric_ies_len, GFP_KERNEL);
3538 + if (!msg)
3539 + return;
3540 +
3541 +diff --git a/scripts/setlocalversion b/scripts/setlocalversion
3542 +index 63d91e22ed7c..966dd3924ea9 100755
3543 +--- a/scripts/setlocalversion
3544 ++++ b/scripts/setlocalversion
3545 +@@ -143,7 +143,7 @@ fi
3546 + if test -e include/config/auto.conf; then
3547 + . include/config/auto.conf
3548 + else
3549 +- echo "Error: kernelrelease not valid - run 'make prepare' to update it"
3550 ++ echo "Error: kernelrelease not valid - run 'make prepare' to update it" >&2
3551 + exit 1
3552 + fi
3553 +
3554 +diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
3555 +index 2272aee12871..3c88a3384064 100644
3556 +--- a/sound/core/compress_offload.c
3557 ++++ b/sound/core/compress_offload.c
3558 +@@ -38,6 +38,7 @@
3559 + #include <linux/uio.h>
3560 + #include <linux/uaccess.h>
3561 + #include <linux/module.h>
3562 ++#include <linux/compat.h>
3563 + #include <sound/core.h>
3564 + #include <sound/initval.h>
3565 + #include <sound/compress_params.h>
3566 +@@ -859,6 +860,15 @@ static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
3567 + return retval;
3568 + }
3569 +
3570 ++/* support of 32bit userspace on 64bit platforms */
3571 ++#ifdef CONFIG_COMPAT
3572 ++static long snd_compr_ioctl_compat(struct file *file, unsigned int cmd,
3573 ++ unsigned long arg)
3574 ++{
3575 ++ return snd_compr_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
3576 ++}
3577 ++#endif
3578 ++
3579 + static const struct file_operations snd_compr_file_ops = {
3580 + .owner = THIS_MODULE,
3581 + .open = snd_compr_open,
3582 +@@ -866,6 +876,9 @@ static const struct file_operations snd_compr_file_ops = {
3583 + .write = snd_compr_write,
3584 + .read = snd_compr_read,
3585 + .unlocked_ioctl = snd_compr_ioctl,
3586 ++#ifdef CONFIG_COMPAT
3587 ++ .compat_ioctl = snd_compr_ioctl_compat,
3588 ++#endif
3589 + .mmap = snd_compr_mmap,
3590 + .poll = snd_compr_poll,
3591 + };
3592 +diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
3593 +index 07feb35f1935..443bb8ce8255 100644
3594 +--- a/sound/core/oss/pcm_oss.c
3595 ++++ b/sound/core/oss/pcm_oss.c
3596 +@@ -950,6 +950,28 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
3597 + oss_frame_size = snd_pcm_format_physical_width(params_format(params)) *
3598 + params_channels(params) / 8;
3599 +
3600 ++ err = snd_pcm_oss_period_size(substream, params, sparams);
3601 ++ if (err < 0)
3602 ++ goto failure;
3603 ++
3604 ++ n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size);
3605 ++ err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL);
3606 ++ if (err < 0)
3607 ++ goto failure;
3608 ++
3609 ++ err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS,
3610 ++ runtime->oss.periods, NULL);
3611 ++ if (err < 0)
3612 ++ goto failure;
3613 ++
3614 ++ snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
3615 ++
3616 ++ err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams);
3617 ++ if (err < 0) {
3618 ++ pcm_dbg(substream->pcm, "HW_PARAMS failed: %i\n", err);
3619 ++ goto failure;
3620 ++ }
3621 ++
3622 + #ifdef CONFIG_SND_PCM_OSS_PLUGINS
3623 + snd_pcm_oss_plugin_clear(substream);
3624 + if (!direct) {
3625 +@@ -984,27 +1006,6 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
3626 + }
3627 + #endif
3628 +
3629 +- err = snd_pcm_oss_period_size(substream, params, sparams);
3630 +- if (err < 0)
3631 +- goto failure;
3632 +-
3633 +- n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size);
3634 +- err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL);
3635 +- if (err < 0)
3636 +- goto failure;
3637 +-
3638 +- err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS,
3639 +- runtime->oss.periods, NULL);
3640 +- if (err < 0)
3641 +- goto failure;
3642 +-
3643 +- snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
3644 +-
3645 +- if ((err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams)) < 0) {
3646 +- pcm_dbg(substream->pcm, "HW_PARAMS failed: %i\n", err);
3647 +- goto failure;
3648 +- }
3649 +-
3650 + if (runtime->oss.trigger) {
3651 + sw_params->start_threshold = 1;
3652 + } else {
3653 +diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
3654 +index 9b6dcdea4431..4d6f0f56d54a 100644
3655 +--- a/sound/core/pcm_native.c
3656 ++++ b/sound/core/pcm_native.c
3657 +@@ -1254,8 +1254,15 @@ static int snd_pcm_pause(struct snd_pcm_substream *substream, int push)
3658 + static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state)
3659 + {
3660 + struct snd_pcm_runtime *runtime = substream->runtime;
3661 +- if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
3662 ++ switch (runtime->status->state) {
3663 ++ case SNDRV_PCM_STATE_SUSPENDED:
3664 + return -EBUSY;
3665 ++ /* unresumable PCM state; return -EBUSY for skipping suspend */
3666 ++ case SNDRV_PCM_STATE_OPEN:
3667 ++ case SNDRV_PCM_STATE_SETUP:
3668 ++ case SNDRV_PCM_STATE_DISCONNECTED:
3669 ++ return -EBUSY;
3670 ++ }
3671 + runtime->trigger_master = substream;
3672 + return 0;
3673 + }
3674 +diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
3675 +index 59111cadaec2..c8b2309352d7 100644
3676 +--- a/sound/core/rawmidi.c
3677 ++++ b/sound/core/rawmidi.c
3678 +@@ -29,6 +29,7 @@
3679 + #include <linux/mutex.h>
3680 + #include <linux/module.h>
3681 + #include <linux/delay.h>
3682 ++#include <linux/nospec.h>
3683 + #include <sound/rawmidi.h>
3684 + #include <sound/info.h>
3685 + #include <sound/control.h>
3686 +@@ -591,6 +592,7 @@ static int __snd_rawmidi_info_select(struct snd_card *card,
3687 + return -ENXIO;
3688 + if (info->stream < 0 || info->stream > 1)
3689 + return -EINVAL;
3690 ++ info->stream = array_index_nospec(info->stream, 2);
3691 + pstr = &rmidi->streams[info->stream];
3692 + if (pstr->substream_count == 0)
3693 + return -ENOENT;
3694 +diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
3695 +index ea545f9291b4..df5b984bb33f 100644
3696 +--- a/sound/core/seq/oss/seq_oss_synth.c
3697 ++++ b/sound/core/seq/oss/seq_oss_synth.c
3698 +@@ -617,13 +617,14 @@ int
3699 + snd_seq_oss_synth_make_info(struct seq_oss_devinfo *dp, int dev, struct synth_info *inf)
3700 + {
3701 + struct seq_oss_synth *rec;
3702 ++ struct seq_oss_synthinfo *info = get_synthinfo_nospec(dp, dev);
3703 +
3704 +- if (dev < 0 || dev >= dp->max_synthdev)
3705 ++ if (!info)
3706 + return -ENXIO;
3707 +
3708 +- if (dp->synths[dev].is_midi) {
3709 ++ if (info->is_midi) {
3710 + struct midi_info minf;
3711 +- snd_seq_oss_midi_make_info(dp, dp->synths[dev].midi_mapped, &minf);
3712 ++ snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf);
3713 + inf->synth_type = SYNTH_TYPE_MIDI;
3714 + inf->synth_subtype = 0;
3715 + inf->nr_voices = 16;
3716 +diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
3717 +index f6d4a1046e54..ad0b23a21bc8 100644
3718 +--- a/sound/pci/hda/hda_codec.c
3719 ++++ b/sound/pci/hda/hda_codec.c
3720 +@@ -3004,6 +3004,7 @@ static void hda_call_codec_resume(struct hda_codec *codec)
3721 + hda_jackpoll_work(&codec->jackpoll_work.work);
3722 + else
3723 + snd_hda_jack_report_sync(codec);
3724 ++ codec->core.dev.power.power_state = PMSG_ON;
3725 + atomic_dec(&codec->core.in_pm);
3726 + }
3727 +
3728 +@@ -3036,10 +3037,62 @@ static int hda_codec_runtime_resume(struct device *dev)
3729 + }
3730 + #endif /* CONFIG_PM */
3731 +
3732 ++#ifdef CONFIG_PM_SLEEP
3733 ++static int hda_codec_force_resume(struct device *dev)
3734 ++{
3735 ++ int ret;
3736 ++
3737 ++ /* The get/put pair below enforces the runtime resume even if the
3738 ++ * device hasn't been used at suspend time. This trick is needed to
3739 ++ * update the jack state change during the sleep.
3740 ++ */
3741 ++ pm_runtime_get_noresume(dev);
3742 ++ ret = pm_runtime_force_resume(dev);
3743 ++ pm_runtime_put(dev);
3744 ++ return ret;
3745 ++}
3746 ++
3747 ++static int hda_codec_pm_suspend(struct device *dev)
3748 ++{
3749 ++ dev->power.power_state = PMSG_SUSPEND;
3750 ++ return pm_runtime_force_suspend(dev);
3751 ++}
3752 ++
3753 ++static int hda_codec_pm_resume(struct device *dev)
3754 ++{
3755 ++ dev->power.power_state = PMSG_RESUME;
3756 ++ return hda_codec_force_resume(dev);
3757 ++}
3758 ++
3759 ++static int hda_codec_pm_freeze(struct device *dev)
3760 ++{
3761 ++ dev->power.power_state = PMSG_FREEZE;
3762 ++ return pm_runtime_force_suspend(dev);
3763 ++}
3764 ++
3765 ++static int hda_codec_pm_thaw(struct device *dev)
3766 ++{
3767 ++ dev->power.power_state = PMSG_THAW;
3768 ++ return hda_codec_force_resume(dev);
3769 ++}
3770 ++
3771 ++static int hda_codec_pm_restore(struct device *dev)
3772 ++{
3773 ++ dev->power.power_state = PMSG_RESTORE;
3774 ++ return hda_codec_force_resume(dev);
3775 ++}
3776 ++#endif /* CONFIG_PM_SLEEP */
3777 ++
3778 + /* referred in hda_bind.c */
3779 + const struct dev_pm_ops hda_codec_driver_pm = {
3780 +- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
3781 +- pm_runtime_force_resume)
3782 ++#ifdef CONFIG_PM_SLEEP
3783 ++ .suspend = hda_codec_pm_suspend,
3784 ++ .resume = hda_codec_pm_resume,
3785 ++ .freeze = hda_codec_pm_freeze,
3786 ++ .thaw = hda_codec_pm_thaw,
3787 ++ .poweroff = hda_codec_pm_suspend,
3788 ++ .restore = hda_codec_pm_restore,
3789 ++#endif /* CONFIG_PM_SLEEP */
3790 + SET_RUNTIME_PM_OPS(hda_codec_runtime_suspend, hda_codec_runtime_resume,
3791 + NULL)
3792 + };
3793 +diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
3794 +index d01e2ce818f7..62b38f2ff60d 100644
3795 +--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
3796 ++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
3797 +@@ -238,19 +238,15 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
3798 + if (!(decoder->tsc_ctc_ratio_n % decoder->tsc_ctc_ratio_d))
3799 + decoder->tsc_ctc_mult = decoder->tsc_ctc_ratio_n /
3800 + decoder->tsc_ctc_ratio_d;
3801 +-
3802 +- /*
3803 +- * Allow for timestamps appearing to backwards because a TSC
3804 +- * packet has slipped past a MTC packet, so allow 2 MTC ticks
3805 +- * or ...
3806 +- */
3807 +- decoder->tsc_slip = multdiv(2 << decoder->mtc_shift,
3808 +- decoder->tsc_ctc_ratio_n,
3809 +- decoder->tsc_ctc_ratio_d);
3810 + }
3811 +- /* ... or 0x100 paranoia */
3812 +- if (decoder->tsc_slip < 0x100)
3813 +- decoder->tsc_slip = 0x100;
3814 ++
3815 ++ /*
3816 ++ * A TSC packet can slip past MTC packets so that the timestamp appears
3817 ++ * to go backwards. One estimate is that can be up to about 40 CPU
3818 ++ * cycles, which is certainly less than 0x1000 TSC ticks, but accept
3819 ++ * slippage an order of magnitude more to be on the safe side.
3820 ++ */
3821 ++ decoder->tsc_slip = 0x10000;
3822 +
3823 + intel_pt_log("timestamp: mtc_shift %u\n", decoder->mtc_shift);
3824 + intel_pt_log("timestamp: tsc_ctc_ratio_n %u\n", decoder->tsc_ctc_ratio_n);
3825 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
3826 +index fce48d11ae07..08a954582e31 100644
3827 +--- a/virt/kvm/kvm_main.c
3828 ++++ b/virt/kvm/kvm_main.c
3829 +@@ -2611,6 +2611,9 @@ static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
3830 + {
3831 + struct kvm_device *dev = filp->private_data;
3832 +
3833 ++ if (dev->kvm->mm != current->mm)
3834 ++ return -EIO;
3835 ++
3836 + switch (ioctl) {
3837 + case KVM_SET_DEVICE_ATTR:
3838 + return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);