Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Wed, 30 May 2018 11:38:20
Message-Id: 1527680285.83af8acee1b778f2ffff17695cff770dd1bb1667.mpagano@gentoo
1 commit: 83af8acee1b778f2ffff17695cff770dd1bb1667
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed May 30 11:38:05 2018 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed May 30 11:38:05 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=83af8ace
7
8 Linux patch 4.4.134
9
10 0000_README | 4 +
11 1133_linux-4.4.134.patch | 7683 ++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 7687 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index 2913d51..300dbde 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -575,6 +575,10 @@ Patch: 1132_linux-4.4.133.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.4.133
21
22 +Patch: 1133_linux-4.4.134.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.4.134
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1133_linux-4.4.134.patch b/1133_linux-4.4.134.patch
31 new file mode 100644
32 index 0000000..7024f15
33 --- /dev/null
34 +++ b/1133_linux-4.4.134.patch
35 @@ -0,0 +1,7683 @@
36 +diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt
37 +index 1699a55b7b70..ef639960b272 100644
38 +--- a/Documentation/device-mapper/thin-provisioning.txt
39 ++++ b/Documentation/device-mapper/thin-provisioning.txt
40 +@@ -112,9 +112,11 @@ $low_water_mark is expressed in blocks of size $data_block_size. If
41 + free space on the data device drops below this level then a dm event
42 + will be triggered which a userspace daemon should catch allowing it to
43 + extend the pool device. Only one such event will be sent.
44 +-Resuming a device with a new table itself triggers an event so the
45 +-userspace daemon can use this to detect a situation where a new table
46 +-already exceeds the threshold.
47 ++
48 ++No special event is triggered if a just resumed device's free space is below
49 ++the low water mark. However, resuming a device always triggers an
50 ++event; a userspace daemon should verify that free space exceeds the low
51 ++water mark when handling this event.
52 +
53 + A low water mark for the metadata device is maintained in the kernel and
54 + will trigger a dm event if free space on the metadata device drops below
55 +diff --git a/Makefile b/Makefile
56 +index ac52ee65685b..119dbcb4f311 100644
57 +--- a/Makefile
58 ++++ b/Makefile
59 +@@ -1,6 +1,6 @@
60 + VERSION = 4
61 + PATCHLEVEL = 4
62 +-SUBLEVEL = 133
63 ++SUBLEVEL = 134
64 + EXTRAVERSION =
65 + NAME = Blurry Fish Butt
66 +
67 +diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h
68 +index 0ca9724597c1..7081e52291d0 100644
69 +--- a/arch/alpha/include/asm/xchg.h
70 ++++ b/arch/alpha/include/asm/xchg.h
71 +@@ -11,6 +11,10 @@
72 + * Atomic exchange.
73 + * Since it can be used to implement critical sections
74 + * it must clobber "memory" (also for interrupts in UP).
75 ++ *
76 ++ * The leading and the trailing memory barriers guarantee that these
77 ++ * operations are fully ordered.
78 ++ *
79 + */
80 +
81 + static inline unsigned long
82 +@@ -18,6 +22,7 @@ ____xchg(_u8, volatile char *m, unsigned long val)
83 + {
84 + unsigned long ret, tmp, addr64;
85 +
86 ++ smp_mb();
87 + __asm__ __volatile__(
88 + " andnot %4,7,%3\n"
89 + " insbl %1,%4,%1\n"
90 +@@ -42,6 +47,7 @@ ____xchg(_u16, volatile short *m, unsigned long val)
91 + {
92 + unsigned long ret, tmp, addr64;
93 +
94 ++ smp_mb();
95 + __asm__ __volatile__(
96 + " andnot %4,7,%3\n"
97 + " inswl %1,%4,%1\n"
98 +@@ -66,6 +72,7 @@ ____xchg(_u32, volatile int *m, unsigned long val)
99 + {
100 + unsigned long dummy;
101 +
102 ++ smp_mb();
103 + __asm__ __volatile__(
104 + "1: ldl_l %0,%4\n"
105 + " bis $31,%3,%1\n"
106 +@@ -86,6 +93,7 @@ ____xchg(_u64, volatile long *m, unsigned long val)
107 + {
108 + unsigned long dummy;
109 +
110 ++ smp_mb();
111 + __asm__ __volatile__(
112 + "1: ldq_l %0,%4\n"
113 + " bis $31,%3,%1\n"
114 +@@ -127,10 +135,12 @@ ____xchg(, volatile void *ptr, unsigned long x, int size)
115 + * store NEW in MEM. Return the initial value in MEM. Success is
116 + * indicated by comparing RETURN with OLD.
117 + *
118 +- * The memory barrier should be placed in SMP only when we actually
119 +- * make the change. If we don't change anything (so if the returned
120 +- * prev is equal to old) then we aren't acquiring anything new and
121 +- * we don't need any memory barrier as far I can tell.
122 ++ * The leading and the trailing memory barriers guarantee that these
123 ++ * operations are fully ordered.
124 ++ *
125 ++ * The trailing memory barrier is placed in SMP unconditionally, in
126 ++ * order to guarantee that dependency ordering is preserved when a
127 ++ * dependency is headed by an unsuccessful operation.
128 + */
129 +
130 + static inline unsigned long
131 +@@ -138,6 +148,7 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
132 + {
133 + unsigned long prev, tmp, cmp, addr64;
134 +
135 ++ smp_mb();
136 + __asm__ __volatile__(
137 + " andnot %5,7,%4\n"
138 + " insbl %1,%5,%1\n"
139 +@@ -149,8 +160,8 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
140 + " or %1,%2,%2\n"
141 + " stq_c %2,0(%4)\n"
142 + " beq %2,3f\n"
143 +- __ASM__MB
144 + "2:\n"
145 ++ __ASM__MB
146 + ".subsection 2\n"
147 + "3: br 1b\n"
148 + ".previous"
149 +@@ -165,6 +176,7 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
150 + {
151 + unsigned long prev, tmp, cmp, addr64;
152 +
153 ++ smp_mb();
154 + __asm__ __volatile__(
155 + " andnot %5,7,%4\n"
156 + " inswl %1,%5,%1\n"
157 +@@ -176,8 +188,8 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
158 + " or %1,%2,%2\n"
159 + " stq_c %2,0(%4)\n"
160 + " beq %2,3f\n"
161 +- __ASM__MB
162 + "2:\n"
163 ++ __ASM__MB
164 + ".subsection 2\n"
165 + "3: br 1b\n"
166 + ".previous"
167 +@@ -192,6 +204,7 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
168 + {
169 + unsigned long prev, cmp;
170 +
171 ++ smp_mb();
172 + __asm__ __volatile__(
173 + "1: ldl_l %0,%5\n"
174 + " cmpeq %0,%3,%1\n"
175 +@@ -199,8 +212,8 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
176 + " mov %4,%1\n"
177 + " stl_c %1,%2\n"
178 + " beq %1,3f\n"
179 +- __ASM__MB
180 + "2:\n"
181 ++ __ASM__MB
182 + ".subsection 2\n"
183 + "3: br 1b\n"
184 + ".previous"
185 +@@ -215,6 +228,7 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
186 + {
187 + unsigned long prev, cmp;
188 +
189 ++ smp_mb();
190 + __asm__ __volatile__(
191 + "1: ldq_l %0,%5\n"
192 + " cmpeq %0,%3,%1\n"
193 +@@ -222,8 +236,8 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
194 + " mov %4,%1\n"
195 + " stq_c %1,%2\n"
196 + " beq %1,3f\n"
197 +- __ASM__MB
198 + "2:\n"
199 ++ __ASM__MB
200 + ".subsection 2\n"
201 + "3: br 1b\n"
202 + ".previous"
203 +diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
204 +index 2d785f5a3041..c4ee25e88a7b 100644
205 +--- a/arch/arc/Kconfig
206 ++++ b/arch/arc/Kconfig
207 +@@ -479,7 +479,6 @@ config ARC_CURR_IN_REG
208 +
209 + config ARC_EMUL_UNALIGNED
210 + bool "Emulate unaligned memory access (userspace only)"
211 +- default N
212 + select SYSCTL_ARCH_UNALIGN_NO_WARN
213 + select SYSCTL_ARCH_UNALIGN_ALLOW
214 + depends on ISA_ARCOMPACT
215 +diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
216 +index 39c470e291f9..69381deeb703 100644
217 +--- a/arch/arm/boot/dts/socfpga.dtsi
218 ++++ b/arch/arm/boot/dts/socfpga.dtsi
219 +@@ -738,7 +738,7 @@
220 + timer@fffec600 {
221 + compatible = "arm,cortex-a9-twd-timer";
222 + reg = <0xfffec600 0x100>;
223 +- interrupts = <1 13 0xf04>;
224 ++ interrupts = <1 13 0xf01>;
225 + clocks = <&mpu_periph_clk>;
226 + };
227 +
228 +diff --git a/arch/arm/include/asm/vdso.h b/arch/arm/include/asm/vdso.h
229 +index d0295f1dd1a3..ff65b6d96c7e 100644
230 +--- a/arch/arm/include/asm/vdso.h
231 ++++ b/arch/arm/include/asm/vdso.h
232 +@@ -11,8 +11,6 @@ struct mm_struct;
233 +
234 + void arm_install_vdso(struct mm_struct *mm, unsigned long addr);
235 +
236 +-extern char vdso_start, vdso_end;
237 +-
238 + extern unsigned int vdso_total_pages;
239 +
240 + #else /* CONFIG_VDSO */
241 +diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
242 +index 54a5aeab988d..2dee87273e51 100644
243 +--- a/arch/arm/kernel/vdso.c
244 ++++ b/arch/arm/kernel/vdso.c
245 +@@ -38,6 +38,8 @@
246 +
247 + static struct page **vdso_text_pagelist;
248 +
249 ++extern char vdso_start[], vdso_end[];
250 ++
251 + /* Total number of pages needed for the data and text portions of the VDSO. */
252 + unsigned int vdso_total_pages __read_mostly;
253 +
254 +@@ -178,13 +180,13 @@ static int __init vdso_init(void)
255 + unsigned int text_pages;
256 + int i;
257 +
258 +- if (memcmp(&vdso_start, "\177ELF", 4)) {
259 ++ if (memcmp(vdso_start, "\177ELF", 4)) {
260 + pr_err("VDSO is not a valid ELF object!\n");
261 + return -ENOEXEC;
262 + }
263 +
264 +- text_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
265 +- pr_debug("vdso: %i text pages at base %p\n", text_pages, &vdso_start);
266 ++ text_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
267 ++ pr_debug("vdso: %i text pages at base %p\n", text_pages, vdso_start);
268 +
269 + /* Allocate the VDSO text pagelist */
270 + vdso_text_pagelist = kcalloc(text_pages, sizeof(struct page *),
271 +@@ -199,7 +201,7 @@ static int __init vdso_init(void)
272 + for (i = 0; i < text_pages; i++) {
273 + struct page *page;
274 +
275 +- page = virt_to_page(&vdso_start + i * PAGE_SIZE);
276 ++ page = virt_to_page(vdso_start + i * PAGE_SIZE);
277 + vdso_text_pagelist[i] = page;
278 + }
279 +
280 +@@ -210,7 +212,7 @@ static int __init vdso_init(void)
281 +
282 + cntvct_ok = cntvct_functional();
283 +
284 +- patch_vdso(&vdso_start);
285 ++ patch_vdso(vdso_start);
286 +
287 + return 0;
288 + }
289 +diff --git a/arch/arm/mach-omap1/clock.c b/arch/arm/mach-omap1/clock.c
290 +index 4f5fd4a084c0..034b89499bd7 100644
291 +--- a/arch/arm/mach-omap1/clock.c
292 ++++ b/arch/arm/mach-omap1/clock.c
293 +@@ -1031,17 +1031,17 @@ static int clk_debugfs_register_one(struct clk *c)
294 + return -ENOMEM;
295 + c->dent = d;
296 +
297 +- d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount);
298 ++ d = debugfs_create_u8("usecount", S_IRUGO, c->dent, &c->usecount);
299 + if (!d) {
300 + err = -ENOMEM;
301 + goto err_out;
302 + }
303 +- d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
304 ++ d = debugfs_create_ulong("rate", S_IRUGO, c->dent, &c->rate);
305 + if (!d) {
306 + err = -ENOMEM;
307 + goto err_out;
308 + }
309 +- d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
310 ++ d = debugfs_create_x8("flags", S_IRUGO, c->dent, &c->flags);
311 + if (!d) {
312 + err = -ENOMEM;
313 + goto err_out;
314 +diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
315 +index 58920bc8807b..3d876bde8c85 100644
316 +--- a/arch/arm/mach-omap2/pm.c
317 ++++ b/arch/arm/mach-omap2/pm.c
318 +@@ -231,7 +231,7 @@ static void omap_pm_end(void)
319 + cpu_idle_poll_ctrl(false);
320 + }
321 +
322 +-static void omap_pm_finish(void)
323 ++static void omap_pm_wake(void)
324 + {
325 + if (cpu_is_omap34xx())
326 + omap_prcm_irq_complete();
327 +@@ -241,7 +241,7 @@ static const struct platform_suspend_ops omap_pm_ops = {
328 + .begin = omap_pm_begin,
329 + .end = omap_pm_end,
330 + .enter = omap_pm_enter,
331 +- .finish = omap_pm_finish,
332 ++ .wake = omap_pm_wake,
333 + .valid = suspend_valid_only_mem,
334 + };
335 +
336 +diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
337 +index 83fc403aec3c..1f774ec4ab27 100644
338 +--- a/arch/arm/mach-omap2/timer.c
339 ++++ b/arch/arm/mach-omap2/timer.c
340 +@@ -136,12 +136,6 @@ static struct clock_event_device clockevent_gpt = {
341 + .tick_resume = omap2_gp_timer_shutdown,
342 + };
343 +
344 +-static struct property device_disabled = {
345 +- .name = "status",
346 +- .length = sizeof("disabled"),
347 +- .value = "disabled",
348 +-};
349 +-
350 + static const struct of_device_id omap_timer_match[] __initconst = {
351 + { .compatible = "ti,omap2420-timer", },
352 + { .compatible = "ti,omap3430-timer", },
353 +@@ -183,8 +177,17 @@ static struct device_node * __init omap_get_timer_dt(const struct of_device_id *
354 + of_get_property(np, "ti,timer-secure", NULL)))
355 + continue;
356 +
357 +- if (!of_device_is_compatible(np, "ti,omap-counter32k"))
358 +- of_add_property(np, &device_disabled);
359 ++ if (!of_device_is_compatible(np, "ti,omap-counter32k")) {
360 ++ struct property *prop;
361 ++
362 ++ prop = kzalloc(sizeof(*prop), GFP_KERNEL);
363 ++ if (!prop)
364 ++ return NULL;
365 ++ prop->name = "status";
366 ++ prop->value = "disabled";
367 ++ prop->length = strlen(prop->value);
368 ++ of_add_property(np, prop);
369 ++ }
370 + return np;
371 + }
372 +
373 +diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
374 +index 8ca94d379bc3..6f75c32dc3bf 100644
375 +--- a/arch/arm/plat-omap/dmtimer.c
376 ++++ b/arch/arm/plat-omap/dmtimer.c
377 +@@ -854,11 +854,8 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
378 + timer->irq = irq->start;
379 + timer->pdev = pdev;
380 +
381 +- /* Skip pm_runtime_enable for OMAP1 */
382 +- if (!(timer->capability & OMAP_TIMER_NEEDS_RESET)) {
383 +- pm_runtime_enable(dev);
384 +- pm_runtime_irq_safe(dev);
385 +- }
386 ++ pm_runtime_enable(dev);
387 ++ pm_runtime_irq_safe(dev);
388 +
389 + if (!timer->reserved) {
390 + ret = pm_runtime_get_sync(dev);
391 +diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
392 +index 499e8de33a00..fbbd7fb83fd6 100644
393 +--- a/arch/arm64/include/asm/spinlock.h
394 ++++ b/arch/arm64/include/asm/spinlock.h
395 +@@ -94,8 +94,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
396 + " cbnz %w1, 1f\n"
397 + " add %w1, %w0, %3\n"
398 + " casa %w0, %w1, %2\n"
399 +- " and %w1, %w1, #0xffff\n"
400 +- " eor %w1, %w1, %w0, lsr #16\n"
401 ++ " sub %w1, %w1, %3\n"
402 ++ " eor %w1, %w1, %w0\n"
403 + "1:")
404 + : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
405 + : "I" (1 << TICKET_SHIFT)
406 +diff --git a/arch/m68k/coldfire/device.c b/arch/m68k/coldfire/device.c
407 +index 71ea4c02795d..8a2dc0af4cad 100644
408 +--- a/arch/m68k/coldfire/device.c
409 ++++ b/arch/m68k/coldfire/device.c
410 +@@ -135,7 +135,11 @@ static struct platform_device mcf_fec0 = {
411 + .id = 0,
412 + .num_resources = ARRAY_SIZE(mcf_fec0_resources),
413 + .resource = mcf_fec0_resources,
414 +- .dev.platform_data = FEC_PDATA,
415 ++ .dev = {
416 ++ .dma_mask = &mcf_fec0.dev.coherent_dma_mask,
417 ++ .coherent_dma_mask = DMA_BIT_MASK(32),
418 ++ .platform_data = FEC_PDATA,
419 ++ }
420 + };
421 +
422 + #ifdef MCFFEC_BASE1
423 +@@ -167,7 +171,11 @@ static struct platform_device mcf_fec1 = {
424 + .id = 1,
425 + .num_resources = ARRAY_SIZE(mcf_fec1_resources),
426 + .resource = mcf_fec1_resources,
427 +- .dev.platform_data = FEC_PDATA,
428 ++ .dev = {
429 ++ .dma_mask = &mcf_fec1.dev.coherent_dma_mask,
430 ++ .coherent_dma_mask = DMA_BIT_MASK(32),
431 ++ .platform_data = FEC_PDATA,
432 ++ }
433 + };
434 + #endif /* MCFFEC_BASE1 */
435 + #endif /* CONFIG_FEC */
436 +diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
437 +index 10d0b2140375..63d35076722d 100644
438 +--- a/arch/mips/cavium-octeon/octeon-irq.c
439 ++++ b/arch/mips/cavium-octeon/octeon-irq.c
440 +@@ -2240,7 +2240,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
441 +
442 + parent_irq = irq_of_parse_and_map(ciu_node, 0);
443 + if (!parent_irq) {
444 +- pr_err("ERROR: Couldn't acquire parent_irq for %s\n.",
445 ++ pr_err("ERROR: Couldn't acquire parent_irq for %s\n",
446 + ciu_node->name);
447 + return -EINVAL;
448 + }
449 +@@ -2252,7 +2252,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
450 +
451 + addr = of_get_address(ciu_node, 0, NULL, NULL);
452 + if (!addr) {
453 +- pr_err("ERROR: Couldn't acquire reg(0) %s\n.", ciu_node->name);
454 ++ pr_err("ERROR: Couldn't acquire reg(0) %s\n", ciu_node->name);
455 + return -EINVAL;
456 + }
457 + host_data->raw_reg = (u64)phys_to_virt(
458 +@@ -2260,7 +2260,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
459 +
460 + addr = of_get_address(ciu_node, 1, NULL, NULL);
461 + if (!addr) {
462 +- pr_err("ERROR: Couldn't acquire reg(1) %s\n.", ciu_node->name);
463 ++ pr_err("ERROR: Couldn't acquire reg(1) %s\n", ciu_node->name);
464 + return -EINVAL;
465 + }
466 + host_data->en_reg = (u64)phys_to_virt(
467 +@@ -2268,7 +2268,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
468 +
469 + r = of_property_read_u32(ciu_node, "cavium,max-bits", &val);
470 + if (r) {
471 +- pr_err("ERROR: Couldn't read cavium,max-bits from %s\n.",
472 ++ pr_err("ERROR: Couldn't read cavium,max-bits from %s\n",
473 + ciu_node->name);
474 + return r;
475 + }
476 +@@ -2278,7 +2278,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
477 + &octeon_irq_domain_cib_ops,
478 + host_data);
479 + if (!cib_domain) {
480 +- pr_err("ERROR: Couldn't irq_domain_add_linear()\n.");
481 ++ pr_err("ERROR: Couldn't irq_domain_add_linear()\n");
482 + return -ENOMEM;
483 + }
484 +
485 +diff --git a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
486 +index aa3800c82332..d99ca862dae3 100644
487 +--- a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
488 ++++ b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
489 +@@ -167,7 +167,7 @@
490 + #define AR71XX_AHB_DIV_MASK 0x7
491 +
492 + #define AR724X_PLL_REG_CPU_CONFIG 0x00
493 +-#define AR724X_PLL_REG_PCIE_CONFIG 0x18
494 ++#define AR724X_PLL_REG_PCIE_CONFIG 0x10
495 +
496 + #define AR724X_PLL_FB_SHIFT 0
497 + #define AR724X_PLL_FB_MASK 0x3ff
498 +diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
499 +index c3d2d2c05fdb..a9958b4d9194 100644
500 +--- a/arch/mips/kernel/ptrace.c
501 ++++ b/arch/mips/kernel/ptrace.c
502 +@@ -483,7 +483,7 @@ static int fpr_get_msa(struct task_struct *target,
503 + /*
504 + * Copy the floating-point context to the supplied NT_PRFPREG buffer.
505 + * Choose the appropriate helper for general registers, and then copy
506 +- * the FCSR register separately.
507 ++ * the FCSR and FIR registers separately.
508 + */
509 + static int fpr_get(struct task_struct *target,
510 + const struct user_regset *regset,
511 +@@ -491,6 +491,7 @@ static int fpr_get(struct task_struct *target,
512 + void *kbuf, void __user *ubuf)
513 + {
514 + const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
515 ++ const int fir_pos = fcr31_pos + sizeof(u32);
516 + int err;
517 +
518 + if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
519 +@@ -503,6 +504,12 @@ static int fpr_get(struct task_struct *target,
520 + err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
521 + &target->thread.fpu.fcr31,
522 + fcr31_pos, fcr31_pos + sizeof(u32));
523 ++ if (err)
524 ++ return err;
525 ++
526 ++ err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
527 ++ &boot_cpu_data.fpu_id,
528 ++ fir_pos, fir_pos + sizeof(u32));
529 +
530 + return err;
531 + }
532 +@@ -551,7 +558,8 @@ static int fpr_set_msa(struct task_struct *target,
533 + /*
534 + * Copy the supplied NT_PRFPREG buffer to the floating-point context.
535 + * Choose the appropriate helper for general registers, and then copy
536 +- * the FCSR register separately.
537 ++ * the FCSR register separately. Ignore the incoming FIR register
538 ++ * contents though, as the register is read-only.
539 + *
540 + * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
541 + * which is supposed to have been guaranteed by the kernel before
542 +@@ -565,6 +573,7 @@ static int fpr_set(struct task_struct *target,
543 + const void *kbuf, const void __user *ubuf)
544 + {
545 + const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
546 ++ const int fir_pos = fcr31_pos + sizeof(u32);
547 + u32 fcr31;
548 + int err;
549 +
550 +@@ -592,6 +601,11 @@ static int fpr_set(struct task_struct *target,
551 + ptrace_setfcr31(target, fcr31);
552 + }
553 +
554 ++ if (count > 0)
555 ++ err = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
556 ++ fir_pos,
557 ++ fir_pos + sizeof(u32));
558 ++
559 + return err;
560 + }
561 +
562 +@@ -816,7 +830,7 @@ long arch_ptrace(struct task_struct *child, long request,
563 + fregs = get_fpu_regs(child);
564 +
565 + #ifdef CONFIG_32BIT
566 +- if (test_thread_flag(TIF_32BIT_FPREGS)) {
567 ++ if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
568 + /*
569 + * The odd registers are actually the high
570 + * order bits of the values stored in the even
571 +@@ -905,7 +919,7 @@ long arch_ptrace(struct task_struct *child, long request,
572 +
573 + init_fp_ctx(child);
574 + #ifdef CONFIG_32BIT
575 +- if (test_thread_flag(TIF_32BIT_FPREGS)) {
576 ++ if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
577 + /*
578 + * The odd registers are actually the high
579 + * order bits of the values stored in the even
580 +diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
581 +index 283b5a1967d1..b4b7e02443e7 100644
582 +--- a/arch/mips/kernel/ptrace32.c
583 ++++ b/arch/mips/kernel/ptrace32.c
584 +@@ -97,7 +97,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
585 + break;
586 + }
587 + fregs = get_fpu_regs(child);
588 +- if (test_thread_flag(TIF_32BIT_FPREGS)) {
589 ++ if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
590 + /*
591 + * The odd registers are actually the high
592 + * order bits of the values stored in the even
593 +@@ -203,7 +203,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
594 + sizeof(child->thread.fpu));
595 + child->thread.fpu.fcr31 = 0;
596 + }
597 +- if (test_thread_flag(TIF_32BIT_FPREGS)) {
598 ++ if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
599 + /*
600 + * The odd registers are actually the high
601 + * order bits of the values stored in the even
602 +diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
603 +index a017b23ee4aa..8a95c3d76a9a 100644
604 +--- a/arch/mips/kvm/mips.c
605 ++++ b/arch/mips/kvm/mips.c
606 +@@ -40,7 +40,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
607 + { "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU },
608 + { "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU },
609 + { "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU },
610 +- { "cop_unsuable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
611 ++ { "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
612 + { "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU },
613 + { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU },
614 + { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU },
615 +diff --git a/arch/mips/txx9/rbtx4939/setup.c b/arch/mips/txx9/rbtx4939/setup.c
616 +index 37030409745c..586ca7ea3e7c 100644
617 +--- a/arch/mips/txx9/rbtx4939/setup.c
618 ++++ b/arch/mips/txx9/rbtx4939/setup.c
619 +@@ -186,7 +186,7 @@ static void __init rbtx4939_update_ioc_pen(void)
620 +
621 + #define RBTX4939_MAX_7SEGLEDS 8
622 +
623 +-#if IS_ENABLED(CONFIG_LEDS_CLASS)
624 ++#if IS_BUILTIN(CONFIG_LEDS_CLASS)
625 + static u8 led_val[RBTX4939_MAX_7SEGLEDS];
626 + struct rbtx4939_led_data {
627 + struct led_classdev cdev;
628 +@@ -261,7 +261,7 @@ static inline void rbtx4939_led_setup(void)
629 +
630 + static void __rbtx4939_7segled_putc(unsigned int pos, unsigned char val)
631 + {
632 +-#if IS_ENABLED(CONFIG_LEDS_CLASS)
633 ++#if IS_BUILTIN(CONFIG_LEDS_CLASS)
634 + unsigned long flags;
635 + local_irq_save(flags);
636 + /* bit7: reserved for LED class */
637 +diff --git a/arch/powerpc/include/asm/irq_work.h b/arch/powerpc/include/asm/irq_work.h
638 +index 744fd54de374..1bcc84903930 100644
639 +--- a/arch/powerpc/include/asm/irq_work.h
640 ++++ b/arch/powerpc/include/asm/irq_work.h
641 +@@ -5,5 +5,6 @@ static inline bool arch_irq_work_has_interrupt(void)
642 + {
643 + return true;
644 + }
645 ++extern void arch_irq_work_raise(void);
646 +
647 + #endif /* _ASM_POWERPC_IRQ_WORK_H */
648 +diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
649 +index 428563b195c3..767ac1572c02 100644
650 +--- a/arch/powerpc/kvm/book3s_hv.c
651 ++++ b/arch/powerpc/kvm/book3s_hv.c
652 +@@ -3002,15 +3002,17 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
653 + goto up_out;
654 +
655 + psize = vma_kernel_pagesize(vma);
656 +- porder = __ilog2(psize);
657 +
658 + up_read(&current->mm->mmap_sem);
659 +
660 + /* We can handle 4k, 64k or 16M pages in the VRMA */
661 +- err = -EINVAL;
662 +- if (!(psize == 0x1000 || psize == 0x10000 ||
663 +- psize == 0x1000000))
664 +- goto out_srcu;
665 ++ if (psize >= 0x1000000)
666 ++ psize = 0x1000000;
667 ++ else if (psize >= 0x10000)
668 ++ psize = 0x10000;
669 ++ else
670 ++ psize = 0x1000;
671 ++ porder = __ilog2(psize);
672 +
673 + /* Update VRMASD field in the LPCR */
674 + senc = slb_pgsize_encoding(psize);
675 +diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
676 +index 669a15e7fa76..3c4faa4c2742 100644
677 +--- a/arch/powerpc/mm/numa.c
678 ++++ b/arch/powerpc/mm/numa.c
679 +@@ -551,7 +551,7 @@ static int numa_setup_cpu(unsigned long lcpu)
680 + nid = of_node_to_nid_single(cpu);
681 +
682 + out_present:
683 +- if (nid < 0 || !node_online(nid))
684 ++ if (nid < 0 || !node_possible(nid))
685 + nid = first_online_node;
686 +
687 + map_cpu_to_node(lcpu, nid);
688 +@@ -951,6 +951,32 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
689 + NODE_DATA(nid)->node_spanned_pages = spanned_pages;
690 + }
691 +
692 ++static void __init find_possible_nodes(void)
693 ++{
694 ++ struct device_node *rtas;
695 ++ u32 numnodes, i;
696 ++
697 ++ if (min_common_depth <= 0)
698 ++ return;
699 ++
700 ++ rtas = of_find_node_by_path("/rtas");
701 ++ if (!rtas)
702 ++ return;
703 ++
704 ++ if (of_property_read_u32_index(rtas,
705 ++ "ibm,max-associativity-domains",
706 ++ min_common_depth, &numnodes))
707 ++ goto out;
708 ++
709 ++ for (i = 0; i < numnodes; i++) {
710 ++ if (!node_possible(i))
711 ++ node_set(i, node_possible_map);
712 ++ }
713 ++
714 ++out:
715 ++ of_node_put(rtas);
716 ++}
717 ++
718 + void __init initmem_init(void)
719 + {
720 + int nid, cpu;
721 +@@ -966,12 +992,15 @@ void __init initmem_init(void)
722 + memblock_dump_all();
723 +
724 + /*
725 +- * Reduce the possible NUMA nodes to the online NUMA nodes,
726 +- * since we do not support node hotplug. This ensures that we
727 +- * lower the maximum NUMA node ID to what is actually present.
728 ++ * Modify the set of possible NUMA nodes to reflect information
729 ++ * available about the set of online nodes, and the set of nodes
730 ++ * that we expect to make use of for this platform's affinity
731 ++ * calculations.
732 + */
733 + nodes_and(node_possible_map, node_possible_map, node_online_map);
734 +
735 ++ find_possible_nodes();
736 ++
737 + for_each_online_node(nid) {
738 + unsigned long start_pfn, end_pfn;
739 +
740 +@@ -1304,6 +1333,40 @@ static long vphn_get_associativity(unsigned long cpu,
741 + return rc;
742 + }
743 +
744 ++static inline int find_and_online_cpu_nid(int cpu)
745 ++{
746 ++ __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
747 ++ int new_nid;
748 ++
749 ++ /* Use associativity from first thread for all siblings */
750 ++ vphn_get_associativity(cpu, associativity);
751 ++ new_nid = associativity_to_nid(associativity);
752 ++ if (new_nid < 0 || !node_possible(new_nid))
753 ++ new_nid = first_online_node;
754 ++
755 ++ if (NODE_DATA(new_nid) == NULL) {
756 ++#ifdef CONFIG_MEMORY_HOTPLUG
757 ++ /*
758 ++ * Need to ensure that NODE_DATA is initialized for a node from
759 ++ * available memory (see memblock_alloc_try_nid). If unable to
760 ++ * init the node, then default to nearest node that has memory
761 ++ * installed.
762 ++ */
763 ++ if (try_online_node(new_nid))
764 ++ new_nid = first_online_node;
765 ++#else
766 ++ /*
767 ++ * Default to using the nearest node that has memory installed.
768 ++ * Otherwise, it would be necessary to patch the kernel MM code
769 ++ * to deal with more memoryless-node error conditions.
770 ++ */
771 ++ new_nid = first_online_node;
772 ++#endif
773 ++ }
774 ++
775 ++ return new_nid;
776 ++}
777 ++
778 + /*
779 + * Update the CPU maps and sysfs entries for a single CPU when its NUMA
780 + * characteristics change. This function doesn't perform any locking and is
781 +@@ -1369,7 +1432,6 @@ int arch_update_cpu_topology(void)
782 + {
783 + unsigned int cpu, sibling, changed = 0;
784 + struct topology_update_data *updates, *ud;
785 +- __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
786 + cpumask_t updated_cpus;
787 + struct device *dev;
788 + int weight, new_nid, i = 0;
789 +@@ -1404,11 +1466,7 @@ int arch_update_cpu_topology(void)
790 + continue;
791 + }
792 +
793 +- /* Use associativity from first thread for all siblings */
794 +- vphn_get_associativity(cpu, associativity);
795 +- new_nid = associativity_to_nid(associativity);
796 +- if (new_nid < 0 || !node_online(new_nid))
797 +- new_nid = first_online_node;
798 ++ new_nid = find_and_online_cpu_nid(cpu);
799 +
800 + if (new_nid == numa_cpu_lookup_table[cpu]) {
801 + cpumask_andnot(&cpu_associativity_changes_mask,
802 +diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
803 +index 2d66a8446198..345e255c06a2 100644
804 +--- a/arch/powerpc/net/bpf_jit_comp.c
805 ++++ b/arch/powerpc/net/bpf_jit_comp.c
806 +@@ -329,6 +329,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
807 + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
808 + PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
809 + break;
810 ++ case BPF_LDX | BPF_W | BPF_ABS: /* A = *((u32 *)(seccomp_data + K)); */
811 ++ PPC_LWZ_OFFS(r_A, r_skb, K);
812 ++ break;
813 + case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
814 + PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
815 + break;
816 +diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
817 +index 4eba7c00ea1f..30e2e8efbe6b 100644
818 +--- a/arch/powerpc/perf/core-book3s.c
819 ++++ b/arch/powerpc/perf/core-book3s.c
820 +@@ -448,6 +448,16 @@ static void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
821 + /* invalid entry */
822 + continue;
823 +
824 ++ /*
825 ++ * BHRB rolling buffer could very much contain the kernel
826 ++ * addresses at this point. Check the privileges before
827 ++ * exporting it to userspace (avoid exposure of regions
828 ++ * where we could have speculative execution)
829 ++ */
830 ++ if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN) &&
831 ++ is_kernel_addr(addr))
832 ++ continue;
833 ++
834 + /* Branches are read most recent first (ie. mfbhrb 0 is
835 + * the most recent branch).
836 + * There are two types of valid entries:
837 +@@ -1188,6 +1198,7 @@ static void power_pmu_disable(struct pmu *pmu)
838 + */
839 + write_mmcr0(cpuhw, val);
840 + mb();
841 ++ isync();
842 +
843 + /*
844 + * Disable instruction sampling if it was enabled
845 +@@ -1196,12 +1207,26 @@ static void power_pmu_disable(struct pmu *pmu)
846 + mtspr(SPRN_MMCRA,
847 + cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
848 + mb();
849 ++ isync();
850 + }
851 +
852 + cpuhw->disabled = 1;
853 + cpuhw->n_added = 0;
854 +
855 + ebb_switch_out(mmcr0);
856 ++
857 ++#ifdef CONFIG_PPC64
858 ++ /*
859 ++ * These are readable by userspace, may contain kernel
860 ++ * addresses and are not switched by context switch, so clear
861 ++ * them now to avoid leaking anything to userspace in general
862 ++ * including to another process.
863 ++ */
864 ++ if (ppmu->flags & PPMU_ARCH_207S) {
865 ++ mtspr(SPRN_SDAR, 0);
866 ++ mtspr(SPRN_SIAR, 0);
867 ++ }
868 ++#endif
869 + }
870 +
871 + local_irq_restore(flags);
872 +diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
873 +index 2a0452e364ba..d11f931cac69 100644
874 +--- a/arch/powerpc/sysdev/mpic.c
875 ++++ b/arch/powerpc/sysdev/mpic.c
876 +@@ -626,7 +626,7 @@ static inline u32 mpic_physmask(u32 cpumask)
877 + int i;
878 + u32 mask = 0;
879 +
880 +- for (i = 0; i < min(32, NR_CPUS); ++i, cpumask >>= 1)
881 ++ for (i = 0; i < min(32, NR_CPUS) && cpu_possible(i); ++i, cpumask >>= 1)
882 + mask |= (cpumask & 1) << get_hard_smp_processor_id(i);
883 + return mask;
884 + }
885 +diff --git a/arch/s390/include/asm/nospec-insn.h b/arch/s390/include/asm/nospec-insn.h
886 +index 087fc9b972c5..9a56e738d645 100644
887 +--- a/arch/s390/include/asm/nospec-insn.h
888 ++++ b/arch/s390/include/asm/nospec-insn.h
889 +@@ -2,10 +2,15 @@
890 + #ifndef _ASM_S390_NOSPEC_ASM_H
891 + #define _ASM_S390_NOSPEC_ASM_H
892 +
893 ++#include <asm/alternative-asm.h>
894 ++#include <asm/asm-offsets.h>
895 ++
896 + #ifdef __ASSEMBLY__
897 +
898 + #ifdef CONFIG_EXPOLINE
899 +
900 ++_LC_BR_R1 = __LC_BR_R1
901 ++
902 + /*
903 + * The expoline macros are used to create thunks in the same format
904 + * as gcc generates them. The 'comdat' section flag makes sure that
905 +@@ -101,13 +106,21 @@
906 + .endm
907 +
908 + .macro __THUNK_EX_BR reg,ruse
909 ++ # Be very careful when adding instructions to this macro!
910 ++ # The ALTERNATIVE replacement code has a .+10 which targets
911 ++ # the "br \reg" after the code has been patched.
912 + #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
913 + exrl 0,555f
914 + j .
915 + #else
916 ++ .ifc \reg,%r1
917 ++ ALTERNATIVE "ex %r0,_LC_BR_R1", ".insn ril,0xc60000000000,0,.+10", 35
918 ++ j .
919 ++ .else
920 + larl \ruse,555f
921 + ex 0,0(\ruse)
922 + j .
923 ++ .endif
924 + #endif
925 + 555: br \reg
926 + .endm
927 +diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
928 +index dc6c9c604543..39572281e213 100644
929 +--- a/arch/s390/kernel/asm-offsets.c
930 ++++ b/arch/s390/kernel/asm-offsets.c
931 +@@ -170,6 +170,7 @@ int main(void)
932 + OFFSET(__LC_MACHINE_FLAGS, _lowcore, machine_flags);
933 + OFFSET(__LC_GMAP, _lowcore, gmap);
934 + OFFSET(__LC_PASTE, _lowcore, paste);
935 ++ OFFSET(__LC_BR_R1, _lowcore, br_r1_trampoline);
936 + /* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
937 + OFFSET(__LC_DUMP_REIPL, _lowcore, ipib);
938 + /* hardware defined lowcore locations 0x1000 - 0x18ff */
939 +diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
940 +index e499370fbccb..6c1c7d399bf9 100644
941 +--- a/arch/s390/kernel/mcount.S
942 ++++ b/arch/s390/kernel/mcount.S
943 +@@ -8,12 +8,16 @@
944 + #include <linux/linkage.h>
945 + #include <asm/asm-offsets.h>
946 + #include <asm/ftrace.h>
947 ++#include <asm/nospec-insn.h>
948 + #include <asm/ptrace.h>
949 +
950 ++ GEN_BR_THUNK %r1
951 ++ GEN_BR_THUNK %r14
952 ++
953 + .section .kprobes.text, "ax"
954 +
955 + ENTRY(ftrace_stub)
956 +- br %r14
957 ++ BR_EX %r14
958 +
959 + #define STACK_FRAME_SIZE (STACK_FRAME_OVERHEAD + __PT_SIZE)
960 + #define STACK_PTREGS (STACK_FRAME_OVERHEAD)
961 +@@ -21,7 +25,7 @@ ENTRY(ftrace_stub)
962 + #define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW)
963 +
964 + ENTRY(_mcount)
965 +- br %r14
966 ++ BR_EX %r14
967 +
968 + ENTRY(ftrace_caller)
969 + .globl ftrace_regs_caller
970 +@@ -49,7 +53,7 @@ ENTRY(ftrace_caller)
971 + #endif
972 + lgr %r3,%r14
973 + la %r5,STACK_PTREGS(%r15)
974 +- basr %r14,%r1
975 ++ BASR_EX %r14,%r1
976 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER
977 + # The j instruction gets runtime patched to a nop instruction.
978 + # See ftrace_enable_ftrace_graph_caller.
979 +@@ -64,7 +68,7 @@ ftrace_graph_caller_end:
980 + #endif
981 + lg %r1,(STACK_PTREGS_PSW+8)(%r15)
982 + lmg %r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15)
983 +- br %r1
984 ++ BR_EX %r1
985 +
986 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER
987 +
988 +@@ -77,6 +81,6 @@ ENTRY(return_to_handler)
989 + aghi %r15,STACK_FRAME_OVERHEAD
990 + lgr %r14,%r2
991 + lmg %r2,%r5,32(%r15)
992 +- br %r14
993 ++ BR_EX %r14
994 +
995 + #endif
996 +diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
997 +index 13047a4facd2..5a9017ba26ab 100644
998 +--- a/arch/sh/kernel/entry-common.S
999 ++++ b/arch/sh/kernel/entry-common.S
1000 +@@ -255,7 +255,7 @@ debug_trap:
1001 + mov.l @r8, r8
1002 + jsr @r8
1003 + nop
1004 +- bra __restore_all
1005 ++ bra ret_from_exception
1006 + nop
1007 + CFI_ENDPROC
1008 +
1009 +diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
1010 +index f2fbf9e16faf..29070c9a70f9 100644
1011 +--- a/arch/sparc/include/asm/atomic_64.h
1012 ++++ b/arch/sparc/include/asm/atomic_64.h
1013 +@@ -74,7 +74,11 @@ ATOMIC_OP(xor)
1014 + #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
1015 +
1016 + #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
1017 +-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1018 ++
1019 ++static inline int atomic_xchg(atomic_t *v, int new)
1020 ++{
1021 ++ return xchg(&v->counter, new);
1022 ++}
1023 +
1024 + static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1025 + {
1026 +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
1027 +index a3e1f8497f8c..deddc9b93299 100644
1028 +--- a/arch/x86/kernel/apic/apic.c
1029 ++++ b/arch/x86/kernel/apic/apic.c
1030 +@@ -1368,7 +1368,7 @@ void setup_local_APIC(void)
1031 + * TODO: set up through-local-APIC from through-I/O-APIC? --macro
1032 + */
1033 + value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
1034 +- if (!cpu && (pic_mode || !value)) {
1035 ++ if (!cpu && (pic_mode || !value || skip_ioapic_setup)) {
1036 + value = APIC_DM_EXTINT;
1037 + apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", cpu);
1038 + } else {
1039 +diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
1040 +index 1f4acd68b98b..74b8dcd1bbdc 100644
1041 +--- a/arch/x86/kernel/devicetree.c
1042 ++++ b/arch/x86/kernel/devicetree.c
1043 +@@ -11,6 +11,7 @@
1044 + #include <linux/of_address.h>
1045 + #include <linux/of_platform.h>
1046 + #include <linux/of_irq.h>
1047 ++#include <linux/libfdt.h>
1048 + #include <linux/slab.h>
1049 + #include <linux/pci.h>
1050 + #include <linux/of_pci.h>
1051 +@@ -199,19 +200,22 @@ static struct of_ioapic_type of_ioapic_type[] =
1052 + static int dt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
1053 + unsigned int nr_irqs, void *arg)
1054 + {
1055 +- struct of_phandle_args *irq_data = (void *)arg;
1056 ++ struct irq_fwspec *fwspec = (struct irq_fwspec *)arg;
1057 + struct of_ioapic_type *it;
1058 + struct irq_alloc_info tmp;
1059 ++ int type_index;
1060 +
1061 +- if (WARN_ON(irq_data->args_count < 2))
1062 ++ if (WARN_ON(fwspec->param_count < 2))
1063 + return -EINVAL;
1064 +- if (irq_data->args[1] >= ARRAY_SIZE(of_ioapic_type))
1065 ++
1066 ++ type_index = fwspec->param[1];
1067 ++ if (type_index >= ARRAY_SIZE(of_ioapic_type))
1068 + return -EINVAL;
1069 +
1070 +- it = &of_ioapic_type[irq_data->args[1]];
1071 ++ it = &of_ioapic_type[type_index];
1072 + ioapic_set_alloc_attr(&tmp, NUMA_NO_NODE, it->trigger, it->polarity);
1073 + tmp.ioapic_id = mpc_ioapic_id(mp_irqdomain_ioapic_idx(domain));
1074 +- tmp.ioapic_pin = irq_data->args[0];
1075 ++ tmp.ioapic_pin = fwspec->param[0];
1076 +
1077 + return mp_irqdomain_alloc(domain, virq, nr_irqs, &tmp);
1078 + }
1079 +@@ -276,14 +280,15 @@ static void __init x86_flattree_get_config(void)
1080 +
1081 + map_len = max(PAGE_SIZE - (initial_dtb & ~PAGE_MASK), (u64)128);
1082 +
1083 +- initial_boot_params = dt = early_memremap(initial_dtb, map_len);
1084 +- size = of_get_flat_dt_size();
1085 ++ dt = early_memremap(initial_dtb, map_len);
1086 ++ size = fdt_totalsize(dt);
1087 + if (map_len < size) {
1088 + early_memunmap(dt, map_len);
1089 +- initial_boot_params = dt = early_memremap(initial_dtb, size);
1090 ++ dt = early_memremap(initial_dtb, size);
1091 + map_len = size;
1092 + }
1093 +
1094 ++ early_init_dt_verify(dt);
1095 + unflatten_and_copy_device_tree();
1096 + early_memunmap(dt, map_len);
1097 + }
1098 +diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
1099 +index 00c7878043ef..48ca93242bfd 100644
1100 +--- a/arch/x86/kernel/smpboot.c
1101 ++++ b/arch/x86/kernel/smpboot.c
1102 +@@ -1344,6 +1344,7 @@ static void remove_siblinginfo(int cpu)
1103 + cpumask_clear(topology_core_cpumask(cpu));
1104 + c->phys_proc_id = 0;
1105 + c->cpu_core_id = 0;
1106 ++ c->booted_cores = 0;
1107 + cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
1108 + }
1109 +
1110 +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
1111 +index 1c96f09367ae..a1afd80a68aa 100644
1112 +--- a/arch/x86/kvm/lapic.c
1113 ++++ b/arch/x86/kvm/lapic.c
1114 +@@ -288,8 +288,16 @@ void kvm_apic_set_version(struct kvm_vcpu *vcpu)
1115 + if (!kvm_vcpu_has_lapic(vcpu))
1116 + return;
1117 +
1118 ++ /*
1119 ++ * KVM emulates 82093AA datasheet (with in-kernel IOAPIC implementation)
1120 ++ * which doesn't have EOI register; Some buggy OSes (e.g. Windows with
1121 ++ * Hyper-V role) disable EOI broadcast in lapic not checking for IOAPIC
1122 ++ * version first and level-triggered interrupts never get EOIed in
1123 ++ * IOAPIC.
1124 ++ */
1125 + feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
1126 +- if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))))
1127 ++ if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))) &&
1128 ++ !ioapic_in_kernel(vcpu->kvm))
1129 + v |= APIC_LVR_DIRECTED_EOI;
1130 + apic_set_reg(apic, APIC_LVR, v);
1131 + }
1132 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
1133 +index 528b4352fa99..a750fc7c7458 100644
1134 +--- a/arch/x86/kvm/vmx.c
1135 ++++ b/arch/x86/kvm/vmx.c
1136 +@@ -2319,6 +2319,8 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
1137 + return;
1138 + }
1139 +
1140 ++ WARN_ON_ONCE(vmx->emulation_required);
1141 ++
1142 + if (kvm_exception_is_soft(nr)) {
1143 + vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
1144 + vmx->vcpu.arch.event_exit_inst_len);
1145 +@@ -6037,12 +6039,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
1146 + goto out;
1147 + }
1148 +
1149 +- if (err != EMULATE_DONE) {
1150 +- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1151 +- vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
1152 +- vcpu->run->internal.ndata = 0;
1153 +- return 0;
1154 +- }
1155 ++ if (err != EMULATE_DONE)
1156 ++ goto emulation_error;
1157 ++
1158 ++ if (vmx->emulation_required && !vmx->rmode.vm86_active &&
1159 ++ vcpu->arch.exception.pending)
1160 ++ goto emulation_error;
1161 +
1162 + if (vcpu->arch.halt_request) {
1163 + vcpu->arch.halt_request = 0;
1164 +@@ -6058,6 +6060,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
1165 +
1166 + out:
1167 + return ret;
1168 ++
1169 ++emulation_error:
1170 ++ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1171 ++ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
1172 ++ vcpu->run->internal.ndata = 0;
1173 ++ return 0;
1174 + }
1175 +
1176 + static int __grow_ple_window(int val)
1177 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1178 +index f37f0c72b22a..9cea09597d66 100644
1179 +--- a/arch/x86/kvm/x86.c
1180 ++++ b/arch/x86/kvm/x86.c
1181 +@@ -3973,13 +3973,14 @@ long kvm_arch_vm_ioctl(struct file *filp,
1182 + mutex_unlock(&kvm->lock);
1183 + break;
1184 + case KVM_XEN_HVM_CONFIG: {
1185 ++ struct kvm_xen_hvm_config xhc;
1186 + r = -EFAULT;
1187 +- if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
1188 +- sizeof(struct kvm_xen_hvm_config)))
1189 ++ if (copy_from_user(&xhc, argp, sizeof(xhc)))
1190 + goto out;
1191 + r = -EINVAL;
1192 +- if (kvm->arch.xen_hvm_config.flags)
1193 ++ if (xhc.flags)
1194 + goto out;
1195 ++ memcpy(&kvm->arch.xen_hvm_config, &xhc, sizeof(xhc));
1196 + r = 0;
1197 + break;
1198 + }
1199 +diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
1200 +index c013326a0d7a..08e94b6139ab 100644
1201 +--- a/arch/x86/mm/pgtable.c
1202 ++++ b/arch/x86/mm/pgtable.c
1203 +@@ -1,5 +1,6 @@
1204 + #include <linux/mm.h>
1205 + #include <linux/gfp.h>
1206 ++#include <linux/hugetlb.h>
1207 + #include <asm/pgalloc.h>
1208 + #include <asm/pgtable.h>
1209 + #include <asm/tlb.h>
1210 +@@ -600,6 +601,10 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
1211 + (mtrr != MTRR_TYPE_WRBACK))
1212 + return 0;
1213 +
1214 ++ /* Bail out if we are we on a populated non-leaf entry: */
1215 ++ if (pud_present(*pud) && !pud_huge(*pud))
1216 ++ return 0;
1217 ++
1218 + prot = pgprot_4k_2_large(prot);
1219 +
1220 + set_pte((pte_t *)pud, pfn_pte(
1221 +@@ -628,6 +633,10 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
1222 + return 0;
1223 + }
1224 +
1225 ++ /* Bail out if we are we on a populated non-leaf entry: */
1226 ++ if (pmd_present(*pmd) && !pmd_huge(*pmd))
1227 ++ return 0;
1228 ++
1229 + prot = pgprot_4k_2_large(prot);
1230 +
1231 + set_pte((pte_t *)pmd, pfn_pte(
1232 +diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c
1233 +index 291226b952a9..77ac4e4deb16 100644
1234 +--- a/arch/x86/power/hibernate_32.c
1235 ++++ b/arch/x86/power/hibernate_32.c
1236 +@@ -142,7 +142,7 @@ static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
1237 + #endif
1238 + }
1239 +
1240 +-int swsusp_arch_resume(void)
1241 ++asmlinkage int swsusp_arch_resume(void)
1242 + {
1243 + int error;
1244 +
1245 +diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
1246 +index 009947d419a6..0e0c773edffc 100644
1247 +--- a/arch/x86/power/hibernate_64.c
1248 ++++ b/arch/x86/power/hibernate_64.c
1249 +@@ -78,7 +78,7 @@ static int set_up_temporary_mappings(void)
1250 + return 0;
1251 + }
1252 +
1253 +-int swsusp_arch_resume(void)
1254 ++asmlinkage int swsusp_arch_resume(void)
1255 + {
1256 + int error;
1257 +
1258 +diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
1259 +index 8ea8211b2d58..f8bb0e4d035a 100644
1260 +--- a/drivers/acpi/acpi_pad.c
1261 ++++ b/drivers/acpi/acpi_pad.c
1262 +@@ -108,6 +108,7 @@ static void round_robin_cpu(unsigned int tsk_index)
1263 + cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
1264 + if (cpumask_empty(tmp)) {
1265 + mutex_unlock(&round_robin_lock);
1266 ++ free_cpumask_var(tmp);
1267 + return;
1268 + }
1269 + for_each_cpu(cpu, tmp) {
1270 +@@ -125,6 +126,8 @@ static void round_robin_cpu(unsigned int tsk_index)
1271 + mutex_unlock(&round_robin_lock);
1272 +
1273 + set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
1274 ++
1275 ++ free_cpumask_var(tmp);
1276 + }
1277 +
1278 + static void exit_round_robin(unsigned int tsk_index)
1279 +diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
1280 +index bf6873f95e72..0b5eedb60d04 100644
1281 +--- a/drivers/acpi/acpica/evevent.c
1282 ++++ b/drivers/acpi/acpica/evevent.c
1283 +@@ -204,6 +204,7 @@ u32 acpi_ev_fixed_event_detect(void)
1284 + u32 fixed_status;
1285 + u32 fixed_enable;
1286 + u32 i;
1287 ++ acpi_status status;
1288 +
1289 + ACPI_FUNCTION_NAME(ev_fixed_event_detect);
1290 +
1291 +@@ -211,8 +212,12 @@ u32 acpi_ev_fixed_event_detect(void)
1292 + * Read the fixed feature status and enable registers, as all the cases
1293 + * depend on their values. Ignore errors here.
1294 + */
1295 +- (void)acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status);
1296 +- (void)acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable);
1297 ++ status = acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status);
1298 ++ status |=
1299 ++ acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable);
1300 ++ if (ACPI_FAILURE(status)) {
1301 ++ return (int_status);
1302 ++ }
1303 +
1304 + ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
1305 + "Fixed Event Block: Enable %08X Status %08X\n",
1306 +diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
1307 +index 7eba578d36f3..10262cae8a19 100644
1308 +--- a/drivers/acpi/acpica/nseval.c
1309 ++++ b/drivers/acpi/acpica/nseval.c
1310 +@@ -308,6 +308,14 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
1311 + /* Map AE_CTRL_RETURN_VALUE to AE_OK, we are done with it */
1312 +
1313 + status = AE_OK;
1314 ++ } else if (ACPI_FAILURE(status)) {
1315 ++
1316 ++ /* If return_object exists, delete it */
1317 ++
1318 ++ if (info->return_object) {
1319 ++ acpi_ut_remove_reference(info->return_object);
1320 ++ info->return_object = NULL;
1321 ++ }
1322 + }
1323 +
1324 + ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
1325 +diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
1326 +index bb01dea39fdc..9825780a1cd2 100644
1327 +--- a/drivers/acpi/processor_perflib.c
1328 ++++ b/drivers/acpi/processor_perflib.c
1329 +@@ -161,7 +161,7 @@ int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
1330 + {
1331 + int ret;
1332 +
1333 +- if (ignore_ppc) {
1334 ++ if (ignore_ppc || !pr->performance) {
1335 + /*
1336 + * Only when it is notification event, the _OST object
1337 + * will be evaluated. Otherwise it is skipped.
1338 +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
1339 +index 60d6db82ce5a..f9b86a1d922d 100644
1340 +--- a/drivers/ata/libata-core.c
1341 ++++ b/drivers/ata/libata-core.c
1342 +@@ -4187,6 +4187,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
1343 + /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
1344 + { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
1345 +
1346 ++ /* Some Sandisk SSDs lock up hard with NCQ enabled. Reported on
1347 ++ SD7SN6S256G and SD8SN8U256G */
1348 ++ { "SanDisk SD[78]SN*G", NULL, ATA_HORKAGE_NONCQ, },
1349 ++
1350 + /* devices which puke on READ_NATIVE_MAX */
1351 + { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
1352 + { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
1353 +@@ -4247,6 +4251,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
1354 + { "SanDisk SD7UB3Q*G1001", NULL, ATA_HORKAGE_NOLPM, },
1355 +
1356 + /* devices that don't properly handle queued TRIM commands */
1357 ++ { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
1358 ++ ATA_HORKAGE_ZERO_AFTER_TRIM, },
1359 + { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
1360 + ATA_HORKAGE_ZERO_AFTER_TRIM, },
1361 + { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
1362 +diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
1363 +index 93362362aa55..8474a1b0740f 100644
1364 +--- a/drivers/block/paride/pcd.c
1365 ++++ b/drivers/block/paride/pcd.c
1366 +@@ -230,6 +230,8 @@ static int pcd_block_open(struct block_device *bdev, fmode_t mode)
1367 + struct pcd_unit *cd = bdev->bd_disk->private_data;
1368 + int ret;
1369 +
1370 ++ check_disk_change(bdev);
1371 ++
1372 + mutex_lock(&pcd_mutex);
1373 + ret = cdrom_open(&cd->info, bdev, mode);
1374 + mutex_unlock(&pcd_mutex);
1375 +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
1376 +index 54cef3dc0beb..91676535a1a3 100644
1377 +--- a/drivers/bluetooth/btusb.c
1378 ++++ b/drivers/bluetooth/btusb.c
1379 +@@ -336,6 +336,9 @@ static const struct usb_device_id blacklist_table[] = {
1380 + { USB_DEVICE(0x13d3, 0x3459), .driver_info = BTUSB_REALTEK },
1381 + { USB_DEVICE(0x13d3, 0x3494), .driver_info = BTUSB_REALTEK },
1382 +
1383 ++ /* Additional Realtek 8723BU Bluetooth devices */
1384 ++ { USB_DEVICE(0x7392, 0xa611), .driver_info = BTUSB_REALTEK },
1385 ++
1386 + /* Additional Realtek 8821AE Bluetooth devices */
1387 + { USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
1388 + { USB_DEVICE(0x13d3, 0x3414), .driver_info = BTUSB_REALTEK },
1389 +@@ -343,6 +346,9 @@ static const struct usb_device_id blacklist_table[] = {
1390 + { USB_DEVICE(0x13d3, 0x3461), .driver_info = BTUSB_REALTEK },
1391 + { USB_DEVICE(0x13d3, 0x3462), .driver_info = BTUSB_REALTEK },
1392 +
1393 ++ /* Additional Realtek 8822BE Bluetooth devices */
1394 ++ { USB_DEVICE(0x0b05, 0x185c), .driver_info = BTUSB_REALTEK },
1395 ++
1396 + /* Silicon Wave based devices */
1397 + { USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
1398 +
1399 +diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
1400 +index b5f245d2875c..0151039bff05 100644
1401 +--- a/drivers/cdrom/cdrom.c
1402 ++++ b/drivers/cdrom/cdrom.c
1403 +@@ -1154,9 +1154,6 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev,
1404 +
1405 + cd_dbg(CD_OPEN, "entering cdrom_open\n");
1406 +
1407 +- /* open is event synchronization point, check events first */
1408 +- check_disk_change(bdev);
1409 +-
1410 + /* if this was a O_NONBLOCK open and we should honor the flags,
1411 + * do a quick open without drive/disc integrity checks. */
1412 + cdi->use_count++;
1413 +diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
1414 +index 584bc3126403..e2808fefbb78 100644
1415 +--- a/drivers/cdrom/gdrom.c
1416 ++++ b/drivers/cdrom/gdrom.c
1417 +@@ -497,6 +497,9 @@ static struct cdrom_device_ops gdrom_ops = {
1418 + static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
1419 + {
1420 + int ret;
1421 ++
1422 ++ check_disk_change(bdev);
1423 ++
1424 + mutex_lock(&gdrom_mutex);
1425 + ret = cdrom_open(gd.cd_info, bdev, mode);
1426 + mutex_unlock(&gdrom_mutex);
1427 +diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
1428 +index 92a810648bd0..530aacca3eb8 100644
1429 +--- a/drivers/char/hw_random/stm32-rng.c
1430 ++++ b/drivers/char/hw_random/stm32-rng.c
1431 +@@ -21,6 +21,7 @@
1432 + #include <linux/of_address.h>
1433 + #include <linux/of_platform.h>
1434 + #include <linux/pm_runtime.h>
1435 ++#include <linux/reset.h>
1436 + #include <linux/slab.h>
1437 +
1438 + #define RNG_CR 0x00
1439 +@@ -46,6 +47,7 @@ struct stm32_rng_private {
1440 + struct hwrng rng;
1441 + void __iomem *base;
1442 + struct clk *clk;
1443 ++ struct reset_control *rst;
1444 + };
1445 +
1446 + static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
1447 +@@ -140,6 +142,13 @@ static int stm32_rng_probe(struct platform_device *ofdev)
1448 + if (IS_ERR(priv->clk))
1449 + return PTR_ERR(priv->clk);
1450 +
1451 ++ priv->rst = devm_reset_control_get(&ofdev->dev, NULL);
1452 ++ if (!IS_ERR(priv->rst)) {
1453 ++ reset_control_assert(priv->rst);
1454 ++ udelay(2);
1455 ++ reset_control_deassert(priv->rst);
1456 ++ }
1457 ++
1458 + dev_set_drvdata(dev, priv);
1459 +
1460 + priv->rng.name = dev_driver_string(dev),
1461 +diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c
1462 +index 6e658aa114f1..a70518a4fcec 100644
1463 +--- a/drivers/char/ipmi/ipmi_powernv.c
1464 ++++ b/drivers/char/ipmi/ipmi_powernv.c
1465 +@@ -251,8 +251,9 @@ static int ipmi_powernv_probe(struct platform_device *pdev)
1466 + ipmi->irq = opal_event_request(prop);
1467 + }
1468 +
1469 +- if (request_irq(ipmi->irq, ipmi_opal_event, IRQ_TYPE_LEVEL_HIGH,
1470 +- "opal-ipmi", ipmi)) {
1471 ++ rc = request_irq(ipmi->irq, ipmi_opal_event, IRQ_TYPE_LEVEL_HIGH,
1472 ++ "opal-ipmi", ipmi);
1473 ++ if (rc) {
1474 + dev_warn(dev, "Unable to request irq\n");
1475 + goto err_dispose;
1476 + }
1477 +diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
1478 +index 83c206f0fc98..d6d166fe49a3 100644
1479 +--- a/drivers/char/ipmi/ipmi_ssif.c
1480 ++++ b/drivers/char/ipmi/ipmi_ssif.c
1481 +@@ -757,7 +757,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
1482 + ssif_info->ssif_state = SSIF_NORMAL;
1483 + ipmi_ssif_unlock_cond(ssif_info, flags);
1484 + pr_warn(PFX "Error getting flags: %d %d, %x\n",
1485 +- result, len, data[2]);
1486 ++ result, len, (len >= 3) ? data[2] : 0);
1487 + } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
1488 + || data[1] != IPMI_GET_MSG_FLAGS_CMD) {
1489 + /*
1490 +@@ -779,7 +779,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
1491 + if ((result < 0) || (len < 3) || (data[2] != 0)) {
1492 + /* Error clearing flags */
1493 + pr_warn(PFX "Error clearing flags: %d %d, %x\n",
1494 +- result, len, data[2]);
1495 ++ result, len, (len >= 3) ? data[2] : 0);
1496 + } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
1497 + || data[1] != IPMI_CLEAR_MSG_FLAGS_CMD) {
1498 + pr_warn(PFX "Invalid response clearing flags: %x %x\n",
1499 +diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
1500 +index f13c3f4228d4..53c068f90b37 100644
1501 +--- a/drivers/clk/clk.c
1502 ++++ b/drivers/clk/clk.c
1503 +@@ -1905,6 +1905,9 @@ static int clk_core_get_phase(struct clk_core *core)
1504 + int ret;
1505 +
1506 + clk_prepare_lock();
1507 ++ /* Always try to update cached phase if possible */
1508 ++ if (core->ops->get_phase)
1509 ++ core->phase = core->ops->get_phase(core->hw);
1510 + ret = core->phase;
1511 + clk_prepare_unlock();
1512 +
1513 +diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
1514 +index 33c20c6b45af..b840e4ace623 100644
1515 +--- a/drivers/clk/rockchip/clk-mmc-phase.c
1516 ++++ b/drivers/clk/rockchip/clk-mmc-phase.c
1517 +@@ -60,6 +60,12 @@ static int rockchip_mmc_get_phase(struct clk_hw *hw)
1518 + u16 degrees;
1519 + u32 delay_num = 0;
1520 +
1521 ++ /* See the comment for rockchip_mmc_set_phase below */
1522 ++ if (!rate) {
1523 ++ pr_err("%s: invalid clk rate\n", __func__);
1524 ++ return -EINVAL;
1525 ++ }
1526 ++
1527 + raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);
1528 +
1529 + degrees = (raw_value & ROCKCHIP_MMC_DEGREE_MASK) * 90;
1530 +@@ -86,6 +92,23 @@ static int rockchip_mmc_set_phase(struct clk_hw *hw, int degrees)
1531 + u32 raw_value;
1532 + u32 delay;
1533 +
1534 ++ /*
1535 ++ * The below calculation is based on the output clock from
1536 ++ * MMC host to the card, which expects the phase clock inherits
1537 ++ * the clock rate from its parent, namely the output clock
1538 ++ * provider of MMC host. However, things may go wrong if
1539 ++ * (1) It is orphan.
1540 ++ * (2) It is assigned to the wrong parent.
1541 ++ *
1542 ++ * This check help debug the case (1), which seems to be the
1543 ++ * most likely problem we often face and which makes it difficult
1544 ++ * for people to debug unstable mmc tuning results.
1545 ++ */
1546 ++ if (!rate) {
1547 ++ pr_err("%s: invalid clk rate\n", __func__);
1548 ++ return -EINVAL;
1549 ++ }
1550 ++
1551 + nineties = degrees / 90;
1552 + remainder = (degrees % 90);
1553 +
1554 +diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c
1555 +index fdd41b17a24f..294efaef5b82 100644
1556 +--- a/drivers/clk/samsung/clk-exynos3250.c
1557 ++++ b/drivers/clk/samsung/clk-exynos3250.c
1558 +@@ -683,7 +683,7 @@ static struct samsung_pll_rate_table exynos3250_epll_rates[] = {
1559 + PLL_36XX_RATE(144000000, 96, 2, 3, 0),
1560 + PLL_36XX_RATE( 96000000, 128, 2, 4, 0),
1561 + PLL_36XX_RATE( 84000000, 112, 2, 4, 0),
1562 +- PLL_36XX_RATE( 80000004, 106, 2, 4, 43691),
1563 ++ PLL_36XX_RATE( 80000003, 106, 2, 4, 43691),
1564 + PLL_36XX_RATE( 73728000, 98, 2, 4, 19923),
1565 + PLL_36XX_RATE( 67737598, 270, 3, 5, 62285),
1566 + PLL_36XX_RATE( 65535999, 174, 2, 5, 49982),
1567 +@@ -719,7 +719,7 @@ static struct samsung_pll_rate_table exynos3250_vpll_rates[] = {
1568 + PLL_36XX_RATE(148352005, 98, 2, 3, 59070),
1569 + PLL_36XX_RATE(108000000, 144, 2, 4, 0),
1570 + PLL_36XX_RATE( 74250000, 99, 2, 4, 0),
1571 +- PLL_36XX_RATE( 74176002, 98, 3, 4, 59070),
1572 ++ PLL_36XX_RATE( 74176002, 98, 2, 4, 59070),
1573 + PLL_36XX_RATE( 54054000, 216, 3, 5, 14156),
1574 + PLL_36XX_RATE( 54000000, 144, 2, 5, 0),
1575 + { /* sentinel */ }
1576 +diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
1577 +index 5bebf8cb0d70..f0b564c7c9c1 100644
1578 +--- a/drivers/clk/samsung/clk-exynos5250.c
1579 ++++ b/drivers/clk/samsung/clk-exynos5250.c
1580 +@@ -711,13 +711,13 @@ static struct samsung_pll_rate_table epll_24mhz_tbl[] __initdata = {
1581 + /* sorted in descending order */
1582 + /* PLL_36XX_RATE(rate, m, p, s, k) */
1583 + PLL_36XX_RATE(192000000, 64, 2, 2, 0),
1584 +- PLL_36XX_RATE(180633600, 90, 3, 2, 20762),
1585 ++ PLL_36XX_RATE(180633605, 90, 3, 2, 20762),
1586 + PLL_36XX_RATE(180000000, 90, 3, 2, 0),
1587 + PLL_36XX_RATE(73728000, 98, 2, 4, 19923),
1588 +- PLL_36XX_RATE(67737600, 90, 2, 4, 20762),
1589 ++ PLL_36XX_RATE(67737602, 90, 2, 4, 20762),
1590 + PLL_36XX_RATE(49152000, 98, 3, 4, 19923),
1591 +- PLL_36XX_RATE(45158400, 90, 3, 4, 20762),
1592 +- PLL_36XX_RATE(32768000, 131, 3, 5, 4719),
1593 ++ PLL_36XX_RATE(45158401, 90, 3, 4, 20762),
1594 ++ PLL_36XX_RATE(32768001, 131, 3, 5, 4719),
1595 + { },
1596 + };
1597 +
1598 +diff --git a/drivers/clk/samsung/clk-exynos5260.c b/drivers/clk/samsung/clk-exynos5260.c
1599 +index d1a29f6c1084..7027e77bf859 100644
1600 +--- a/drivers/clk/samsung/clk-exynos5260.c
1601 ++++ b/drivers/clk/samsung/clk-exynos5260.c
1602 +@@ -65,7 +65,7 @@ static struct samsung_pll_rate_table pll2650_24mhz_tbl[] __initdata = {
1603 + PLL_36XX_RATE(480000000, 160, 2, 2, 0),
1604 + PLL_36XX_RATE(432000000, 144, 2, 2, 0),
1605 + PLL_36XX_RATE(400000000, 200, 3, 2, 0),
1606 +- PLL_36XX_RATE(394073130, 459, 7, 2, 49282),
1607 ++ PLL_36XX_RATE(394073128, 459, 7, 2, 49282),
1608 + PLL_36XX_RATE(333000000, 111, 2, 2, 0),
1609 + PLL_36XX_RATE(300000000, 100, 2, 2, 0),
1610 + PLL_36XX_RATE(266000000, 266, 3, 3, 0),
1611 +diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
1612 +index cee062c588de..91c89ac193b9 100644
1613 +--- a/drivers/clk/samsung/clk-exynos5433.c
1614 ++++ b/drivers/clk/samsung/clk-exynos5433.c
1615 +@@ -747,7 +747,7 @@ static struct samsung_pll_rate_table exynos5443_pll_rates[] = {
1616 + PLL_35XX_RATE(800000000U, 400, 6, 1),
1617 + PLL_35XX_RATE(733000000U, 733, 12, 1),
1618 + PLL_35XX_RATE(700000000U, 175, 3, 1),
1619 +- PLL_35XX_RATE(667000000U, 222, 4, 1),
1620 ++ PLL_35XX_RATE(666000000U, 222, 4, 1),
1621 + PLL_35XX_RATE(633000000U, 211, 4, 1),
1622 + PLL_35XX_RATE(600000000U, 500, 5, 2),
1623 + PLL_35XX_RATE(552000000U, 460, 5, 2),
1624 +@@ -773,12 +773,12 @@ static struct samsung_pll_rate_table exynos5443_pll_rates[] = {
1625 + /* AUD_PLL */
1626 + static struct samsung_pll_rate_table exynos5443_aud_pll_rates[] = {
1627 + PLL_36XX_RATE(400000000U, 200, 3, 2, 0),
1628 +- PLL_36XX_RATE(393216000U, 197, 3, 2, -25690),
1629 ++ PLL_36XX_RATE(393216003U, 197, 3, 2, -25690),
1630 + PLL_36XX_RATE(384000000U, 128, 2, 2, 0),
1631 +- PLL_36XX_RATE(368640000U, 246, 4, 2, -15729),
1632 +- PLL_36XX_RATE(361507200U, 181, 3, 2, -16148),
1633 +- PLL_36XX_RATE(338688000U, 113, 2, 2, -6816),
1634 +- PLL_36XX_RATE(294912000U, 98, 1, 3, 19923),
1635 ++ PLL_36XX_RATE(368639991U, 246, 4, 2, -15729),
1636 ++ PLL_36XX_RATE(361507202U, 181, 3, 2, -16148),
1637 ++ PLL_36XX_RATE(338687988U, 113, 2, 2, -6816),
1638 ++ PLL_36XX_RATE(294912002U, 98, 1, 3, 19923),
1639 + PLL_36XX_RATE(288000000U, 96, 1, 3, 0),
1640 + PLL_36XX_RATE(252000000U, 84, 1, 3, 0),
1641 + { /* sentinel */ }
1642 +diff --git a/drivers/clk/samsung/clk-s3c2410.c b/drivers/clk/samsung/clk-s3c2410.c
1643 +index 0945a8852299..69e3e848716a 100644
1644 +--- a/drivers/clk/samsung/clk-s3c2410.c
1645 ++++ b/drivers/clk/samsung/clk-s3c2410.c
1646 +@@ -168,7 +168,7 @@ static struct samsung_pll_rate_table pll_s3c2410_12mhz_tbl[] __initdata = {
1647 + PLL_35XX_RATE(226000000, 105, 1, 1),
1648 + PLL_35XX_RATE(210000000, 132, 2, 1),
1649 + /* 2410 common */
1650 +- PLL_35XX_RATE(203000000, 161, 3, 1),
1651 ++ PLL_35XX_RATE(202800000, 161, 3, 1),
1652 + PLL_35XX_RATE(192000000, 88, 1, 1),
1653 + PLL_35XX_RATE(186000000, 85, 1, 1),
1654 + PLL_35XX_RATE(180000000, 82, 1, 1),
1655 +@@ -178,18 +178,18 @@ static struct samsung_pll_rate_table pll_s3c2410_12mhz_tbl[] __initdata = {
1656 + PLL_35XX_RATE(147000000, 90, 2, 1),
1657 + PLL_35XX_RATE(135000000, 82, 2, 1),
1658 + PLL_35XX_RATE(124000000, 116, 1, 2),
1659 +- PLL_35XX_RATE(118000000, 150, 2, 2),
1660 ++ PLL_35XX_RATE(118500000, 150, 2, 2),
1661 + PLL_35XX_RATE(113000000, 105, 1, 2),
1662 +- PLL_35XX_RATE(101000000, 127, 2, 2),
1663 ++ PLL_35XX_RATE(101250000, 127, 2, 2),
1664 + PLL_35XX_RATE(90000000, 112, 2, 2),
1665 +- PLL_35XX_RATE(85000000, 105, 2, 2),
1666 ++ PLL_35XX_RATE(84750000, 105, 2, 2),
1667 + PLL_35XX_RATE(79000000, 71, 1, 2),
1668 +- PLL_35XX_RATE(68000000, 82, 2, 2),
1669 +- PLL_35XX_RATE(56000000, 142, 2, 3),
1670 ++ PLL_35XX_RATE(67500000, 82, 2, 2),
1671 ++ PLL_35XX_RATE(56250000, 142, 2, 3),
1672 + PLL_35XX_RATE(48000000, 120, 2, 3),
1673 +- PLL_35XX_RATE(51000000, 161, 3, 3),
1674 ++ PLL_35XX_RATE(50700000, 161, 3, 3),
1675 + PLL_35XX_RATE(45000000, 82, 1, 3),
1676 +- PLL_35XX_RATE(34000000, 82, 2, 3),
1677 ++ PLL_35XX_RATE(33750000, 82, 2, 3),
1678 + { /* sentinel */ },
1679 + };
1680 +
1681 +diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c
1682 +index 517e1c7624d4..a00209702f39 100644
1683 +--- a/drivers/clocksource/fsl_ftm_timer.c
1684 ++++ b/drivers/clocksource/fsl_ftm_timer.c
1685 +@@ -281,7 +281,7 @@ static int __init __ftm_clk_init(struct device_node *np, char *cnt_name,
1686 +
1687 + static unsigned long __init ftm_clk_init(struct device_node *np)
1688 + {
1689 +- unsigned long freq;
1690 ++ long freq;
1691 +
1692 + freq = __ftm_clk_init(np, "ftm-evt-counter-en", "ftm-evt");
1693 + if (freq <= 0)
1694 +diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
1695 +index 7c0bdfb1a2ca..0dcbf951ad1b 100644
1696 +--- a/drivers/cpufreq/cppc_cpufreq.c
1697 ++++ b/drivers/cpufreq/cppc_cpufreq.c
1698 +@@ -100,9 +100,19 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
1699 + policy->cpuinfo.max_freq = policy->max;
1700 + policy->shared_type = cpu->shared_type;
1701 +
1702 +- if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
1703 ++ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
1704 ++ int i;
1705 ++
1706 + cpumask_copy(policy->cpus, cpu->shared_cpu_map);
1707 +- else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
1708 ++
1709 ++ for_each_cpu(i, policy->cpus) {
1710 ++ if (unlikely(i == policy->cpu))
1711 ++ continue;
1712 ++
1713 ++ memcpy(&all_cpu_data[i]->perf_caps, &cpu->perf_caps,
1714 ++ sizeof(cpu->perf_caps));
1715 ++ }
1716 ++ } else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
1717 + /* Support only SW_ANY for now. */
1718 + pr_debug("Unsupported CPU co-ord type\n");
1719 + return -EFAULT;
1720 +@@ -166,8 +176,13 @@ static int __init cppc_cpufreq_init(void)
1721 + return ret;
1722 +
1723 + out:
1724 +- for_each_possible_cpu(i)
1725 +- kfree(all_cpu_data[i]);
1726 ++ for_each_possible_cpu(i) {
1727 ++ cpu = all_cpu_data[i];
1728 ++ if (!cpu)
1729 ++ break;
1730 ++ free_cpumask_var(cpu->shared_cpu_map);
1731 ++ kfree(cpu);
1732 ++ }
1733 +
1734 + kfree(all_cpu_data);
1735 + return -ENODEV;
1736 +diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
1737 +index 107cd2a41cae..24651d3217cd 100644
1738 +--- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c
1739 ++++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
1740 +@@ -422,6 +422,7 @@ static struct platform_driver sun4i_ss_driver = {
1741 +
1742 + module_platform_driver(sun4i_ss_driver);
1743 +
1744 ++MODULE_ALIAS("platform:sun4i-ss");
1745 + MODULE_DESCRIPTION("Allwinner Security System cryptographic accelerator");
1746 + MODULE_LICENSE("GPL");
1747 + MODULE_AUTHOR("Corentin LABBE <clabbe.montjoie@×××××.com>");
1748 +diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
1749 +index 66d84bcf9bbf..8db791ef2027 100644
1750 +--- a/drivers/dma/pl330.c
1751 ++++ b/drivers/dma/pl330.c
1752 +@@ -1533,7 +1533,7 @@ static void pl330_dotask(unsigned long data)
1753 + /* Returns 1 if state was updated, 0 otherwise */
1754 + static int pl330_update(struct pl330_dmac *pl330)
1755 + {
1756 +- struct dma_pl330_desc *descdone, *tmp;
1757 ++ struct dma_pl330_desc *descdone;
1758 + unsigned long flags;
1759 + void __iomem *regs;
1760 + u32 val;
1761 +@@ -1611,7 +1611,9 @@ static int pl330_update(struct pl330_dmac *pl330)
1762 + }
1763 +
1764 + /* Now that we are in no hurry, do the callbacks */
1765 +- list_for_each_entry_safe(descdone, tmp, &pl330->req_done, rqd) {
1766 ++ while (!list_empty(&pl330->req_done)) {
1767 ++ descdone = list_first_entry(&pl330->req_done,
1768 ++ struct dma_pl330_desc, rqd);
1769 + list_del(&descdone->rqd);
1770 + spin_unlock_irqrestore(&pl330->lock, flags);
1771 + dma_pl330_rqcb(descdone, PL330_ERR_NONE);
1772 +diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
1773 +index 7820d07e7bee..2b36d1c63aa5 100644
1774 +--- a/drivers/dma/sh/rcar-dmac.c
1775 ++++ b/drivers/dma/sh/rcar-dmac.c
1776 +@@ -851,7 +851,7 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
1777 +
1778 + rcar_dmac_chan_configure_desc(chan, desc);
1779 +
1780 +- max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift;
1781 ++ max_chunk_size = RCAR_DMATCR_MASK << desc->xfer_shift;
1782 +
1783 + /*
1784 + * Allocate and fill the transfer chunk descriptors. We own the only
1785 +diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
1786 +index c2f5117fd8cb..5545a7f3a98f 100644
1787 +--- a/drivers/firewire/ohci.c
1788 ++++ b/drivers/firewire/ohci.c
1789 +@@ -1130,7 +1130,13 @@ static int context_add_buffer(struct context *ctx)
1790 + return -ENOMEM;
1791 +
1792 + offset = (void *)&desc->buffer - (void *)desc;
1793 +- desc->buffer_size = PAGE_SIZE - offset;
1794 ++ /*
1795 ++ * Some controllers, like JMicron ones, always issue 0x20-byte DMA reads
1796 ++ * for descriptors, even 0x10-byte ones. This can cause page faults when
1797 ++ * an IOMMU is in use and the oversized read crosses a page boundary.
1798 ++ * Work around this by always leaving at least 0x10 bytes of padding.
1799 ++ */
1800 ++ desc->buffer_size = PAGE_SIZE - offset - 0x10;
1801 + desc->buffer_bus = bus_addr + offset;
1802 + desc->used = 0;
1803 +
1804 +diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
1805 +index 0e08e665f715..053a23a7be94 100644
1806 +--- a/drivers/firmware/dmi_scan.c
1807 ++++ b/drivers/firmware/dmi_scan.c
1808 +@@ -18,7 +18,7 @@ EXPORT_SYMBOL_GPL(dmi_kobj);
1809 + * of and an antecedent to, SMBIOS, which stands for System
1810 + * Management BIOS. See further: http://www.dmtf.org/standards
1811 + */
1812 +-static const char dmi_empty_string[] = " ";
1813 ++static const char dmi_empty_string[] = "";
1814 +
1815 + static u32 dmi_ver __initdata;
1816 + static u32 dmi_len;
1817 +@@ -44,25 +44,21 @@ static int dmi_memdev_nr;
1818 + static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s)
1819 + {
1820 + const u8 *bp = ((u8 *) dm) + dm->length;
1821 ++ const u8 *nsp;
1822 +
1823 + if (s) {
1824 +- s--;
1825 +- while (s > 0 && *bp) {
1826 ++ while (--s > 0 && *bp)
1827 + bp += strlen(bp) + 1;
1828 +- s--;
1829 +- }
1830 +-
1831 +- if (*bp != 0) {
1832 +- size_t len = strlen(bp)+1;
1833 +- size_t cmp_len = len > 8 ? 8 : len;
1834 +
1835 +- if (!memcmp(bp, dmi_empty_string, cmp_len))
1836 +- return dmi_empty_string;
1837 ++ /* Strings containing only spaces are considered empty */
1838 ++ nsp = bp;
1839 ++ while (*nsp == ' ')
1840 ++ nsp++;
1841 ++ if (*nsp != '\0')
1842 + return bp;
1843 +- }
1844 + }
1845 +
1846 +- return "";
1847 ++ return dmi_empty_string;
1848 + }
1849 +
1850 + static const char * __init dmi_string(const struct dmi_header *dm, u8 s)
1851 +diff --git a/drivers/gpu/drm/exynos/regs-fimc.h b/drivers/gpu/drm/exynos/regs-fimc.h
1852 +index 30496134a3d0..d7cbe53c4c01 100644
1853 +--- a/drivers/gpu/drm/exynos/regs-fimc.h
1854 ++++ b/drivers/gpu/drm/exynos/regs-fimc.h
1855 +@@ -569,7 +569,7 @@
1856 + #define EXYNOS_CIIMGEFF_FIN_EMBOSSING (4 << 26)
1857 + #define EXYNOS_CIIMGEFF_FIN_SILHOUETTE (5 << 26)
1858 + #define EXYNOS_CIIMGEFF_FIN_MASK (7 << 26)
1859 +-#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK ((0xff < 13) | (0xff < 0))
1860 ++#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK ((0xff << 13) | (0xff << 0))
1861 +
1862 + /* Real input DMA size register */
1863 + #define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE (1 << 31)
1864 +diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
1865 +index d908321b94ce..e6d07680eb05 100644
1866 +--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
1867 ++++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
1868 +@@ -67,7 +67,6 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
1869 + * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
1870 + */
1871 + vma->vm_flags &= ~VM_PFNMAP;
1872 +- vma->vm_pgoff = 0;
1873 +
1874 + ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
1875 + obj->size, &rk_obj->dma_attrs);
1876 +@@ -99,6 +98,12 @@ int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
1877 + if (ret)
1878 + return ret;
1879 +
1880 ++ /*
1881 ++ * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
1882 ++ * whole buffer from the start.
1883 ++ */
1884 ++ vma->vm_pgoff = 0;
1885 ++
1886 + obj = vma->vm_private_data;
1887 +
1888 + return rockchip_drm_gem_object_mmap(obj, vma);
1889 +diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
1890 +index b4de18e65db8..6296e9f270ca 100644
1891 +--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
1892 ++++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
1893 +@@ -208,6 +208,9 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
1894 + case VIRTGPU_PARAM_3D_FEATURES:
1895 + value = vgdev->has_virgl_3d == true ? 1 : 0;
1896 + break;
1897 ++ case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
1898 ++ value = 1;
1899 ++ break;
1900 + default:
1901 + return -EINVAL;
1902 + }
1903 +@@ -483,7 +486,7 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
1904 + {
1905 + struct virtio_gpu_device *vgdev = dev->dev_private;
1906 + struct drm_virtgpu_get_caps *args = data;
1907 +- int size;
1908 ++ unsigned size, host_caps_size;
1909 + int i;
1910 + int found_valid = -1;
1911 + int ret;
1912 +@@ -492,6 +495,10 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
1913 + if (vgdev->num_capsets == 0)
1914 + return -ENOSYS;
1915 +
1916 ++ /* don't allow userspace to pass 0 */
1917 ++ if (args->size == 0)
1918 ++ return -EINVAL;
1919 ++
1920 + spin_lock(&vgdev->display_info_lock);
1921 + for (i = 0; i < vgdev->num_capsets; i++) {
1922 + if (vgdev->capsets[i].id == args->cap_set_id) {
1923 +@@ -507,11 +514,9 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
1924 + return -EINVAL;
1925 + }
1926 +
1927 +- size = vgdev->capsets[found_valid].max_size;
1928 +- if (args->size > size) {
1929 +- spin_unlock(&vgdev->display_info_lock);
1930 +- return -EINVAL;
1931 +- }
1932 ++ host_caps_size = vgdev->capsets[found_valid].max_size;
1933 ++ /* only copy to user the minimum of the host caps size or the guest caps size */
1934 ++ size = min(args->size, host_caps_size);
1935 +
1936 + list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
1937 + if (cache_ent->id == args->cap_set_id &&
1938 +diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
1939 +index 966047711fbf..1073c0d1fae5 100644
1940 +--- a/drivers/hid/hid-roccat-kovaplus.c
1941 ++++ b/drivers/hid/hid-roccat-kovaplus.c
1942 +@@ -37,6 +37,8 @@ static uint kovaplus_convert_event_cpi(uint value)
1943 + static void kovaplus_profile_activated(struct kovaplus_device *kovaplus,
1944 + uint new_profile_index)
1945 + {
1946 ++ if (new_profile_index >= ARRAY_SIZE(kovaplus->profile_settings))
1947 ++ return;
1948 + kovaplus->actual_profile = new_profile_index;
1949 + kovaplus->actual_cpi = kovaplus->profile_settings[new_profile_index].cpi_startup_level;
1950 + kovaplus->actual_x_sensitivity = kovaplus->profile_settings[new_profile_index].sensitivity_x;
1951 +diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
1952 +index d7ebdf8651f5..d3c6115f16b9 100644
1953 +--- a/drivers/hwmon/nct6775.c
1954 ++++ b/drivers/hwmon/nct6775.c
1955 +@@ -1390,7 +1390,7 @@ static void nct6775_update_pwm(struct device *dev)
1956 + duty_is_dc = data->REG_PWM_MODE[i] &&
1957 + (nct6775_read_value(data, data->REG_PWM_MODE[i])
1958 + & data->PWM_MODE_MASK[i]);
1959 +- data->pwm_mode[i] = duty_is_dc;
1960 ++ data->pwm_mode[i] = !duty_is_dc;
1961 +
1962 + fanmodecfg = nct6775_read_value(data, data->REG_FAN_MODE[i]);
1963 + for (j = 0; j < ARRAY_SIZE(data->REG_PWM); j++) {
1964 +@@ -2267,7 +2267,7 @@ show_pwm_mode(struct device *dev, struct device_attribute *attr, char *buf)
1965 + struct nct6775_data *data = nct6775_update_device(dev);
1966 + struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
1967 +
1968 +- return sprintf(buf, "%d\n", !data->pwm_mode[sattr->index]);
1969 ++ return sprintf(buf, "%d\n", data->pwm_mode[sattr->index]);
1970 + }
1971 +
1972 + static ssize_t
1973 +@@ -2288,9 +2288,9 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr,
1974 + if (val > 1)
1975 + return -EINVAL;
1976 +
1977 +- /* Setting DC mode is not supported for all chips/channels */
1978 ++ /* Setting DC mode (0) is not supported for all chips/channels */
1979 + if (data->REG_PWM_MODE[nr] == 0) {
1980 +- if (val)
1981 ++ if (!val)
1982 + return -EINVAL;
1983 + return count;
1984 + }
1985 +@@ -2299,7 +2299,7 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr,
1986 + data->pwm_mode[nr] = val;
1987 + reg = nct6775_read_value(data, data->REG_PWM_MODE[nr]);
1988 + reg &= ~data->PWM_MODE_MASK[nr];
1989 +- if (val)
1990 ++ if (!val)
1991 + reg |= data->PWM_MODE_MASK[nr];
1992 + nct6775_write_value(data, data->REG_PWM_MODE[nr], reg);
1993 + mutex_unlock(&data->update_lock);
1994 +diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
1995 +index 18477dd1e243..c3f4c9ef6705 100644
1996 +--- a/drivers/hwmon/pmbus/adm1275.c
1997 ++++ b/drivers/hwmon/pmbus/adm1275.c
1998 +@@ -141,7 +141,7 @@ static int adm1275_read_word_data(struct i2c_client *client, int page, int reg)
1999 + const struct adm1275_data *data = to_adm1275_data(info);
2000 + int ret = 0;
2001 +
2002 +- if (page)
2003 ++ if (page > 0)
2004 + return -ENXIO;
2005 +
2006 + switch (reg) {
2007 +@@ -218,7 +218,7 @@ static int adm1275_write_word_data(struct i2c_client *client, int page, int reg,
2008 + const struct adm1275_data *data = to_adm1275_data(info);
2009 + int ret;
2010 +
2011 +- if (page)
2012 ++ if (page > 0)
2013 + return -ENXIO;
2014 +
2015 + switch (reg) {
2016 +diff --git a/drivers/hwmon/pmbus/max8688.c b/drivers/hwmon/pmbus/max8688.c
2017 +index dd4883a19045..e951f9b87abb 100644
2018 +--- a/drivers/hwmon/pmbus/max8688.c
2019 ++++ b/drivers/hwmon/pmbus/max8688.c
2020 +@@ -45,7 +45,7 @@ static int max8688_read_word_data(struct i2c_client *client, int page, int reg)
2021 + {
2022 + int ret;
2023 +
2024 +- if (page)
2025 ++ if (page > 0)
2026 + return -ENXIO;
2027 +
2028 + switch (reg) {
2029 +diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
2030 +index 43207f52e5a3..332d32c53c41 100644
2031 +--- a/drivers/i2c/busses/i2c-mv64xxx.c
2032 ++++ b/drivers/i2c/busses/i2c-mv64xxx.c
2033 +@@ -856,12 +856,16 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
2034 + */
2035 + if (of_device_is_compatible(np, "marvell,mv78230-i2c")) {
2036 + drv_data->offload_enabled = true;
2037 +- drv_data->errata_delay = true;
2038 ++ /* The delay is only needed in standard mode (100kHz) */
2039 ++ if (bus_freq <= 100000)
2040 ++ drv_data->errata_delay = true;
2041 + }
2042 +
2043 + if (of_device_is_compatible(np, "marvell,mv78230-a0-i2c")) {
2044 + drv_data->offload_enabled = false;
2045 +- drv_data->errata_delay = true;
2046 ++ /* The delay is only needed in standard mode (100kHz) */
2047 ++ if (bus_freq <= 100000)
2048 ++ drv_data->errata_delay = true;
2049 + }
2050 +
2051 + if (of_device_is_compatible(np, "allwinner,sun6i-a31-i2c"))
2052 +diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
2053 +index ef907fd5ba98..08a21d635d0d 100644
2054 +--- a/drivers/ide/ide-cd.c
2055 ++++ b/drivers/ide/ide-cd.c
2056 +@@ -1593,6 +1593,8 @@ static int idecd_open(struct block_device *bdev, fmode_t mode)
2057 + struct cdrom_info *info;
2058 + int rc = -ENXIO;
2059 +
2060 ++ check_disk_change(bdev);
2061 ++
2062 + mutex_lock(&ide_cd_mutex);
2063 + info = ide_cd_get(bdev->bd_disk);
2064 + if (!info)
2065 +diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
2066 +index 2b9c00faca7d..795938edce3f 100644
2067 +--- a/drivers/infiniband/core/ucma.c
2068 ++++ b/drivers/infiniband/core/ucma.c
2069 +@@ -1295,7 +1295,7 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
2070 + if (IS_ERR(ctx))
2071 + return PTR_ERR(ctx);
2072 +
2073 +- if (unlikely(cmd.optval > KMALLOC_MAX_SIZE))
2074 ++ if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
2075 + return -EINVAL;
2076 +
2077 + optval = memdup_user((void __user *) (unsigned long) cmd.optval,
2078 +diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
2079 +index c5390f6f94c5..43d277a931c2 100644
2080 +--- a/drivers/infiniband/hw/mlx5/qp.c
2081 ++++ b/drivers/infiniband/hw/mlx5/qp.c
2082 +@@ -3161,12 +3161,9 @@ int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
2083 + int err;
2084 +
2085 + err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
2086 +- if (err) {
2087 ++ if (err)
2088 + mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
2089 +- return err;
2090 +- }
2091 +
2092 + kfree(xrcd);
2093 +-
2094 + return 0;
2095 + }
2096 +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
2097 +index 37b42447045d..fcb18b11db75 100644
2098 +--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
2099 ++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
2100 +@@ -1953,6 +1953,9 @@ static struct net_device *ipoib_add_port(const char *format,
2101 + goto event_failed;
2102 + }
2103 +
2104 ++ /* call event handler to ensure pkey in sync */
2105 ++ queue_work(ipoib_workqueue, &priv->flush_heavy);
2106 ++
2107 + result = register_netdev(priv->dev);
2108 + if (result) {
2109 + printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
2110 +diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
2111 +index 3f1c4dea8866..9ab424b9b281 100644
2112 +--- a/drivers/irqchip/irq-gic-v3.c
2113 ++++ b/drivers/irqchip/irq-gic-v3.c
2114 +@@ -589,7 +589,7 @@ static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
2115 + MPIDR_TO_SGI_AFFINITY(cluster_id, 1) |
2116 + tlist << ICC_SGI1R_TARGET_LIST_SHIFT);
2117 +
2118 +- pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
2119 ++ pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
2120 + gic_write_sgi1r(val);
2121 + }
2122 +
2123 +diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
2124 +index aa84fcfd59fc..16c3390e5d9f 100644
2125 +--- a/drivers/md/bcache/alloc.c
2126 ++++ b/drivers/md/bcache/alloc.c
2127 +@@ -285,8 +285,10 @@ do { \
2128 + break; \
2129 + \
2130 + mutex_unlock(&(ca)->set->bucket_lock); \
2131 +- if (kthread_should_stop()) \
2132 ++ if (kthread_should_stop()) { \
2133 ++ set_current_state(TASK_RUNNING); \
2134 + return 0; \
2135 ++ } \
2136 + \
2137 + try_to_freeze(); \
2138 + schedule(); \
2139 +diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
2140 +index 02619cabda8b..7fe7df56fa33 100644
2141 +--- a/drivers/md/bcache/bcache.h
2142 ++++ b/drivers/md/bcache/bcache.h
2143 +@@ -904,7 +904,7 @@ void bcache_write_super(struct cache_set *);
2144 +
2145 + int bch_flash_dev_create(struct cache_set *c, uint64_t size);
2146 +
2147 +-int bch_cached_dev_attach(struct cached_dev *, struct cache_set *);
2148 ++int bch_cached_dev_attach(struct cached_dev *, struct cache_set *, uint8_t *);
2149 + void bch_cached_dev_detach(struct cached_dev *);
2150 + void bch_cached_dev_run(struct cached_dev *);
2151 + void bcache_device_stop(struct bcache_device *);
2152 +diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
2153 +index a5a6909280fe..4ed621ad27e4 100644
2154 +--- a/drivers/md/bcache/btree.c
2155 ++++ b/drivers/md/bcache/btree.c
2156 +@@ -1869,14 +1869,17 @@ void bch_initial_gc_finish(struct cache_set *c)
2157 + */
2158 + for_each_cache(ca, c, i) {
2159 + for_each_bucket(b, ca) {
2160 +- if (fifo_full(&ca->free[RESERVE_PRIO]))
2161 ++ if (fifo_full(&ca->free[RESERVE_PRIO]) &&
2162 ++ fifo_full(&ca->free[RESERVE_BTREE]))
2163 + break;
2164 +
2165 + if (bch_can_invalidate_bucket(ca, b) &&
2166 + !GC_MARK(b)) {
2167 + __bch_invalidate_one_bucket(ca, b);
2168 +- fifo_push(&ca->free[RESERVE_PRIO],
2169 +- b - ca->buckets);
2170 ++ if (!fifo_push(&ca->free[RESERVE_PRIO],
2171 ++ b - ca->buckets))
2172 ++ fifo_push(&ca->free[RESERVE_BTREE],
2173 ++ b - ca->buckets);
2174 + }
2175 + }
2176 + }
2177 +diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
2178 +index e73aeb0e892c..e497bde96db3 100644
2179 +--- a/drivers/md/bcache/request.c
2180 ++++ b/drivers/md/bcache/request.c
2181 +@@ -633,11 +633,11 @@ static void do_bio_hook(struct search *s, struct bio *orig_bio)
2182 + static void search_free(struct closure *cl)
2183 + {
2184 + struct search *s = container_of(cl, struct search, cl);
2185 +- bio_complete(s);
2186 +
2187 + if (s->iop.bio)
2188 + bio_put(s->iop.bio);
2189 +
2190 ++ bio_complete(s);
2191 + closure_debug_destroy(cl);
2192 + mempool_free(s, s->d->c->search);
2193 + }
2194 +diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
2195 +index f636af441da6..ef28ddfff7c6 100644
2196 +--- a/drivers/md/bcache/super.c
2197 ++++ b/drivers/md/bcache/super.c
2198 +@@ -936,7 +936,8 @@ void bch_cached_dev_detach(struct cached_dev *dc)
2199 + cached_dev_put(dc);
2200 + }
2201 +
2202 +-int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
2203 ++int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
2204 ++ uint8_t *set_uuid)
2205 + {
2206 + uint32_t rtime = cpu_to_le32(get_seconds());
2207 + struct uuid_entry *u;
2208 +@@ -945,7 +946,8 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
2209 +
2210 + bdevname(dc->bdev, buf);
2211 +
2212 +- if (memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16))
2213 ++ if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) ||
2214 ++ (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)))
2215 + return -ENOENT;
2216 +
2217 + if (dc->disk.c) {
2218 +@@ -1189,7 +1191,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
2219 +
2220 + list_add(&dc->list, &uncached_devices);
2221 + list_for_each_entry(c, &bch_cache_sets, list)
2222 +- bch_cached_dev_attach(dc, c);
2223 ++ bch_cached_dev_attach(dc, c, NULL);
2224 +
2225 + if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
2226 + BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
2227 +@@ -1711,7 +1713,7 @@ static void run_cache_set(struct cache_set *c)
2228 + bcache_write_super(c);
2229 +
2230 + list_for_each_entry_safe(dc, t, &uncached_devices, list)
2231 +- bch_cached_dev_attach(dc, c);
2232 ++ bch_cached_dev_attach(dc, c, NULL);
2233 +
2234 + flash_devs_run(c);
2235 +
2236 +@@ -1828,6 +1830,7 @@ void bch_cache_release(struct kobject *kobj)
2237 + static int cache_alloc(struct cache_sb *sb, struct cache *ca)
2238 + {
2239 + size_t free;
2240 ++ size_t btree_buckets;
2241 + struct bucket *b;
2242 +
2243 + __module_get(THIS_MODULE);
2244 +@@ -1837,9 +1840,19 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
2245 + ca->journal.bio.bi_max_vecs = 8;
2246 + ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
2247 +
2248 ++ /*
2249 ++ * when ca->sb.njournal_buckets is not zero, journal exists,
2250 ++ * and in bch_journal_replay(), tree node may split,
2251 ++ * so bucket of RESERVE_BTREE type is needed,
2252 ++ * the worst situation is all journal buckets are valid journal,
2253 ++ * and all the keys need to replay,
2254 ++ * so the number of RESERVE_BTREE type buckets should be as much
2255 ++ * as journal buckets
2256 ++ */
2257 ++ btree_buckets = ca->sb.njournal_buckets ?: 8;
2258 + free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
2259 +
2260 +- if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) ||
2261 ++ if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets, GFP_KERNEL) ||
2262 + !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
2263 + !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
2264 + !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
2265 +diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
2266 +index 4fbb5532f24c..5a5c1f1bd8a5 100644
2267 +--- a/drivers/md/bcache/sysfs.c
2268 ++++ b/drivers/md/bcache/sysfs.c
2269 +@@ -191,7 +191,7 @@ STORE(__cached_dev)
2270 + {
2271 + struct cached_dev *dc = container_of(kobj, struct cached_dev,
2272 + disk.kobj);
2273 +- ssize_t v = size;
2274 ++ ssize_t v;
2275 + struct cache_set *c;
2276 + struct kobj_uevent_env *env;
2277 +
2278 +@@ -263,17 +263,20 @@ STORE(__cached_dev)
2279 + }
2280 +
2281 + if (attr == &sysfs_attach) {
2282 +- if (bch_parse_uuid(buf, dc->sb.set_uuid) < 16)
2283 ++ uint8_t set_uuid[16];
2284 ++
2285 ++ if (bch_parse_uuid(buf, set_uuid) < 16)
2286 + return -EINVAL;
2287 +
2288 ++ v = -ENOENT;
2289 + list_for_each_entry(c, &bch_cache_sets, list) {
2290 +- v = bch_cached_dev_attach(dc, c);
2291 ++ v = bch_cached_dev_attach(dc, c, set_uuid);
2292 + if (!v)
2293 + return size;
2294 + }
2295 +
2296 + pr_err("Can't attach %s: cache set not found", buf);
2297 +- size = v;
2298 ++ return v;
2299 + }
2300 +
2301 + if (attr == &sysfs_detach && dc->disk.c)
2302 +diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
2303 +index bbb1dc9e1639..f2c0000de613 100644
2304 +--- a/drivers/md/bcache/writeback.c
2305 ++++ b/drivers/md/bcache/writeback.c
2306 +@@ -425,19 +425,28 @@ static int bch_writeback_thread(void *arg)
2307 +
2308 + while (!kthread_should_stop()) {
2309 + down_write(&dc->writeback_lock);
2310 +- if (!atomic_read(&dc->has_dirty) ||
2311 +- (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
2312 +- !dc->writeback_running)) {
2313 ++ set_current_state(TASK_INTERRUPTIBLE);
2314 ++ /*
2315 ++ * If the bache device is detaching, skip here and continue
2316 ++ * to perform writeback. Otherwise, if no dirty data on cache,
2317 ++ * or there is dirty data on cache but writeback is disabled,
2318 ++ * the writeback thread should sleep here and wait for others
2319 ++ * to wake up it.
2320 ++ */
2321 ++ if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
2322 ++ (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) {
2323 + up_write(&dc->writeback_lock);
2324 +- set_current_state(TASK_INTERRUPTIBLE);
2325 +
2326 +- if (kthread_should_stop())
2327 ++ if (kthread_should_stop()) {
2328 ++ set_current_state(TASK_RUNNING);
2329 + return 0;
2330 ++ }
2331 +
2332 + try_to_freeze();
2333 + schedule();
2334 + continue;
2335 + }
2336 ++ set_current_state(TASK_RUNNING);
2337 +
2338 + searched_full_index = refill_dirty(dc);
2339 +
2340 +@@ -447,6 +456,14 @@ static int bch_writeback_thread(void *arg)
2341 + cached_dev_put(dc);
2342 + SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
2343 + bch_write_bdev_super(dc, NULL);
2344 ++ /*
2345 ++ * If bcache device is detaching via sysfs interface,
2346 ++ * writeback thread should stop after there is no dirty
2347 ++ * data on cache. BCACHE_DEV_DETACHING flag is set in
2348 ++ * bch_cached_dev_detach().
2349 ++ */
2350 ++ if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
2351 ++ break;
2352 + }
2353 +
2354 + up_write(&dc->writeback_lock);
2355 +diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
2356 +index f24a9e14021d..89dcbf2fa846 100644
2357 +--- a/drivers/md/raid1.c
2358 ++++ b/drivers/md/raid1.c
2359 +@@ -1686,6 +1686,17 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
2360 + struct md_rdev *repl =
2361 + conf->mirrors[conf->raid_disks + number].rdev;
2362 + freeze_array(conf, 0);
2363 ++ if (atomic_read(&repl->nr_pending)) {
2364 ++ /* It means that some queued IO of retry_list
2365 ++ * hold repl. Thus, we cannot set replacement
2366 ++ * as NULL, avoiding rdev NULL pointer
2367 ++ * dereference in sync_request_write and
2368 ++ * handle_write_finished.
2369 ++ */
2370 ++ err = -EBUSY;
2371 ++ unfreeze_array(conf);
2372 ++ goto abort;
2373 ++ }
2374 + clear_bit(Replacement, &repl->flags);
2375 + p->rdev = repl;
2376 + conf->mirrors[conf->raid_disks + number].rdev = NULL;
2377 +diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
2378 +index bf0410403a6f..7b6acedc89c1 100644
2379 +--- a/drivers/md/raid10.c
2380 ++++ b/drivers/md/raid10.c
2381 +@@ -2630,7 +2630,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
2382 + for (m = 0; m < conf->copies; m++) {
2383 + int dev = r10_bio->devs[m].devnum;
2384 + rdev = conf->mirrors[dev].rdev;
2385 +- if (r10_bio->devs[m].bio == NULL)
2386 ++ if (r10_bio->devs[m].bio == NULL ||
2387 ++ r10_bio->devs[m].bio->bi_end_io == NULL)
2388 + continue;
2389 + if (!r10_bio->devs[m].bio->bi_error) {
2390 + rdev_clear_badblocks(
2391 +@@ -2645,7 +2646,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
2392 + md_error(conf->mddev, rdev);
2393 + }
2394 + rdev = conf->mirrors[dev].replacement;
2395 +- if (r10_bio->devs[m].repl_bio == NULL)
2396 ++ if (r10_bio->devs[m].repl_bio == NULL ||
2397 ++ r10_bio->devs[m].repl_bio->bi_end_io == NULL)
2398 + continue;
2399 +
2400 + if (!r10_bio->devs[m].repl_bio->bi_error) {
2401 +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
2402 +index e2130fb4597d..d59b861764a1 100644
2403 +--- a/drivers/md/raid5.c
2404 ++++ b/drivers/md/raid5.c
2405 +@@ -2028,15 +2028,16 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
2406 + static int grow_stripes(struct r5conf *conf, int num)
2407 + {
2408 + struct kmem_cache *sc;
2409 ++ size_t namelen = sizeof(conf->cache_name[0]);
2410 + int devs = max(conf->raid_disks, conf->previous_raid_disks);
2411 +
2412 + if (conf->mddev->gendisk)
2413 +- sprintf(conf->cache_name[0],
2414 ++ snprintf(conf->cache_name[0], namelen,
2415 + "raid%d-%s", conf->level, mdname(conf->mddev));
2416 + else
2417 +- sprintf(conf->cache_name[0],
2418 ++ snprintf(conf->cache_name[0], namelen,
2419 + "raid%d-%p", conf->level, conf->mddev);
2420 +- sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
2421 ++ snprintf(conf->cache_name[1], namelen, "%.27s-alt", conf->cache_name[0]);
2422 +
2423 + conf->active_name = 0;
2424 + sc = kmem_cache_create(conf->cache_name[conf->active_name],
2425 +diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
2426 +index ea9abde902e9..209db65ab610 100644
2427 +--- a/drivers/media/dvb-core/dmxdev.c
2428 ++++ b/drivers/media/dvb-core/dmxdev.c
2429 +@@ -1071,7 +1071,7 @@ static int dvb_demux_do_ioctl(struct file *file,
2430 + break;
2431 +
2432 + default:
2433 +- ret = -EINVAL;
2434 ++ ret = -ENOTTY;
2435 + break;
2436 + }
2437 + mutex_unlock(&dmxdev->mutex);
2438 +diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
2439 +index f384f295676e..679d122af63c 100644
2440 +--- a/drivers/media/pci/cx23885/cx23885-cards.c
2441 ++++ b/drivers/media/pci/cx23885/cx23885-cards.c
2442 +@@ -2124,6 +2124,10 @@ void cx23885_card_setup(struct cx23885_dev *dev)
2443 + &dev->i2c_bus[2].i2c_adap,
2444 + "cx25840", 0x88 >> 1, NULL);
2445 + if (dev->sd_cx25840) {
2446 ++ /* set host data for clk_freq configuration */
2447 ++ v4l2_set_subdev_hostdata(dev->sd_cx25840,
2448 ++ &dev->clk_freq);
2449 ++
2450 + dev->sd_cx25840->grp_id = CX23885_HW_AV_CORE;
2451 + v4l2_subdev_call(dev->sd_cx25840, core, load_fw);
2452 + }
2453 +diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
2454 +index e8f847226a19..6eb3be13b430 100644
2455 +--- a/drivers/media/pci/cx23885/cx23885-core.c
2456 ++++ b/drivers/media/pci/cx23885/cx23885-core.c
2457 +@@ -872,6 +872,16 @@ static int cx23885_dev_setup(struct cx23885_dev *dev)
2458 + if (cx23885_boards[dev->board].clk_freq > 0)
2459 + dev->clk_freq = cx23885_boards[dev->board].clk_freq;
2460 +
2461 ++ if (dev->board == CX23885_BOARD_HAUPPAUGE_IMPACTVCBE &&
2462 ++ dev->pci->subsystem_device == 0x7137) {
2463 ++ /* Hauppauge ImpactVCBe device ID 0x7137 is populated
2464 ++ * with an 888, and a 25MHz crystal, instead of the
2465 ++ * usual third overtone 50MHz. The default clock rate must
2466 ++ * be overridden so the cx25840 is properly configured
2467 ++ */
2468 ++ dev->clk_freq = 25000000;
2469 ++ }
2470 ++
2471 + dev->pci_bus = dev->pci->bus->number;
2472 + dev->pci_slot = PCI_SLOT(dev->pci->devfn);
2473 + cx23885_irq_add(dev, 0x001f00);
2474 +diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c
2475 +index 0042803a9de7..54398d8a4696 100644
2476 +--- a/drivers/media/pci/cx25821/cx25821-core.c
2477 ++++ b/drivers/media/pci/cx25821/cx25821-core.c
2478 +@@ -871,6 +871,10 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
2479 + dev->nr = ++cx25821_devcount;
2480 + sprintf(dev->name, "cx25821[%d]", dev->nr);
2481 +
2482 ++ if (dev->nr >= ARRAY_SIZE(card)) {
2483 ++ CX25821_INFO("dev->nr >= %zd", ARRAY_SIZE(card));
2484 ++ return -ENODEV;
2485 ++ }
2486 + if (dev->pci->device != 0x8210) {
2487 + pr_info("%s(): Exiting. Incorrect Hardware device = 0x%02x\n",
2488 + __func__, dev->pci->device);
2489 +@@ -886,9 +890,6 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
2490 + dev->channels[i].sram_channels = &cx25821_sram_channels[i];
2491 + }
2492 +
2493 +- if (dev->nr > 1)
2494 +- CX25821_INFO("dev->nr > 1!");
2495 +-
2496 + /* board config */
2497 + dev->board = 1; /* card[dev->nr]; */
2498 + dev->_max_num_decoders = MAX_DECODERS;
2499 +diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
2500 +index 537b858cb94a..fa6af4a7dae1 100644
2501 +--- a/drivers/media/platform/s3c-camif/camif-capture.c
2502 ++++ b/drivers/media/platform/s3c-camif/camif-capture.c
2503 +@@ -1268,16 +1268,17 @@ static void __camif_subdev_try_format(struct camif_dev *camif,
2504 + {
2505 + const struct s3c_camif_variant *variant = camif->variant;
2506 + const struct vp_pix_limits *pix_lim;
2507 +- int i = ARRAY_SIZE(camif_mbus_formats);
2508 ++ unsigned int i;
2509 +
2510 + /* FIXME: constraints against codec or preview path ? */
2511 + pix_lim = &variant->vp_pix_limits[VP_CODEC];
2512 +
2513 +- while (i-- >= 0)
2514 ++ for (i = 0; i < ARRAY_SIZE(camif_mbus_formats); i++)
2515 + if (camif_mbus_formats[i] == mf->code)
2516 + break;
2517 +
2518 +- mf->code = camif_mbus_formats[i];
2519 ++ if (i == ARRAY_SIZE(camif_mbus_formats))
2520 ++ mf->code = camif_mbus_formats[0];
2521 +
2522 + if (pad == CAMIF_SD_PAD_SINK) {
2523 + v4l_bound_align_image(&mf->width, 8, CAMIF_MAX_PIX_WIDTH,
2524 +diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
2525 +index 76bf8ba372b3..5b53e31ce262 100644
2526 +--- a/drivers/media/usb/em28xx/em28xx.h
2527 ++++ b/drivers/media/usb/em28xx/em28xx.h
2528 +@@ -187,7 +187,7 @@
2529 + USB 2.0 spec says bulk packet size is always 512 bytes
2530 + */
2531 + #define EM28XX_BULK_PACKET_MULTIPLIER 384
2532 +-#define EM28XX_DVB_BULK_PACKET_MULTIPLIER 384
2533 ++#define EM28XX_DVB_BULK_PACKET_MULTIPLIER 94
2534 +
2535 + #define EM28XX_INTERLACED_DEFAULT 1
2536 +
2537 +diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
2538 +index 02b5f69e1a42..14cf6dfc3b14 100644
2539 +--- a/drivers/message/fusion/mptctl.c
2540 ++++ b/drivers/message/fusion/mptctl.c
2541 +@@ -2698,6 +2698,8 @@ mptctl_hp_targetinfo(unsigned long arg)
2542 + __FILE__, __LINE__, iocnum);
2543 + return -ENODEV;
2544 + }
2545 ++ if (karg.hdr.id >= MPT_MAX_FC_DEVICES)
2546 ++ return -EINVAL;
2547 + dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n",
2548 + ioc->name));
2549 +
2550 +diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
2551 +index f280744578e4..ffd448149796 100644
2552 +--- a/drivers/mmc/host/sdhci-iproc.c
2553 ++++ b/drivers/mmc/host/sdhci-iproc.c
2554 +@@ -32,6 +32,8 @@ struct sdhci_iproc_host {
2555 + const struct sdhci_iproc_data *data;
2556 + u32 shadow_cmd;
2557 + u32 shadow_blk;
2558 ++ bool is_cmd_shadowed;
2559 ++ bool is_blk_shadowed;
2560 + };
2561 +
2562 + #define REG_OFFSET_IN_BITS(reg) ((reg) << 3 & 0x18)
2563 +@@ -47,8 +49,22 @@ static inline u32 sdhci_iproc_readl(struct sdhci_host *host, int reg)
2564 +
2565 + static u16 sdhci_iproc_readw(struct sdhci_host *host, int reg)
2566 + {
2567 +- u32 val = sdhci_iproc_readl(host, (reg & ~3));
2568 +- u16 word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff;
2569 ++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2570 ++ struct sdhci_iproc_host *iproc_host = sdhci_pltfm_priv(pltfm_host);
2571 ++ u32 val;
2572 ++ u16 word;
2573 ++
2574 ++ if ((reg == SDHCI_TRANSFER_MODE) && iproc_host->is_cmd_shadowed) {
2575 ++ /* Get the saved transfer mode */
2576 ++ val = iproc_host->shadow_cmd;
2577 ++ } else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) &&
2578 ++ iproc_host->is_blk_shadowed) {
2579 ++ /* Get the saved block info */
2580 ++ val = iproc_host->shadow_blk;
2581 ++ } else {
2582 ++ val = sdhci_iproc_readl(host, (reg & ~3));
2583 ++ }
2584 ++ word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff;
2585 + return word;
2586 + }
2587 +
2588 +@@ -104,13 +120,15 @@ static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg)
2589 +
2590 + if (reg == SDHCI_COMMAND) {
2591 + /* Write the block now as we are issuing a command */
2592 +- if (iproc_host->shadow_blk != 0) {
2593 ++ if (iproc_host->is_blk_shadowed) {
2594 + sdhci_iproc_writel(host, iproc_host->shadow_blk,
2595 + SDHCI_BLOCK_SIZE);
2596 +- iproc_host->shadow_blk = 0;
2597 ++ iproc_host->is_blk_shadowed = false;
2598 + }
2599 + oldval = iproc_host->shadow_cmd;
2600 +- } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) {
2601 ++ iproc_host->is_cmd_shadowed = false;
2602 ++ } else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) &&
2603 ++ iproc_host->is_blk_shadowed) {
2604 + /* Block size and count are stored in shadow reg */
2605 + oldval = iproc_host->shadow_blk;
2606 + } else {
2607 +@@ -122,9 +140,11 @@ static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg)
2608 + if (reg == SDHCI_TRANSFER_MODE) {
2609 + /* Save the transfer mode until the command is issued */
2610 + iproc_host->shadow_cmd = newval;
2611 ++ iproc_host->is_cmd_shadowed = true;
2612 + } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) {
2613 + /* Save the block info until the command is issued */
2614 + iproc_host->shadow_blk = newval;
2615 ++ iproc_host->is_blk_shadowed = true;
2616 + } else {
2617 + /* Command or other regular 32-bit write */
2618 + sdhci_iproc_writel(host, newval, reg & ~3);
2619 +diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
2620 +index a5e4b4b93d1b..ec3766264408 100644
2621 +--- a/drivers/net/ethernet/broadcom/bgmac.c
2622 ++++ b/drivers/net/ethernet/broadcom/bgmac.c
2623 +@@ -531,7 +531,8 @@ static void bgmac_dma_tx_ring_free(struct bgmac *bgmac,
2624 + int i;
2625 +
2626 + for (i = 0; i < BGMAC_TX_RING_SLOTS; i++) {
2627 +- int len = dma_desc[i].ctl1 & BGMAC_DESC_CTL1_LEN;
2628 ++ u32 ctl1 = le32_to_cpu(dma_desc[i].ctl1);
2629 ++ unsigned int len = ctl1 & BGMAC_DESC_CTL1_LEN;
2630 +
2631 + slot = &ring->slots[i];
2632 + dev_kfree_skb(slot->skb);
2633 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2634 +index a38a9cb3d544..9904d768a20a 100644
2635 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2636 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2637 +@@ -2925,6 +2925,9 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
2638 + struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
2639 + struct hwrm_vnic_tpa_cfg_input req = {0};
2640 +
2641 ++ if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
2642 ++ return 0;
2643 ++
2644 + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
2645 +
2646 + if (tpa_flags) {
2647 +diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
2648 +index b36643ef0593..0e3b2ebf87f1 100644
2649 +--- a/drivers/net/ethernet/cisco/enic/enic_main.c
2650 ++++ b/drivers/net/ethernet/cisco/enic/enic_main.c
2651 +@@ -1726,6 +1726,8 @@ static int enic_open(struct net_device *netdev)
2652 + }
2653 +
2654 + for (i = 0; i < enic->rq_count; i++) {
2655 ++ /* enable rq before updating rq desc */
2656 ++ vnic_rq_enable(&enic->rq[i]);
2657 + vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
2658 + /* Need at least one buffer on ring to get going */
2659 + if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
2660 +@@ -1737,8 +1739,6 @@ static int enic_open(struct net_device *netdev)
2661 +
2662 + for (i = 0; i < enic->wq_count; i++)
2663 + vnic_wq_enable(&enic->wq[i]);
2664 +- for (i = 0; i < enic->rq_count; i++)
2665 +- vnic_rq_enable(&enic->rq[i]);
2666 +
2667 + if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
2668 + enic_dev_add_station_addr(enic);
2669 +@@ -1765,8 +1765,12 @@ static int enic_open(struct net_device *netdev)
2670 + return 0;
2671 +
2672 + err_out_free_rq:
2673 +- for (i = 0; i < enic->rq_count; i++)
2674 ++ for (i = 0; i < enic->rq_count; i++) {
2675 ++ err = vnic_rq_disable(&enic->rq[i]);
2676 ++ if (err)
2677 ++ return err;
2678 + vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
2679 ++ }
2680 + enic_dev_notify_unset(enic);
2681 + err_out_free_intr:
2682 + enic_unset_affinity_hint(enic);
2683 +diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
2684 +index 901661149b44..2d61369f586f 100644
2685 +--- a/drivers/net/ethernet/freescale/gianfar.c
2686 ++++ b/drivers/net/ethernet/freescale/gianfar.c
2687 +@@ -3053,9 +3053,6 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
2688 + if (ndev->features & NETIF_F_RXCSUM)
2689 + gfar_rx_checksum(skb, fcb);
2690 +
2691 +- /* Tell the skb what kind of packet this is */
2692 +- skb->protocol = eth_type_trans(skb, ndev);
2693 +-
2694 + /* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here.
2695 + * Even if vlan rx accel is disabled, on some chips
2696 + * RXFCB_VLN is pseudo randomly set.
2697 +@@ -3126,13 +3123,15 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2698 + continue;
2699 + }
2700 +
2701 ++ gfar_process_frame(ndev, skb);
2702 ++
2703 + /* Increment the number of packets */
2704 + total_pkts++;
2705 + total_bytes += skb->len;
2706 +
2707 + skb_record_rx_queue(skb, rx_queue->qindex);
2708 +
2709 +- gfar_process_frame(ndev, skb);
2710 ++ skb->protocol = eth_type_trans(skb, ndev);
2711 +
2712 + /* Send the packet up the stack */
2713 + napi_gro_receive(&rx_queue->grp->napi_rx, skb);
2714 +diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
2715 +index 1908a38e7f31..485b9cc53f8b 100644
2716 +--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
2717 ++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
2718 +@@ -1574,7 +1574,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
2719 + * we have already determined whether we have link or not.
2720 + */
2721 + if (!mac->autoneg)
2722 +- return -E1000_ERR_CONFIG;
2723 ++ return 1;
2724 +
2725 + /* Auto-Neg is enabled. Auto Speed Detection takes care
2726 + * of MAC speed/duplex configuration. So we only need to
2727 +diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
2728 +index 645ace74429e..fe133f33a6c6 100644
2729 +--- a/drivers/net/ethernet/intel/e1000e/mac.c
2730 ++++ b/drivers/net/ethernet/intel/e1000e/mac.c
2731 +@@ -450,7 +450,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
2732 + * we have already determined whether we have link or not.
2733 + */
2734 + if (!mac->autoneg)
2735 +- return -E1000_ERR_CONFIG;
2736 ++ return 1;
2737 +
2738 + /* Auto-Neg is enabled. Auto Speed Detection takes care
2739 + * of MAC speed/duplex configuration. So we only need to
2740 +diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
2741 +index 20d8806d2bff..6369d88b81c1 100644
2742 +--- a/drivers/net/ethernet/intel/e1000e/netdev.c
2743 ++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
2744 +@@ -2330,8 +2330,8 @@ static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
2745 + {
2746 + struct pci_dev *pdev = adapter->pdev;
2747 +
2748 +- ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
2749 +- GFP_KERNEL);
2750 ++ ring->desc = dma_zalloc_coherent(&pdev->dev, ring->size, &ring->dma,
2751 ++ GFP_KERNEL);
2752 + if (!ring->desc)
2753 + return -ENOMEM;
2754 +
2755 +diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
2756 +index 7430dd44019e..ea693bbf56d8 100644
2757 +--- a/drivers/net/ethernet/marvell/mvneta.c
2758 ++++ b/drivers/net/ethernet/marvell/mvneta.c
2759 +@@ -818,6 +818,7 @@ static void mvneta_port_up(struct mvneta_port *pp)
2760 + }
2761 + mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
2762 +
2763 ++ q_map = 0;
2764 + /* Enable all initialized RXQs. */
2765 + mvreg_write(pp, MVNETA_RXQ_CMD, BIT(rxq_def));
2766 + }
2767 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
2768 +index 6c66d2979795..16bd585365a8 100644
2769 +--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
2770 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
2771 +@@ -1623,7 +1623,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
2772 +
2773 + cmd->checksum_disabled = 1;
2774 + cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
2775 +- cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;
2776 ++ cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;
2777 +
2778 + cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
2779 + if (cmd->cmdif_rev > CMD_IF_REV) {
2780 +diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
2781 +index cc106d892e29..b15e322b8bfe 100644
2782 +--- a/drivers/net/ethernet/sun/sunvnet.c
2783 ++++ b/drivers/net/ethernet/sun/sunvnet.c
2784 +@@ -1787,7 +1787,7 @@ static struct vnet *vnet_new(const u64 *local_mac,
2785 + dev->ethtool_ops = &vnet_ethtool_ops;
2786 + dev->watchdog_timeo = VNET_TX_TIMEOUT;
2787 +
2788 +- dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE |
2789 ++ dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_ALL_TSO |
2790 + NETIF_F_HW_CSUM | NETIF_F_SG;
2791 + dev->features = dev->hw_features;
2792 +
2793 +diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
2794 +index e83acc608678..dc934347ae28 100644
2795 +--- a/drivers/net/phy/dp83640.c
2796 ++++ b/drivers/net/phy/dp83640.c
2797 +@@ -1203,6 +1203,23 @@ static void dp83640_remove(struct phy_device *phydev)
2798 + kfree(dp83640);
2799 + }
2800 +
2801 ++static int dp83640_soft_reset(struct phy_device *phydev)
2802 ++{
2803 ++ int ret;
2804 ++
2805 ++ ret = genphy_soft_reset(phydev);
2806 ++ if (ret < 0)
2807 ++ return ret;
2808 ++
2809 ++ /* From DP83640 datasheet: "Software driver code must wait 3 us
2810 ++ * following a software reset before allowing further serial MII
2811 ++ * operations with the DP83640."
2812 ++ */
2813 ++ udelay(10); /* Taking udelay inaccuracy into account */
2814 ++
2815 ++ return 0;
2816 ++}
2817 ++
2818 + static int dp83640_config_init(struct phy_device *phydev)
2819 + {
2820 + struct dp83640_private *dp83640 = phydev->priv;
2821 +@@ -1496,6 +1513,7 @@ static struct phy_driver dp83640_driver = {
2822 + .flags = PHY_HAS_INTERRUPT,
2823 + .probe = dp83640_probe,
2824 + .remove = dp83640_remove,
2825 ++ .soft_reset = dp83640_soft_reset,
2826 + .config_init = dp83640_config_init,
2827 + .config_aneg = genphy_config_aneg,
2828 + .read_status = genphy_read_status,
2829 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
2830 +index 8aaa09b3c753..d72205f06a1d 100644
2831 +--- a/drivers/net/usb/qmi_wwan.c
2832 ++++ b/drivers/net/usb/qmi_wwan.c
2833 +@@ -637,6 +637,9 @@ static const struct usb_device_id products[] = {
2834 + {QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
2835 + {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
2836 + {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
2837 ++ {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */
2838 ++ {QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */
2839 ++ {QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */
2840 + {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
2841 + {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */
2842 + {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */
2843 +@@ -713,6 +716,7 @@ static const struct usb_device_id products[] = {
2844 + {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
2845 + {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
2846 + {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
2847 ++ {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
2848 + {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
2849 + {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
2850 + {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
2851 +@@ -762,6 +766,7 @@ static const struct usb_device_id products[] = {
2852 + {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
2853 + {QMI_FIXED_INTF(0x413c, 0x81b1, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
2854 + {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
2855 ++ {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
2856 + {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
2857 + {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */
2858 +
2859 +diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
2860 +index b2c1a435357f..2991d7155540 100644
2861 +--- a/drivers/net/usb/r8152.c
2862 ++++ b/drivers/net/usb/r8152.c
2863 +@@ -1610,7 +1610,7 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
2864 +
2865 + tx_data += len;
2866 + agg->skb_len += len;
2867 +- agg->skb_num++;
2868 ++ agg->skb_num += skb_shinfo(skb)->gso_segs ?: 1;
2869 +
2870 + dev_kfree_skb_any(skb);
2871 +
2872 +diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
2873 +index c5f375befd2f..7337e6c0e126 100644
2874 +--- a/drivers/net/usb/smsc75xx.c
2875 ++++ b/drivers/net/usb/smsc75xx.c
2876 +@@ -945,10 +945,11 @@ static int smsc75xx_set_features(struct net_device *netdev,
2877 + /* it's racing here! */
2878 +
2879 + ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2880 +- if (ret < 0)
2881 ++ if (ret < 0) {
2882 + netdev_warn(dev->net, "Error writing RFE_CTL\n");
2883 +-
2884 +- return ret;
2885 ++ return ret;
2886 ++ }
2887 ++ return 0;
2888 + }
2889 +
2890 + static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm)
2891 +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
2892 +index d01285250204..2759d386ade7 100644
2893 +--- a/drivers/net/virtio_net.c
2894 ++++ b/drivers/net/virtio_net.c
2895 +@@ -1912,8 +1912,8 @@ static int virtnet_probe(struct virtio_device *vdev)
2896 +
2897 + /* Assume link up if device can't report link status,
2898 + otherwise get link status from config. */
2899 ++ netif_carrier_off(dev);
2900 + if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
2901 +- netif_carrier_off(dev);
2902 + schedule_work(&vi->config_work);
2903 + } else {
2904 + vi->status = VIRTIO_NET_S_LINK_UP;
2905 +diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
2906 +index 0c8efdff4843..916b9b12edd2 100644
2907 +--- a/drivers/net/wireless/ath/ath10k/mac.c
2908 ++++ b/drivers/net/wireless/ath/ath10k/mac.c
2909 +@@ -6311,10 +6311,20 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
2910 + {
2911 + struct ath10k *ar = hw->priv;
2912 + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
2913 ++ struct ath10k_vif *arvif = (void *)vif->drv_priv;
2914 ++ struct ath10k_peer *peer;
2915 + u32 bw, smps;
2916 +
2917 + spin_lock_bh(&ar->data_lock);
2918 +
2919 ++ peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
2920 ++ if (!peer) {
2921 ++ spin_unlock_bh(&ar->data_lock);
2922 ++ ath10k_warn(ar, "mac sta rc update failed to find peer %pM on vdev %i\n",
2923 ++ sta->addr, arvif->vdev_id);
2924 ++ return;
2925 ++ }
2926 ++
2927 + ath10k_dbg(ar, ATH10K_DBG_MAC,
2928 + "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
2929 + sta->addr, changed, sta->bandwidth, sta->rx_nss,
2930 +diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
2931 +index 8a9164da6c50..e8b770a95f7a 100644
2932 +--- a/drivers/net/wireless/mac80211_hwsim.c
2933 ++++ b/drivers/net/wireless/mac80211_hwsim.c
2934 +@@ -2925,8 +2925,10 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
2935 + if (info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]) {
2936 + u32 idx = nla_get_u32(info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]);
2937 +
2938 +- if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom))
2939 ++ if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom)) {
2940 ++ kfree(hwname);
2941 + return -EINVAL;
2942 ++ }
2943 + param.regd = hwsim_world_regdom_custom[idx];
2944 + }
2945 +
2946 +diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
2947 +index fee4c01fbdfd..a0de2453fa09 100644
2948 +--- a/drivers/net/xen-netfront.c
2949 ++++ b/drivers/net/xen-netfront.c
2950 +@@ -342,6 +342,9 @@ static int xennet_open(struct net_device *dev)
2951 + unsigned int i = 0;
2952 + struct netfront_queue *queue = NULL;
2953 +
2954 ++ if (!np->queues)
2955 ++ return -ENODEV;
2956 ++
2957 + for (i = 0; i < num_queues; ++i) {
2958 + queue = &np->queues[i];
2959 + napi_enable(&queue->napi);
2960 +@@ -1363,18 +1366,8 @@ static int netfront_probe(struct xenbus_device *dev,
2961 + #ifdef CONFIG_SYSFS
2962 + info->netdev->sysfs_groups[0] = &xennet_dev_group;
2963 + #endif
2964 +- err = register_netdev(info->netdev);
2965 +- if (err) {
2966 +- pr_warn("%s: register_netdev err=%d\n", __func__, err);
2967 +- goto fail;
2968 +- }
2969 +
2970 + return 0;
2971 +-
2972 +- fail:
2973 +- xennet_free_netdev(netdev);
2974 +- dev_set_drvdata(&dev->dev, NULL);
2975 +- return err;
2976 + }
2977 +
2978 + static void xennet_end_access(int ref, void *page)
2979 +@@ -1743,8 +1736,6 @@ static void xennet_destroy_queues(struct netfront_info *info)
2980 + {
2981 + unsigned int i;
2982 +
2983 +- rtnl_lock();
2984 +-
2985 + for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
2986 + struct netfront_queue *queue = &info->queues[i];
2987 +
2988 +@@ -1753,8 +1744,6 @@ static void xennet_destroy_queues(struct netfront_info *info)
2989 + netif_napi_del(&queue->napi);
2990 + }
2991 +
2992 +- rtnl_unlock();
2993 +-
2994 + kfree(info->queues);
2995 + info->queues = NULL;
2996 + }
2997 +@@ -1770,8 +1759,6 @@ static int xennet_create_queues(struct netfront_info *info,
2998 + if (!info->queues)
2999 + return -ENOMEM;
3000 +
3001 +- rtnl_lock();
3002 +-
3003 + for (i = 0; i < *num_queues; i++) {
3004 + struct netfront_queue *queue = &info->queues[i];
3005 +
3006 +@@ -1780,7 +1767,7 @@ static int xennet_create_queues(struct netfront_info *info,
3007 +
3008 + ret = xennet_init_queue(queue);
3009 + if (ret < 0) {
3010 +- dev_warn(&info->netdev->dev,
3011 ++ dev_warn(&info->xbdev->dev,
3012 + "only created %d queues\n", i);
3013 + *num_queues = i;
3014 + break;
3015 +@@ -1794,10 +1781,8 @@ static int xennet_create_queues(struct netfront_info *info,
3016 +
3017 + netif_set_real_num_tx_queues(info->netdev, *num_queues);
3018 +
3019 +- rtnl_unlock();
3020 +-
3021 + if (*num_queues == 0) {
3022 +- dev_err(&info->netdev->dev, "no queues\n");
3023 ++ dev_err(&info->xbdev->dev, "no queues\n");
3024 + return -EINVAL;
3025 + }
3026 + return 0;
3027 +@@ -1839,6 +1824,7 @@ static int talk_to_netback(struct xenbus_device *dev,
3028 + goto out;
3029 + }
3030 +
3031 ++ rtnl_lock();
3032 + if (info->queues)
3033 + xennet_destroy_queues(info);
3034 +
3035 +@@ -1849,6 +1835,7 @@ static int talk_to_netback(struct xenbus_device *dev,
3036 + info->queues = NULL;
3037 + goto out;
3038 + }
3039 ++ rtnl_unlock();
3040 +
3041 + /* Create shared ring, alloc event channel -- for each queue */
3042 + for (i = 0; i < num_queues; ++i) {
3043 +@@ -1945,8 +1932,10 @@ abort_transaction_no_dev_fatal:
3044 + xenbus_transaction_end(xbt, 1);
3045 + destroy_ring:
3046 + xennet_disconnect_backend(info);
3047 ++ rtnl_lock();
3048 + xennet_destroy_queues(info);
3049 + out:
3050 ++ rtnl_unlock();
3051 + device_unregister(&dev->dev);
3052 + return err;
3053 + }
3054 +@@ -1982,6 +1971,15 @@ static int xennet_connect(struct net_device *dev)
3055 + netdev_update_features(dev);
3056 + rtnl_unlock();
3057 +
3058 ++ if (dev->reg_state == NETREG_UNINITIALIZED) {
3059 ++ err = register_netdev(dev);
3060 ++ if (err) {
3061 ++ pr_warn("%s: register_netdev err=%d\n", __func__, err);
3062 ++ device_unregister(&np->xbdev->dev);
3063 ++ return err;
3064 ++ }
3065 ++ }
3066 ++
3067 + /*
3068 + * All public and private state should now be sane. Get
3069 + * ready to start sending and receiving packets and give the driver
3070 +@@ -2172,10 +2170,14 @@ static int xennet_remove(struct xenbus_device *dev)
3071 +
3072 + xennet_disconnect_backend(info);
3073 +
3074 +- unregister_netdev(info->netdev);
3075 ++ if (info->netdev->reg_state == NETREG_REGISTERED)
3076 ++ unregister_netdev(info->netdev);
3077 +
3078 +- if (info->queues)
3079 ++ if (info->queues) {
3080 ++ rtnl_lock();
3081 + xennet_destroy_queues(info);
3082 ++ rtnl_unlock();
3083 ++ }
3084 + xennet_free_netdev(info->netdev);
3085 +
3086 + return 0;
3087 +diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
3088 +index 3bbdf60f8908..49f3fba75f4d 100644
3089 +--- a/drivers/ntb/ntb_transport.c
3090 ++++ b/drivers/ntb/ntb_transport.c
3091 +@@ -955,6 +955,9 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
3092 + mw_base = nt->mw_vec[mw_num].phys_addr;
3093 + mw_size = nt->mw_vec[mw_num].phys_size;
3094 +
3095 ++ if (max_mw_size && mw_size > max_mw_size)
3096 ++ mw_size = max_mw_size;
3097 ++
3098 + tx_size = (unsigned int)mw_size / num_qps_mw;
3099 + qp_offset = tx_size * (qp_num / mw_count);
3100 +
3101 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
3102 +index 1c8aedf21370..e86fcc9e9852 100644
3103 +--- a/drivers/nvme/host/pci.c
3104 ++++ b/drivers/nvme/host/pci.c
3105 +@@ -1583,7 +1583,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
3106 + nvmeq->cq_vector = qid - 1;
3107 + result = adapter_alloc_cq(dev, qid, nvmeq);
3108 + if (result < 0)
3109 +- return result;
3110 ++ goto release_vector;
3111 +
3112 + result = adapter_alloc_sq(dev, qid, nvmeq);
3113 + if (result < 0)
3114 +@@ -1597,9 +1597,12 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
3115 + return result;
3116 +
3117 + release_sq:
3118 ++ dev->online_queues--;
3119 + adapter_delete_sq(dev, qid);
3120 + release_cq:
3121 + adapter_delete_cq(dev, qid);
3122 ++ release_vector:
3123 ++ nvmeq->cq_vector = -1;
3124 + return result;
3125 + }
3126 +
3127 +diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
3128 +index 312cb5b74dec..1d288fa4f4d6 100644
3129 +--- a/drivers/parisc/lba_pci.c
3130 ++++ b/drivers/parisc/lba_pci.c
3131 +@@ -1365,9 +1365,27 @@ lba_hw_init(struct lba_device *d)
3132 + WRITE_REG32(stat, d->hba.base_addr + LBA_ERROR_CONFIG);
3133 + }
3134 +
3135 +- /* Set HF mode as the default (vs. -1 mode). */
3136 ++
3137 ++ /*
3138 ++ * Hard Fail vs. Soft Fail on PCI "Master Abort".
3139 ++ *
3140 ++ * "Master Abort" means the MMIO transaction timed out - usually due to
3141 ++ * the device not responding to an MMIO read. We would like HF to be
3142 ++ * enabled to find driver problems, though it means the system will
3143 ++ * crash with a HPMC.
3144 ++ *
3145 ++ * In SoftFail mode "~0L" is returned as a result of a timeout on the
3146 ++ * pci bus. This is like how PCI buses on x86 and most other
3147 ++ * architectures behave. In order to increase compatibility with
3148 ++ * existing (x86) PCI hardware and existing Linux drivers we enable
3149 ++ * Soft Fail mode on PA-RISC now too.
3150 ++ */
3151 + stat = READ_REG32(d->hba.base_addr + LBA_STAT_CTL);
3152 ++#if defined(ENABLE_HARDFAIL)
3153 + WRITE_REG32(stat | HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL);
3154 ++#else
3155 ++ WRITE_REG32(stat & ~HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL);
3156 ++#endif
3157 +
3158 + /*
3159 + ** Writing a zero to STAT_CTL.rf (bit 0) will clear reset signal
3160 +diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
3161 +index 32bd8ab79d53..dd9ebdc968c8 100644
3162 +--- a/drivers/pci/pci-driver.c
3163 ++++ b/drivers/pci/pci-driver.c
3164 +@@ -1140,11 +1140,14 @@ static int pci_pm_runtime_suspend(struct device *dev)
3165 + int error;
3166 +
3167 + /*
3168 +- * If pci_dev->driver is not set (unbound), the device should
3169 +- * always remain in D0 regardless of the runtime PM status
3170 ++ * If pci_dev->driver is not set (unbound), we leave the device in D0,
3171 ++ * but it may go to D3cold when the bridge above it runtime suspends.
3172 ++ * Save its config space in case that happens.
3173 + */
3174 +- if (!pci_dev->driver)
3175 ++ if (!pci_dev->driver) {
3176 ++ pci_save_state(pci_dev);
3177 + return 0;
3178 ++ }
3179 +
3180 + if (!pm || !pm->runtime_suspend)
3181 + return -ENOSYS;
3182 +@@ -1195,16 +1198,18 @@ static int pci_pm_runtime_resume(struct device *dev)
3183 + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
3184 +
3185 + /*
3186 +- * If pci_dev->driver is not set (unbound), the device should
3187 +- * always remain in D0 regardless of the runtime PM status
3188 ++ * Restoring config space is necessary even if the device is not bound
3189 ++ * to a driver because although we left it in D0, it may have gone to
3190 ++ * D3cold when the bridge above it runtime suspended.
3191 + */
3192 ++ pci_restore_standard_config(pci_dev);
3193 ++
3194 + if (!pci_dev->driver)
3195 + return 0;
3196 +
3197 + if (!pm || !pm->runtime_resume)
3198 + return -ENOSYS;
3199 +
3200 +- pci_restore_standard_config(pci_dev);
3201 + pci_fixup_device(pci_fixup_resume_early, pci_dev);
3202 + __pci_enable_wake(pci_dev, PCI_D0, true, false);
3203 + pci_fixup_device(pci_fixup_resume, pci_dev);
3204 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
3205 +index 4eb1cf0ed00c..5697b32819cb 100644
3206 +--- a/drivers/pci/quirks.c
3207 ++++ b/drivers/pci/quirks.c
3208 +@@ -3614,6 +3614,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9120,
3209 + quirk_dma_func1_alias);
3210 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123,
3211 + quirk_dma_func1_alias);
3212 ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128,
3213 ++ quirk_dma_func1_alias);
3214 + /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */
3215 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
3216 + quirk_dma_func1_alias);
3217 +@@ -3626,6 +3628,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x917a,
3218 + /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */
3219 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0,
3220 + quirk_dma_func1_alias);
3221 ++/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c127 */
3222 ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9220,
3223 ++ quirk_dma_func1_alias);
3224 + /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c49 */
3225 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230,
3226 + quirk_dma_func1_alias);
3227 +diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
3228 +index 499e437c7e91..f9d77b4c44ef 100644
3229 +--- a/drivers/regulator/of_regulator.c
3230 ++++ b/drivers/regulator/of_regulator.c
3231 +@@ -274,6 +274,7 @@ int of_regulator_match(struct device *dev, struct device_node *node,
3232 + dev_err(dev,
3233 + "failed to parse DT for regulator %s\n",
3234 + child->name);
3235 ++ of_node_put(child);
3236 + return -EINVAL;
3237 + }
3238 + match->of_node = of_node_get(child);
3239 +diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c
3240 +index e1cfa06810ef..e79f2a181ad2 100644
3241 +--- a/drivers/rtc/hctosys.c
3242 ++++ b/drivers/rtc/hctosys.c
3243 +@@ -49,6 +49,11 @@ static int __init rtc_hctosys(void)
3244 +
3245 + tv64.tv_sec = rtc_tm_to_time64(&tm);
3246 +
3247 ++#if BITS_PER_LONG == 32
3248 ++ if (tv64.tv_sec > INT_MAX)
3249 ++ goto err_read;
3250 ++#endif
3251 ++
3252 + err = do_settimeofday64(&tv64);
3253 +
3254 + dev_info(rtc->dev.parent,
3255 +diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
3256 +index afab89f5be48..a161fbf6f172 100644
3257 +--- a/drivers/rtc/rtc-snvs.c
3258 ++++ b/drivers/rtc/rtc-snvs.c
3259 +@@ -132,20 +132,23 @@ static int snvs_rtc_set_time(struct device *dev, struct rtc_time *tm)
3260 + {
3261 + struct snvs_rtc_data *data = dev_get_drvdata(dev);
3262 + unsigned long time;
3263 ++ int ret;
3264 +
3265 + rtc_tm_to_time(tm, &time);
3266 +
3267 + /* Disable RTC first */
3268 +- snvs_rtc_enable(data, false);
3269 ++ ret = snvs_rtc_enable(data, false);
3270 ++ if (ret)
3271 ++ return ret;
3272 +
3273 + /* Write 32-bit time to 47-bit timer, leaving 15 LSBs blank */
3274 + regmap_write(data->regmap, data->offset + SNVS_LPSRTCLR, time << CNTR_TO_SECS_SH);
3275 + regmap_write(data->regmap, data->offset + SNVS_LPSRTCMR, time >> (32 - CNTR_TO_SECS_SH));
3276 +
3277 + /* Enable RTC again */
3278 +- snvs_rtc_enable(data, true);
3279 ++ ret = snvs_rtc_enable(data, true);
3280 +
3281 +- return 0;
3282 ++ return ret;
3283 + }
3284 +
3285 + static int snvs_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
3286 +@@ -287,7 +290,11 @@ static int snvs_rtc_probe(struct platform_device *pdev)
3287 + regmap_write(data->regmap, data->offset + SNVS_LPSR, 0xffffffff);
3288 +
3289 + /* Enable RTC */
3290 +- snvs_rtc_enable(data, true);
3291 ++ ret = snvs_rtc_enable(data, true);
3292 ++ if (ret) {
3293 ++ dev_err(&pdev->dev, "failed to enable rtc %d\n", ret);
3294 ++ goto error_rtc_device_register;
3295 ++ }
3296 +
3297 + device_init_wakeup(&pdev->dev, true);
3298 +
3299 +diff --git a/drivers/rtc/rtc-tx4939.c b/drivers/rtc/rtc-tx4939.c
3300 +index 560d9a5e0225..a9528083061d 100644
3301 +--- a/drivers/rtc/rtc-tx4939.c
3302 ++++ b/drivers/rtc/rtc-tx4939.c
3303 +@@ -86,7 +86,8 @@ static int tx4939_rtc_read_time(struct device *dev, struct rtc_time *tm)
3304 + for (i = 2; i < 6; i++)
3305 + buf[i] = __raw_readl(&rtcreg->dat);
3306 + spin_unlock_irq(&pdata->lock);
3307 +- sec = (buf[5] << 24) | (buf[4] << 16) | (buf[3] << 8) | buf[2];
3308 ++ sec = ((unsigned long)buf[5] << 24) | (buf[4] << 16) |
3309 ++ (buf[3] << 8) | buf[2];
3310 + rtc_time_to_tm(sec, tm);
3311 + return rtc_valid_tm(tm);
3312 + }
3313 +@@ -147,7 +148,8 @@ static int tx4939_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
3314 + alrm->enabled = (ctl & TX4939_RTCCTL_ALME) ? 1 : 0;
3315 + alrm->pending = (ctl & TX4939_RTCCTL_ALMD) ? 1 : 0;
3316 + spin_unlock_irq(&pdata->lock);
3317 +- sec = (buf[5] << 24) | (buf[4] << 16) | (buf[3] << 8) | buf[2];
3318 ++ sec = ((unsigned long)buf[5] << 24) | (buf[4] << 16) |
3319 ++ (buf[3] << 8) | buf[2];
3320 + rtc_time_to_tm(sec, &alrm->time);
3321 + return rtc_valid_tm(&alrm->time);
3322 + }
3323 +diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
3324 +index 92e03b42e661..3fc73b5894f0 100644
3325 +--- a/drivers/s390/cio/device_fsm.c
3326 ++++ b/drivers/s390/cio/device_fsm.c
3327 +@@ -822,6 +822,7 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
3328 +
3329 + ccw_device_set_timeout(cdev, 0);
3330 + cdev->private->iretry = 255;
3331 ++ cdev->private->async_kill_io_rc = -ETIMEDOUT;
3332 + ret = ccw_device_cancel_halt_clear(cdev);
3333 + if (ret == -EBUSY) {
3334 + ccw_device_set_timeout(cdev, 3*HZ);
3335 +@@ -898,7 +899,7 @@ ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
3336 + /* OK, i/o is dead now. Call interrupt handler. */
3337 + if (cdev->handler)
3338 + cdev->handler(cdev, cdev->private->intparm,
3339 +- ERR_PTR(-EIO));
3340 ++ ERR_PTR(cdev->private->async_kill_io_rc));
3341 + }
3342 +
3343 + static void
3344 +@@ -915,14 +916,16 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
3345 + ccw_device_online_verify(cdev, 0);
3346 + if (cdev->handler)
3347 + cdev->handler(cdev, cdev->private->intparm,
3348 +- ERR_PTR(-EIO));
3349 ++ ERR_PTR(cdev->private->async_kill_io_rc));
3350 + }
3351 +
3352 + void ccw_device_kill_io(struct ccw_device *cdev)
3353 + {
3354 + int ret;
3355 +
3356 ++ ccw_device_set_timeout(cdev, 0);
3357 + cdev->private->iretry = 255;
3358 ++ cdev->private->async_kill_io_rc = -EIO;
3359 + ret = ccw_device_cancel_halt_clear(cdev);
3360 + if (ret == -EBUSY) {
3361 + ccw_device_set_timeout(cdev, 3*HZ);
3362 +diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
3363 +index b108f4a5c7dd..b142c7a389b7 100644
3364 +--- a/drivers/s390/cio/io_sch.h
3365 ++++ b/drivers/s390/cio/io_sch.h
3366 +@@ -155,6 +155,7 @@ struct ccw_device_private {
3367 + unsigned long intparm; /* user interruption parameter */
3368 + struct qdio_irq *qdio_data;
3369 + struct irb irb; /* device status */
3370 ++ int async_kill_io_rc;
3371 + struct senseid senseid; /* SenseID info */
3372 + struct pgid pgid[8]; /* path group IDs per chpid*/
3373 + struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */
3374 +diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
3375 +index 766a9176b4ad..cf531ad8b6ee 100644
3376 +--- a/drivers/scsi/aacraid/commsup.c
3377 ++++ b/drivers/scsi/aacraid/commsup.c
3378 +@@ -1321,9 +1321,10 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
3379 + host = aac->scsi_host_ptr;
3380 + scsi_block_requests(host);
3381 + aac_adapter_disable_int(aac);
3382 +- if (aac->thread->pid != current->pid) {
3383 ++ if (aac->thread && aac->thread->pid != current->pid) {
3384 + spin_unlock_irq(host->host_lock);
3385 + kthread_stop(aac->thread);
3386 ++ aac->thread = NULL;
3387 + jafo = 1;
3388 + }
3389 +
3390 +@@ -1392,6 +1393,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
3391 + aac->name);
3392 + if (IS_ERR(aac->thread)) {
3393 + retval = PTR_ERR(aac->thread);
3394 ++ aac->thread = NULL;
3395 + goto out;
3396 + }
3397 + }
3398 +diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
3399 +index aa6eccb8940b..8da8b46da722 100644
3400 +--- a/drivers/scsi/aacraid/linit.c
3401 ++++ b/drivers/scsi/aacraid/linit.c
3402 +@@ -1085,6 +1085,7 @@ static void __aac_shutdown(struct aac_dev * aac)
3403 + up(&fib->event_wait);
3404 + }
3405 + kthread_stop(aac->thread);
3406 ++ aac->thread = NULL;
3407 + }
3408 + aac_send_shutdown(aac);
3409 + aac_adapter_disable_int(aac);
3410 +@@ -1189,8 +1190,10 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3411 + * Map in the registers from the adapter.
3412 + */
3413 + aac->base_size = AAC_MIN_FOOTPRINT_SIZE;
3414 +- if ((*aac_drivers[index].init)(aac))
3415 ++ if ((*aac_drivers[index].init)(aac)) {
3416 ++ error = -ENODEV;
3417 + goto out_unmap;
3418 ++ }
3419 +
3420 + if (aac->sync_mode) {
3421 + if (aac_sync_mode)
3422 +diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
3423 +index decdc71b6b86..f6d7c4712e66 100644
3424 +--- a/drivers/scsi/arm/fas216.c
3425 ++++ b/drivers/scsi/arm/fas216.c
3426 +@@ -2009,7 +2009,7 @@ static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt,
3427 + * have valid data in the sense buffer that could
3428 + * confuse the higher levels.
3429 + */
3430 +- memset(SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer));
3431 ++ memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
3432 + //printk("scsi%d.%c: sense buffer: ", info->host->host_no, '0' + SCpnt->device->id);
3433 + //{ int i; for (i = 0; i < 32; i++) printk("%02x ", SCpnt->sense_buffer[i]); printk("\n"); }
3434 + /*
3435 +diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
3436 +index 0002caf687dd..eb3b5c0f299f 100644
3437 +--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
3438 ++++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
3439 +@@ -1858,6 +1858,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
3440 + /* we will not receive ABTS response for this IO */
3441 + BNX2FC_IO_DBG(io_req, "Timer context finished processing "
3442 + "this scsi cmd\n");
3443 ++ return;
3444 + }
3445 +
3446 + /* Cancel the timeout_work, as we received IO completion */
3447 +diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
3448 +index 4639dac64e7f..f096766150bc 100644
3449 +--- a/drivers/scsi/lpfc/lpfc_attr.c
3450 ++++ b/drivers/scsi/lpfc/lpfc_attr.c
3451 +@@ -634,7 +634,12 @@ lpfc_issue_lip(struct Scsi_Host *shost)
3452 + LPFC_MBOXQ_t *pmboxq;
3453 + int mbxstatus = MBXERR_ERROR;
3454 +
3455 ++ /*
3456 ++ * If the link is offline, disabled or BLOCK_MGMT_IO
3457 ++ * it doesn't make any sense to allow issue_lip
3458 ++ */
3459 + if ((vport->fc_flag & FC_OFFLINE_MODE) ||
3460 ++ (phba->hba_flag & LINK_DISABLED) ||
3461 + (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO))
3462 + return -EPERM;
3463 +
3464 +diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
3465 +index be901f6db6d3..4131addfb872 100644
3466 +--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
3467 ++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
3468 +@@ -691,8 +691,9 @@ lpfc_work_done(struct lpfc_hba *phba)
3469 + (phba->hba_flag & HBA_SP_QUEUE_EVT)) {
3470 + if (pring->flag & LPFC_STOP_IOCB_EVENT) {
3471 + pring->flag |= LPFC_DEFERRED_RING_EVENT;
3472 +- /* Set the lpfc data pending flag */
3473 +- set_bit(LPFC_DATA_READY, &phba->data_flags);
3474 ++ /* Preserve legacy behavior. */
3475 ++ if (!(phba->hba_flag & HBA_SP_QUEUE_EVT))
3476 ++ set_bit(LPFC_DATA_READY, &phba->data_flags);
3477 + } else {
3478 + if (phba->link_state >= LPFC_LINK_UP) {
3479 + pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
3480 +diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
3481 +index ef43847153ea..3406586b9201 100644
3482 +--- a/drivers/scsi/lpfc/lpfc_sli.c
3483 ++++ b/drivers/scsi/lpfc/lpfc_sli.c
3484 +@@ -115,6 +115,8 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
3485 + /* set consumption flag every once in a while */
3486 + if (!((q->host_index + 1) % q->entry_repost))
3487 + bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
3488 ++ else
3489 ++ bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
3490 + if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
3491 + bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
3492 + lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
3493 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
3494 +index b868ef3b2ca3..7d67a68bcc62 100644
3495 +--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
3496 ++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
3497 +@@ -8637,7 +8637,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3498 + snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
3499 + "fw_event_%s%d", ioc->driver_name, ioc->id);
3500 + ioc->firmware_event_thread = alloc_ordered_workqueue(
3501 +- ioc->firmware_event_name, WQ_MEM_RECLAIM);
3502 ++ ioc->firmware_event_name, 0);
3503 + if (!ioc->firmware_event_thread) {
3504 + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
3505 + ioc->name, __FILE__, __LINE__, __func__);
3506 +diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
3507 +index 1f6a3b86965f..440d79e6aea5 100644
3508 +--- a/drivers/scsi/qla2xxx/qla_isr.c
3509 ++++ b/drivers/scsi/qla2xxx/qla_isr.c
3510 +@@ -268,7 +268,8 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
3511 + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
3512 +
3513 + /* Read all mbox registers? */
3514 +- mboxes = (1 << ha->mbx_count) - 1;
3515 ++ WARN_ON_ONCE(ha->mbx_count > 32);
3516 ++ mboxes = (1ULL << ha->mbx_count) - 1;
3517 + if (!ha->mcp)
3518 + ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
3519 + else
3520 +@@ -2495,7 +2496,8 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
3521 + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3522 +
3523 + /* Read all mbox registers? */
3524 +- mboxes = (1 << ha->mbx_count) - 1;
3525 ++ WARN_ON_ONCE(ha->mbx_count > 32);
3526 ++ mboxes = (1ULL << ha->mbx_count) - 1;
3527 + if (!ha->mcp)
3528 + ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
3529 + else
3530 +diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
3531 +index a7cfc270bd08..ce1d063f3e83 100644
3532 +--- a/drivers/scsi/qla4xxx/ql4_def.h
3533 ++++ b/drivers/scsi/qla4xxx/ql4_def.h
3534 +@@ -168,6 +168,8 @@
3535 + #define DEV_DB_NON_PERSISTENT 0
3536 + #define DEV_DB_PERSISTENT 1
3537 +
3538 ++#define QL4_ISP_REG_DISCONNECT 0xffffffffU
3539 ++
3540 + #define COPY_ISID(dst_isid, src_isid) { \
3541 + int i, j; \
3542 + for (i = 0, j = ISID_SIZE - 1; i < ISID_SIZE;) \
3543 +diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
3544 +index 01c3610a60cf..d8c03431d0aa 100644
3545 +--- a/drivers/scsi/qla4xxx/ql4_os.c
3546 ++++ b/drivers/scsi/qla4xxx/ql4_os.c
3547 +@@ -262,6 +262,24 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
3548 +
3549 + static struct scsi_transport_template *qla4xxx_scsi_transport;
3550 +
3551 ++static int qla4xxx_isp_check_reg(struct scsi_qla_host *ha)
3552 ++{
3553 ++ u32 reg_val = 0;
3554 ++ int rval = QLA_SUCCESS;
3555 ++
3556 ++ if (is_qla8022(ha))
3557 ++ reg_val = readl(&ha->qla4_82xx_reg->host_status);
3558 ++ else if (is_qla8032(ha) || is_qla8042(ha))
3559 ++ reg_val = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
3560 ++ else
3561 ++ reg_val = readw(&ha->reg->ctrl_status);
3562 ++
3563 ++ if (reg_val == QL4_ISP_REG_DISCONNECT)
3564 ++ rval = QLA_ERROR;
3565 ++
3566 ++ return rval;
3567 ++}
3568 ++
3569 + static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
3570 + uint32_t iface_type, uint32_t payload_size,
3571 + uint32_t pid, struct sockaddr *dst_addr)
3572 +@@ -9196,10 +9214,17 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
3573 + struct srb *srb = NULL;
3574 + int ret = SUCCESS;
3575 + int wait = 0;
3576 ++ int rval;
3577 +
3578 + ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n",
3579 + ha->host_no, id, lun, cmd, cmd->cmnd[0]);
3580 +
3581 ++ rval = qla4xxx_isp_check_reg(ha);
3582 ++ if (rval != QLA_SUCCESS) {
3583 ++ ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
3584 ++ return FAILED;
3585 ++ }
3586 ++
3587 + spin_lock_irqsave(&ha->hardware_lock, flags);
3588 + srb = (struct srb *) CMD_SP(cmd);
3589 + if (!srb) {
3590 +@@ -9251,6 +9276,7 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
3591 + struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
3592 + struct ddb_entry *ddb_entry = cmd->device->hostdata;
3593 + int ret = FAILED, stat;
3594 ++ int rval;
3595 +
3596 + if (!ddb_entry)
3597 + return ret;
3598 +@@ -9270,6 +9296,12 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
3599 + cmd, jiffies, cmd->request->timeout / HZ,
3600 + ha->dpc_flags, cmd->result, cmd->allowed));
3601 +
3602 ++ rval = qla4xxx_isp_check_reg(ha);
3603 ++ if (rval != QLA_SUCCESS) {
3604 ++ ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
3605 ++ return FAILED;
3606 ++ }
3607 ++
3608 + /* FIXME: wait for hba to go online */
3609 + stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
3610 + if (stat != QLA_SUCCESS) {
3611 +@@ -9313,6 +9345,7 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
3612 + struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
3613 + struct ddb_entry *ddb_entry = cmd->device->hostdata;
3614 + int stat, ret;
3615 ++ int rval;
3616 +
3617 + if (!ddb_entry)
3618 + return FAILED;
3619 +@@ -9330,6 +9363,12 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
3620 + ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
3621 + ha->dpc_flags, cmd->result, cmd->allowed));
3622 +
3623 ++ rval = qla4xxx_isp_check_reg(ha);
3624 ++ if (rval != QLA_SUCCESS) {
3625 ++ ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
3626 ++ return FAILED;
3627 ++ }
3628 ++
3629 + stat = qla4xxx_reset_target(ha, ddb_entry);
3630 + if (stat != QLA_SUCCESS) {
3631 + starget_printk(KERN_INFO, scsi_target(cmd->device),
3632 +@@ -9384,9 +9423,16 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
3633 + {
3634 + int return_status = FAILED;
3635 + struct scsi_qla_host *ha;
3636 ++ int rval;
3637 +
3638 + ha = to_qla_host(cmd->device->host);
3639 +
3640 ++ rval = qla4xxx_isp_check_reg(ha);
3641 ++ if (rval != QLA_SUCCESS) {
3642 ++ ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
3643 ++ return FAILED;
3644 ++ }
3645 ++
3646 + if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba)
3647 + qla4_83xx_set_idc_dontreset(ha);
3648 +
3649 +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
3650 +index 8c9e4a3ec3fb..6fffb73766de 100644
3651 +--- a/drivers/scsi/sd.c
3652 ++++ b/drivers/scsi/sd.c
3653 +@@ -2395,6 +2395,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
3654 + int res;
3655 + struct scsi_device *sdp = sdkp->device;
3656 + struct scsi_mode_data data;
3657 ++ int disk_ro = get_disk_ro(sdkp->disk);
3658 + int old_wp = sdkp->write_prot;
3659 +
3660 + set_disk_ro(sdkp->disk, 0);
3661 +@@ -2435,7 +2436,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
3662 + "Test WP failed, assume Write Enabled\n");
3663 + } else {
3664 + sdkp->write_prot = ((data.device_specific & 0x80) != 0);
3665 +- set_disk_ro(sdkp->disk, sdkp->write_prot);
3666 ++ set_disk_ro(sdkp->disk, sdkp->write_prot || disk_ro);
3667 + if (sdkp->first_scan || old_wp != sdkp->write_prot) {
3668 + sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
3669 + sdkp->write_prot ? "on" : "off");
3670 +diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
3671 +index 804586aeaffe..de53c9694b68 100644
3672 +--- a/drivers/scsi/sr.c
3673 ++++ b/drivers/scsi/sr.c
3674 +@@ -522,6 +522,8 @@ static int sr_block_open(struct block_device *bdev, fmode_t mode)
3675 + struct scsi_cd *cd;
3676 + int ret = -ENXIO;
3677 +
3678 ++ check_disk_change(bdev);
3679 ++
3680 + mutex_lock(&sr_mutex);
3681 + cd = scsi_cd_get(bdev->bd_disk);
3682 + if (cd) {
3683 +@@ -582,18 +584,28 @@ out:
3684 + static unsigned int sr_block_check_events(struct gendisk *disk,
3685 + unsigned int clearing)
3686 + {
3687 +- struct scsi_cd *cd = scsi_cd(disk);
3688 ++ unsigned int ret = 0;
3689 ++ struct scsi_cd *cd;
3690 +
3691 +- if (atomic_read(&cd->device->disk_events_disable_depth))
3692 ++ cd = scsi_cd_get(disk);
3693 ++ if (!cd)
3694 + return 0;
3695 +
3696 +- return cdrom_check_events(&cd->cdi, clearing);
3697 ++ if (!atomic_read(&cd->device->disk_events_disable_depth))
3698 ++ ret = cdrom_check_events(&cd->cdi, clearing);
3699 ++
3700 ++ scsi_cd_put(cd);
3701 ++ return ret;
3702 + }
3703 +
3704 + static int sr_block_revalidate_disk(struct gendisk *disk)
3705 + {
3706 +- struct scsi_cd *cd = scsi_cd(disk);
3707 + struct scsi_sense_hdr sshdr;
3708 ++ struct scsi_cd *cd;
3709 ++
3710 ++ cd = scsi_cd_get(disk);
3711 ++ if (!cd)
3712 ++ return -ENXIO;
3713 +
3714 + /* if the unit is not ready, nothing more to do */
3715 + if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr))
3716 +@@ -602,6 +614,7 @@ static int sr_block_revalidate_disk(struct gendisk *disk)
3717 + sr_cd_check(&cd->cdi);
3718 + get_sectorsize(cd);
3719 + out:
3720 ++ scsi_cd_put(cd);
3721 + return 0;
3722 + }
3723 +
3724 +diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
3725 +index 351d81dc2200..44b7a69d022a 100644
3726 +--- a/drivers/scsi/storvsc_drv.c
3727 ++++ b/drivers/scsi/storvsc_drv.c
3728 +@@ -1538,7 +1538,7 @@ static struct scsi_host_template scsi_driver = {
3729 + .eh_timed_out = storvsc_eh_timed_out,
3730 + .slave_alloc = storvsc_device_alloc,
3731 + .slave_configure = storvsc_device_configure,
3732 +- .cmd_per_lun = 255,
3733 ++ .cmd_per_lun = 2048,
3734 + .this_id = -1,
3735 + .use_clustering = ENABLE_CLUSTERING,
3736 + /* Make sure we dont get a sg segment crosses a page boundary */
3737 +diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
3738 +index 6b349e301869..c6425e3df5a0 100644
3739 +--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
3740 ++++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
3741 +@@ -536,7 +536,7 @@ sym_getsync(struct sym_hcb *np, u_char dt, u_char sfac, u_char *divp, u_char *fa
3742 + * Look for the greatest clock divisor that allows an
3743 + * input speed faster than the period.
3744 + */
3745 +- while (div-- > 0)
3746 ++ while (--div > 0)
3747 + if (kpc >= (div_10M[div] << 2)) break;
3748 +
3749 + /*
3750 +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
3751 +index 096c867069e9..18f26cf1e24d 100644
3752 +--- a/drivers/scsi/ufs/ufshcd.c
3753 ++++ b/drivers/scsi/ufs/ufshcd.c
3754 +@@ -2923,6 +2923,8 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev)
3755 + /* REPORT SUPPORTED OPERATION CODES is not supported */
3756 + sdev->no_report_opcodes = 1;
3757 +
3758 ++ /* WRITE_SAME command is not supported */
3759 ++ sdev->no_write_same = 1;
3760 +
3761 + ufshcd_set_queue_depth(sdev);
3762 +
3763 +diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
3764 +index e06864f64beb..0f6bc6b8e4c6 100644
3765 +--- a/drivers/staging/rtl8192u/r8192U_core.c
3766 ++++ b/drivers/staging/rtl8192u/r8192U_core.c
3767 +@@ -1749,6 +1749,8 @@ static short rtl8192_usb_initendpoints(struct net_device *dev)
3768 +
3769 + priv->rx_urb[16] = usb_alloc_urb(0, GFP_KERNEL);
3770 + priv->oldaddr = kmalloc(16, GFP_KERNEL);
3771 ++ if (!priv->oldaddr)
3772 ++ return -ENOMEM;
3773 + oldaddr = priv->oldaddr;
3774 + align = ((long)oldaddr) & 3;
3775 + if (align) {
3776 +diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
3777 +index 03ebe401fff7..040018d59608 100644
3778 +--- a/drivers/tty/serial/arc_uart.c
3779 ++++ b/drivers/tty/serial/arc_uart.c
3780 +@@ -597,6 +597,11 @@ static int arc_serial_probe(struct platform_device *pdev)
3781 + if (dev_id < 0)
3782 + dev_id = 0;
3783 +
3784 ++ if (dev_id >= ARRAY_SIZE(arc_uart_ports)) {
3785 ++ dev_err(&pdev->dev, "serial%d out of range\n", dev_id);
3786 ++ return -EINVAL;
3787 ++ }
3788 ++
3789 + uart = &arc_uart_ports[dev_id];
3790 + port = &uart->port;
3791 +
3792 +diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
3793 +index 3d790033744e..01e2274b23f2 100644
3794 +--- a/drivers/tty/serial/fsl_lpuart.c
3795 ++++ b/drivers/tty/serial/fsl_lpuart.c
3796 +@@ -1818,6 +1818,10 @@ static int lpuart_probe(struct platform_device *pdev)
3797 + dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
3798 + return ret;
3799 + }
3800 ++ if (ret >= ARRAY_SIZE(lpuart_ports)) {
3801 ++ dev_err(&pdev->dev, "serial%d out of range\n", ret);
3802 ++ return -EINVAL;
3803 ++ }
3804 + sport->port.line = ret;
3805 + sport->lpuart32 = of_device_is_compatible(np, "fsl,ls1021a-lpuart");
3806 +
3807 +diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
3808 +index 98176d12b3e1..07ede982b472 100644
3809 +--- a/drivers/tty/serial/imx.c
3810 ++++ b/drivers/tty/serial/imx.c
3811 +@@ -1923,6 +1923,12 @@ static int serial_imx_probe(struct platform_device *pdev)
3812 + else if (ret < 0)
3813 + return ret;
3814 +
3815 ++ if (sport->port.line >= ARRAY_SIZE(imx_ports)) {
3816 ++ dev_err(&pdev->dev, "serial%d out of range\n",
3817 ++ sport->port.line);
3818 ++ return -EINVAL;
3819 ++ }
3820 ++
3821 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3822 + base = devm_ioremap_resource(&pdev->dev, res);
3823 + if (IS_ERR(base))
3824 +diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
3825 +index cd0414bbe094..daa4a65ef6ff 100644
3826 +--- a/drivers/tty/serial/mxs-auart.c
3827 ++++ b/drivers/tty/serial/mxs-auart.c
3828 +@@ -1274,6 +1274,10 @@ static int mxs_auart_probe(struct platform_device *pdev)
3829 + s->port.line = pdev->id < 0 ? 0 : pdev->id;
3830 + else if (ret < 0)
3831 + return ret;
3832 ++ if (s->port.line >= ARRAY_SIZE(auart_port)) {
3833 ++ dev_err(&pdev->dev, "serial%d out of range\n", s->port.line);
3834 ++ return -EINVAL;
3835 ++ }
3836 +
3837 + if (of_id) {
3838 + pdev->id_entry = of_id->data;
3839 +diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
3840 +index e6bc1a6be4a4..312343beb249 100644
3841 +--- a/drivers/tty/serial/samsung.c
3842 ++++ b/drivers/tty/serial/samsung.c
3843 +@@ -1807,6 +1807,10 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
3844 +
3845 + dbg("s3c24xx_serial_probe(%p) %d\n", pdev, index);
3846 +
3847 ++ if (index >= ARRAY_SIZE(s3c24xx_serial_ports)) {
3848 ++ dev_err(&pdev->dev, "serial%d out of range\n", index);
3849 ++ return -EINVAL;
3850 ++ }
3851 + ourport = &s3c24xx_serial_ports[index];
3852 +
3853 + ourport->drv_data = s3c24xx_get_driver_data(pdev);
3854 +diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
3855 +index 009e0dbc12d2..4f2f4aca8d2e 100644
3856 +--- a/drivers/tty/serial/xilinx_uartps.c
3857 ++++ b/drivers/tty/serial/xilinx_uartps.c
3858 +@@ -1026,7 +1026,7 @@ static struct uart_port *cdns_uart_get_port(int id)
3859 + struct uart_port *port;
3860 +
3861 + /* Try the given port id if failed use default method */
3862 +- if (cdns_uart_port[id].mapbase != 0) {
3863 ++ if (id < CDNS_UART_NR_PORTS && cdns_uart_port[id].mapbase != 0) {
3864 + /* Find the next unused port */
3865 + for (id = 0; id < CDNS_UART_NR_PORTS; id++)
3866 + if (cdns_uart_port[id].mapbase == 0)
3867 +diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
3868 +index a738a68d2292..a899d47c2a7c 100644
3869 +--- a/drivers/usb/dwc2/core.h
3870 ++++ b/drivers/usb/dwc2/core.h
3871 +@@ -187,7 +187,7 @@ struct dwc2_hsotg_ep {
3872 + unsigned char dir_in;
3873 + unsigned char index;
3874 + unsigned char mc;
3875 +- unsigned char interval;
3876 ++ u16 interval;
3877 +
3878 + unsigned int halted:1;
3879 + unsigned int periodic:1;
3880 +diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
3881 +index 0abf73c91beb..98705b83d2dc 100644
3882 +--- a/drivers/usb/dwc2/gadget.c
3883 ++++ b/drivers/usb/dwc2/gadget.c
3884 +@@ -2424,12 +2424,6 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
3885 + dwc2_writel(dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
3886 + DXEPCTL_USBACTEP, hsotg->regs + DIEPCTL0);
3887 +
3888 +- dwc2_hsotg_enqueue_setup(hsotg);
3889 +-
3890 +- dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
3891 +- dwc2_readl(hsotg->regs + DIEPCTL0),
3892 +- dwc2_readl(hsotg->regs + DOEPCTL0));
3893 +-
3894 + /* clear global NAKs */
3895 + val = DCTL_CGOUTNAK | DCTL_CGNPINNAK;
3896 + if (!is_usb_reset)
3897 +@@ -2440,6 +2434,12 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
3898 + mdelay(3);
3899 +
3900 + hsotg->lx_state = DWC2_L0;
3901 ++
3902 ++ dwc2_hsotg_enqueue_setup(hsotg);
3903 ++
3904 ++ dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
3905 ++ dwc2_readl(hsotg->regs + DIEPCTL0),
3906 ++ dwc2_readl(hsotg->regs + DOEPCTL0));
3907 + }
3908 +
3909 + static void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg)
3910 +diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
3911 +index 68d11d7d4028..8dfc94d389ea 100644
3912 +--- a/drivers/usb/dwc3/core.h
3913 ++++ b/drivers/usb/dwc3/core.h
3914 +@@ -202,6 +202,8 @@
3915 + #define DWC3_GUSB3PIPECTL_TX_DEEPH(n) ((n) << 1)
3916 +
3917 + /* Global TX Fifo Size Register */
3918 ++#define DWC31_GTXFIFOSIZ_TXFRAMNUM BIT(15) /* DWC_usb31 only */
3919 ++#define DWC31_GTXFIFOSIZ_TXFDEF(n) ((n) & 0x7fff) /* DWC_usb31 only */
3920 + #define DWC3_GTXFIFOSIZ_TXFDEF(n) ((n) & 0xffff)
3921 + #define DWC3_GTXFIFOSIZ_TXFSTADDR(n) ((n) & 0xffff0000)
3922 +
3923 +diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
3924 +index f70dd3dd4393..eb445c2ab15e 100644
3925 +--- a/drivers/usb/gadget/composite.c
3926 ++++ b/drivers/usb/gadget/composite.c
3927 +@@ -1328,7 +1328,7 @@ static int count_ext_compat(struct usb_configuration *c)
3928 + return res;
3929 + }
3930 +
3931 +-static void fill_ext_compat(struct usb_configuration *c, u8 *buf)
3932 ++static int fill_ext_compat(struct usb_configuration *c, u8 *buf)
3933 + {
3934 + int i, count;
3935 +
3936 +@@ -1355,10 +1355,12 @@ static void fill_ext_compat(struct usb_configuration *c, u8 *buf)
3937 + buf += 23;
3938 + }
3939 + count += 24;
3940 +- if (count >= 4096)
3941 +- return;
3942 ++ if (count + 24 >= USB_COMP_EP0_OS_DESC_BUFSIZ)
3943 ++ return count;
3944 + }
3945 + }
3946 ++
3947 ++ return count;
3948 + }
3949 +
3950 + static int count_ext_prop(struct usb_configuration *c, int interface)
3951 +@@ -1403,25 +1405,20 @@ static int fill_ext_prop(struct usb_configuration *c, int interface, u8 *buf)
3952 + struct usb_os_desc *d;
3953 + struct usb_os_desc_ext_prop *ext_prop;
3954 + int j, count, n, ret;
3955 +- u8 *start = buf;
3956 +
3957 + f = c->interface[interface];
3958 ++ count = 10; /* header length */
3959 + for (j = 0; j < f->os_desc_n; ++j) {
3960 + if (interface != f->os_desc_table[j].if_id)
3961 + continue;
3962 + d = f->os_desc_table[j].os_desc;
3963 + if (d)
3964 + list_for_each_entry(ext_prop, &d->ext_prop, entry) {
3965 +- /* 4kB minus header length */
3966 +- n = buf - start;
3967 +- if (n >= 4086)
3968 +- return 0;
3969 +-
3970 +- count = ext_prop->data_len +
3971 ++ n = ext_prop->data_len +
3972 + ext_prop->name_len + 14;
3973 +- if (count > 4086 - n)
3974 +- return -EINVAL;
3975 +- usb_ext_prop_put_size(buf, count);
3976 ++ if (count + n >= USB_COMP_EP0_OS_DESC_BUFSIZ)
3977 ++ return count;
3978 ++ usb_ext_prop_put_size(buf, n);
3979 + usb_ext_prop_put_type(buf, ext_prop->type);
3980 + ret = usb_ext_prop_put_name(buf, ext_prop->name,
3981 + ext_prop->name_len);
3982 +@@ -1447,11 +1444,12 @@ static int fill_ext_prop(struct usb_configuration *c, int interface, u8 *buf)
3983 + default:
3984 + return -EINVAL;
3985 + }
3986 +- buf += count;
3987 ++ buf += n;
3988 ++ count += n;
3989 + }
3990 + }
3991 +
3992 +- return 0;
3993 ++ return count;
3994 + }
3995 +
3996 + /*
3997 +@@ -1720,6 +1718,7 @@ unknown:
3998 + req->complete = composite_setup_complete;
3999 + buf = req->buf;
4000 + os_desc_cfg = cdev->os_desc_config;
4001 ++ w_length = min_t(u16, w_length, USB_COMP_EP0_OS_DESC_BUFSIZ);
4002 + memset(buf, 0, w_length);
4003 + buf[5] = 0x01;
4004 + switch (ctrl->bRequestType & USB_RECIP_MASK) {
4005 +@@ -1743,8 +1742,8 @@ unknown:
4006 + count += 16; /* header */
4007 + put_unaligned_le32(count, buf);
4008 + buf += 16;
4009 +- fill_ext_compat(os_desc_cfg, buf);
4010 +- value = w_length;
4011 ++ value = fill_ext_compat(os_desc_cfg, buf);
4012 ++ value = min_t(u16, w_length, value);
4013 + }
4014 + break;
4015 + case USB_RECIP_INTERFACE:
4016 +@@ -1773,8 +1772,7 @@ unknown:
4017 + interface, buf);
4018 + if (value < 0)
4019 + return value;
4020 +-
4021 +- value = w_length;
4022 ++ value = min_t(u16, w_length, value);
4023 + }
4024 + break;
4025 + }
4026 +@@ -2038,8 +2036,8 @@ int composite_os_desc_req_prepare(struct usb_composite_dev *cdev,
4027 + goto end;
4028 + }
4029 +
4030 +- /* OS feature descriptor length <= 4kB */
4031 +- cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL);
4032 ++ cdev->os_desc_req->buf = kmalloc(USB_COMP_EP0_OS_DESC_BUFSIZ,
4033 ++ GFP_KERNEL);
4034 + if (!cdev->os_desc_req->buf) {
4035 + ret = PTR_ERR(cdev->os_desc_req->buf);
4036 + kfree(cdev->os_desc_req);
4037 +diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
4038 +index 7deebd0b21ae..4191feb765b1 100644
4039 +--- a/drivers/usb/gadget/function/f_fs.c
4040 ++++ b/drivers/usb/gadget/function/f_fs.c
4041 +@@ -649,11 +649,15 @@ static void ffs_user_copy_worker(struct work_struct *work)
4042 + bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
4043 +
4044 + if (io_data->read && ret > 0) {
4045 ++ mm_segment_t oldfs = get_fs();
4046 ++
4047 ++ set_fs(USER_DS);
4048 + use_mm(io_data->mm);
4049 + ret = copy_to_iter(io_data->buf, ret, &io_data->data);
4050 + if (ret != io_data->req->actual && iov_iter_count(&io_data->data))
4051 + ret = -EFAULT;
4052 + unuse_mm(io_data->mm);
4053 ++ set_fs(oldfs);
4054 + }
4055 +
4056 + io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
4057 +@@ -3033,7 +3037,7 @@ static int ffs_func_setup(struct usb_function *f,
4058 + __ffs_event_add(ffs, FUNCTIONFS_SETUP);
4059 + spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
4060 +
4061 +- return 0;
4062 ++ return USB_GADGET_DELAYED_STATUS;
4063 + }
4064 +
4065 + static void ffs_func_suspend(struct usb_function *f)
4066 +diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
4067 +index 12064d3bddf6..b5dab103be38 100644
4068 +--- a/drivers/usb/gadget/function/f_uac2.c
4069 ++++ b/drivers/usb/gadget/function/f_uac2.c
4070 +@@ -1052,6 +1052,8 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
4071 + dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
4072 + return ret;
4073 + }
4074 ++ iad_desc.bFirstInterface = ret;
4075 ++
4076 + std_ac_if_desc.bInterfaceNumber = ret;
4077 + agdev->ac_intf = ret;
4078 + agdev->ac_alt = 0;
4079 +diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
4080 +index aac0ce8aeb0b..8991a4070792 100644
4081 +--- a/drivers/usb/gadget/udc/fsl_udc_core.c
4082 ++++ b/drivers/usb/gadget/udc/fsl_udc_core.c
4083 +@@ -1310,7 +1310,7 @@ static void udc_reset_ep_queue(struct fsl_udc *udc, u8 pipe)
4084 + {
4085 + struct fsl_ep *ep = get_ep_by_pipe(udc, pipe);
4086 +
4087 +- if (ep->name)
4088 ++ if (ep->ep.name)
4089 + nuke(ep, -ESHUTDOWN);
4090 + }
4091 +
4092 +@@ -1698,7 +1698,7 @@ static void dtd_complete_irq(struct fsl_udc *udc)
4093 + curr_ep = get_ep_by_pipe(udc, i);
4094 +
4095 + /* If the ep is configured */
4096 +- if (curr_ep->name == NULL) {
4097 ++ if (!curr_ep->ep.name) {
4098 + WARNING("Invalid EP?");
4099 + continue;
4100 + }
4101 +diff --git a/drivers/usb/gadget/udc/goku_udc.h b/drivers/usb/gadget/udc/goku_udc.h
4102 +index 86d2adafe149..64eb0f2b5ea0 100644
4103 +--- a/drivers/usb/gadget/udc/goku_udc.h
4104 ++++ b/drivers/usb/gadget/udc/goku_udc.h
4105 +@@ -28,7 +28,7 @@ struct goku_udc_regs {
4106 + # define INT_EP1DATASET 0x00040
4107 + # define INT_EP2DATASET 0x00080
4108 + # define INT_EP3DATASET 0x00100
4109 +-#define INT_EPnNAK(n) (0x00100 < (n)) /* 0 < n < 4 */
4110 ++#define INT_EPnNAK(n) (0x00100 << (n)) /* 0 < n < 4 */
4111 + # define INT_EP1NAK 0x00200
4112 + # define INT_EP2NAK 0x00400
4113 + # define INT_EP3NAK 0x00800
4114 +diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
4115 +index 9d1192aea9d0..602c6e42c34d 100644
4116 +--- a/drivers/usb/host/ohci-hcd.c
4117 ++++ b/drivers/usb/host/ohci-hcd.c
4118 +@@ -444,7 +444,8 @@ static int ohci_init (struct ohci_hcd *ohci)
4119 + struct usb_hcd *hcd = ohci_to_hcd(ohci);
4120 +
4121 + /* Accept arbitrarily long scatter-gather lists */
4122 +- hcd->self.sg_tablesize = ~0;
4123 ++ if (!(hcd->driver->flags & HCD_LOCAL_MEM))
4124 ++ hcd->self.sg_tablesize = ~0;
4125 +
4126 + if (distrust_firmware)
4127 + ohci->flags |= OHCI_QUIRK_HUB_POWER;
4128 +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
4129 +index d9363713b7f1..e4cf3322bcb3 100644
4130 +--- a/drivers/usb/host/xhci-mem.c
4131 ++++ b/drivers/usb/host/xhci-mem.c
4132 +@@ -960,6 +960,8 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
4133 + if (dev->out_ctx)
4134 + xhci_free_container_ctx(xhci, dev->out_ctx);
4135 +
4136 ++ if (dev->udev && dev->udev->slot_id)
4137 ++ dev->udev->slot_id = 0;
4138 + kfree(xhci->devs[slot_id]);
4139 + xhci->devs[slot_id] = NULL;
4140 + }
4141 +diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
4142 +index 06d83825923a..3a81b4c4d0dd 100644
4143 +--- a/drivers/usb/musb/musb_core.c
4144 ++++ b/drivers/usb/musb/musb_core.c
4145 +@@ -1775,6 +1775,7 @@ musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
4146 + int vbus;
4147 + u8 devctl;
4148 +
4149 ++ pm_runtime_get_sync(dev);
4150 + spin_lock_irqsave(&musb->lock, flags);
4151 + val = musb->a_wait_bcon;
4152 + vbus = musb_platform_get_vbus_status(musb);
4153 +@@ -1788,6 +1789,7 @@ musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
4154 + vbus = 0;
4155 + }
4156 + spin_unlock_irqrestore(&musb->lock, flags);
4157 ++ pm_runtime_put_sync(dev);
4158 +
4159 + return sprintf(buf, "Vbus %s, timeout %lu msec\n",
4160 + vbus ? "on" : "off", val);
4161 +@@ -2522,7 +2524,8 @@ static int musb_resume(struct device *dev)
4162 + pm_runtime_set_active(dev);
4163 + pm_runtime_enable(dev);
4164 +
4165 +- musb_start(musb);
4166 ++ musb_enable_interrupts(musb);
4167 ++ musb_platform_enable(musb);
4168 +
4169 + return 0;
4170 + }
4171 +diff --git a/drivers/video/fbdev/sbuslib.c b/drivers/video/fbdev/sbuslib.c
4172 +index a350209ffbd3..31c301d6be62 100644
4173 +--- a/drivers/video/fbdev/sbuslib.c
4174 ++++ b/drivers/video/fbdev/sbuslib.c
4175 +@@ -121,7 +121,7 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,
4176 + unsigned char __user *ured;
4177 + unsigned char __user *ugreen;
4178 + unsigned char __user *ublue;
4179 +- int index, count, i;
4180 ++ unsigned int index, count, i;
4181 +
4182 + if (get_user(index, &c->index) ||
4183 + __get_user(count, &c->count) ||
4184 +@@ -160,7 +160,7 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,
4185 + unsigned char __user *ugreen;
4186 + unsigned char __user *ublue;
4187 + struct fb_cmap *cmap = &info->cmap;
4188 +- int index, count, i;
4189 ++ unsigned int index, count, i;
4190 + u8 red, green, blue;
4191 +
4192 + if (get_user(index, &c->index) ||
4193 +diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
4194 +index aa93df5833dc..2048aad91add 100644
4195 +--- a/drivers/watchdog/f71808e_wdt.c
4196 ++++ b/drivers/watchdog/f71808e_wdt.c
4197 +@@ -520,7 +520,8 @@ static ssize_t watchdog_write(struct file *file, const char __user *buf,
4198 + char c;
4199 + if (get_user(c, buf + i))
4200 + return -EFAULT;
4201 +- expect_close = (c == 'V');
4202 ++ if (c == 'V')
4203 ++ expect_close = true;
4204 + }
4205 +
4206 + /* Properly order writes across fork()ed processes */
4207 +diff --git a/drivers/watchdog/sp5100_tco.h b/drivers/watchdog/sp5100_tco.h
4208 +index 2b28c00da0df..dfe20b81ced5 100644
4209 +--- a/drivers/watchdog/sp5100_tco.h
4210 ++++ b/drivers/watchdog/sp5100_tco.h
4211 +@@ -54,7 +54,7 @@
4212 + #define SB800_PM_WATCHDOG_CONFIG 0x4C
4213 +
4214 + #define SB800_PCI_WATCHDOG_DECODE_EN (1 << 0)
4215 +-#define SB800_PM_WATCHDOG_DISABLE (1 << 2)
4216 ++#define SB800_PM_WATCHDOG_DISABLE (1 << 1)
4217 + #define SB800_PM_WATCHDOG_SECOND_RES (3 << 0)
4218 + #define SB800_ACPI_MMIO_DECODE_EN (1 << 0)
4219 + #define SB800_ACPI_MMIO_SEL (1 << 1)
4220 +diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
4221 +index 83ec7b89d308..468961c59fa5 100644
4222 +--- a/drivers/xen/events/events_base.c
4223 ++++ b/drivers/xen/events/events_base.c
4224 +@@ -764,8 +764,8 @@ out:
4225 + mutex_unlock(&irq_mapping_update_lock);
4226 + return irq;
4227 + error_irq:
4228 +- for (; i >= 0; i--)
4229 +- __unbind_from_irq(irq + i);
4230 ++ while (nvec--)
4231 ++ __unbind_from_irq(irq + nvec);
4232 + mutex_unlock(&irq_mapping_update_lock);
4233 + return ret;
4234 + }
4235 +diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
4236 +index c49f79ed58c5..4b7ce442d8e5 100644
4237 +--- a/drivers/xen/grant-table.c
4238 ++++ b/drivers/xen/grant-table.c
4239 +@@ -328,7 +328,7 @@ static void gnttab_handle_deferred(unsigned long unused)
4240 + if (entry->page) {
4241 + pr_debug("freeing g.e. %#x (pfn %#lx)\n",
4242 + entry->ref, page_to_pfn(entry->page));
4243 +- __free_page(entry->page);
4244 ++ put_page(entry->page);
4245 + } else
4246 + pr_info("freeing g.e. %#x\n", entry->ref);
4247 + kfree(entry);
4248 +@@ -384,7 +384,7 @@ void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
4249 + if (gnttab_end_foreign_access_ref(ref, readonly)) {
4250 + put_free_entry(ref);
4251 + if (page != 0)
4252 +- free_page(page);
4253 ++ put_page(virt_to_page(page));
4254 + } else
4255 + gnttab_add_deferred(ref, readonly,
4256 + page ? virt_to_page(page) : NULL);
4257 +diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
4258 +index f7b19c25c3a4..1889e928a0da 100644
4259 +--- a/drivers/xen/swiotlb-xen.c
4260 ++++ b/drivers/xen/swiotlb-xen.c
4261 +@@ -359,7 +359,7 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
4262 + * physical address */
4263 + phys = xen_bus_to_phys(dev_addr);
4264 +
4265 +- if (((dev_addr + size - 1 > dma_mask)) ||
4266 ++ if (((dev_addr + size - 1 <= dma_mask)) ||
4267 + range_straddles_page_boundary(phys, size))
4268 + xen_destroy_contiguous_region(phys, order);
4269 +
4270 +diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
4271 +index 2e319d0c395d..84cc98f3cabe 100644
4272 +--- a/drivers/xen/xen-acpi-processor.c
4273 ++++ b/drivers/xen/xen-acpi-processor.c
4274 +@@ -362,9 +362,9 @@ read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv)
4275 + }
4276 + /* There are more ACPI Processor objects than in x2APIC or MADT.
4277 + * This can happen with incorrect ACPI SSDT declerations. */
4278 +- if (acpi_id > nr_acpi_bits) {
4279 +- pr_debug("We only have %u, trying to set %u\n",
4280 +- nr_acpi_bits, acpi_id);
4281 ++ if (acpi_id >= nr_acpi_bits) {
4282 ++ pr_debug("max acpi id %u, trying to set %u\n",
4283 ++ nr_acpi_bits - 1, acpi_id);
4284 + return AE_OK;
4285 + }
4286 + /* OK, There is a ACPI Processor object */
4287 +diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
4288 +index 33a31cfef55d..c2d447687e33 100644
4289 +--- a/drivers/xen/xenbus/xenbus_probe.c
4290 ++++ b/drivers/xen/xenbus/xenbus_probe.c
4291 +@@ -470,8 +470,11 @@ int xenbus_probe_node(struct xen_bus_type *bus,
4292 +
4293 + /* Register with generic device framework. */
4294 + err = device_register(&xendev->dev);
4295 +- if (err)
4296 ++ if (err) {
4297 ++ put_device(&xendev->dev);
4298 ++ xendev = NULL;
4299 + goto fail;
4300 ++ }
4301 +
4302 + return 0;
4303 + fail:
4304 +diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c
4305 +index d295d9878dff..8ec79385d3cc 100644
4306 +--- a/drivers/zorro/zorro.c
4307 ++++ b/drivers/zorro/zorro.c
4308 +@@ -16,6 +16,7 @@
4309 + #include <linux/bitops.h>
4310 + #include <linux/string.h>
4311 + #include <linux/platform_device.h>
4312 ++#include <linux/dma-mapping.h>
4313 + #include <linux/slab.h>
4314 +
4315 + #include <asm/byteorder.h>
4316 +@@ -185,6 +186,17 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
4317 + z->dev.parent = &bus->dev;
4318 + z->dev.bus = &zorro_bus_type;
4319 + z->dev.id = i;
4320 ++ switch (z->rom.er_Type & ERT_TYPEMASK) {
4321 ++ case ERT_ZORROIII:
4322 ++ z->dev.coherent_dma_mask = DMA_BIT_MASK(32);
4323 ++ break;
4324 ++
4325 ++ case ERT_ZORROII:
4326 ++ default:
4327 ++ z->dev.coherent_dma_mask = DMA_BIT_MASK(24);
4328 ++ break;
4329 ++ }
4330 ++ z->dev.dma_mask = &z->dev.coherent_dma_mask;
4331 + }
4332 +
4333 + /* ... then register them */
4334 +diff --git a/fs/affs/namei.c b/fs/affs/namei.c
4335 +index 181e05b46e72..92448d0ad900 100644
4336 +--- a/fs/affs/namei.c
4337 ++++ b/fs/affs/namei.c
4338 +@@ -224,9 +224,10 @@ affs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
4339 +
4340 + affs_lock_dir(dir);
4341 + bh = affs_find_entry(dir, dentry);
4342 +- affs_unlock_dir(dir);
4343 +- if (IS_ERR(bh))
4344 ++ if (IS_ERR(bh)) {
4345 ++ affs_unlock_dir(dir);
4346 + return ERR_CAST(bh);
4347 ++ }
4348 + if (bh) {
4349 + u32 ino = bh->b_blocknr;
4350 +
4351 +@@ -240,10 +241,13 @@ affs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
4352 + }
4353 + affs_brelse(bh);
4354 + inode = affs_iget(sb, ino);
4355 +- if (IS_ERR(inode))
4356 ++ if (IS_ERR(inode)) {
4357 ++ affs_unlock_dir(dir);
4358 + return ERR_CAST(inode);
4359 ++ }
4360 + }
4361 + d_add(dentry, inode);
4362 ++ affs_unlock_dir(dir);
4363 + return NULL;
4364 + }
4365 +
4366 +diff --git a/fs/aio.c b/fs/aio.c
4367 +index 88ede4a84ce0..4efaf29354a6 100644
4368 +--- a/fs/aio.c
4369 ++++ b/fs/aio.c
4370 +@@ -1066,8 +1066,8 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
4371 +
4372 + ctx = rcu_dereference(table->table[id]);
4373 + if (ctx && ctx->user_id == ctx_id) {
4374 +- percpu_ref_get(&ctx->users);
4375 +- ret = ctx;
4376 ++ if (percpu_ref_tryget_live(&ctx->users))
4377 ++ ret = ctx;
4378 + }
4379 + out:
4380 + rcu_read_unlock();
4381 +diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
4382 +index e2f5be261532..38ee08675468 100644
4383 +--- a/fs/btrfs/ctree.c
4384 ++++ b/fs/btrfs/ctree.c
4385 +@@ -2769,6 +2769,8 @@ again:
4386 + * contention with the cow code
4387 + */
4388 + if (cow) {
4389 ++ bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
4390 ++
4391 + /*
4392 + * if we don't really need to cow this block
4393 + * then we don't want to set the path blocking,
4394 +@@ -2793,9 +2795,13 @@ again:
4395 + }
4396 +
4397 + btrfs_set_path_blocking(p);
4398 +- err = btrfs_cow_block(trans, root, b,
4399 +- p->nodes[level + 1],
4400 +- p->slots[level + 1], &b);
4401 ++ if (last_level)
4402 ++ err = btrfs_cow_block(trans, root, b, NULL, 0,
4403 ++ &b);
4404 ++ else
4405 ++ err = btrfs_cow_block(trans, root, b,
4406 ++ p->nodes[level + 1],
4407 ++ p->slots[level + 1], &b);
4408 + if (err) {
4409 + ret = err;
4410 + goto done;
4411 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
4412 +index 85b207d19aa5..208b3f5ffb3f 100644
4413 +--- a/fs/btrfs/disk-io.c
4414 ++++ b/fs/btrfs/disk-io.c
4415 +@@ -1196,7 +1196,7 @@ static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void)
4416 + if (!writers)
4417 + return ERR_PTR(-ENOMEM);
4418 +
4419 +- ret = percpu_counter_init(&writers->counter, 0, GFP_KERNEL);
4420 ++ ret = percpu_counter_init(&writers->counter, 0, GFP_NOFS);
4421 + if (ret < 0) {
4422 + kfree(writers);
4423 + return ERR_PTR(ret);
4424 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
4425 +index 260f94b019c9..982a9d509817 100644
4426 +--- a/fs/btrfs/extent-tree.c
4427 ++++ b/fs/btrfs/extent-tree.c
4428 +@@ -4392,6 +4392,7 @@ again:
4429 + if (wait_for_alloc) {
4430 + mutex_unlock(&fs_info->chunk_mutex);
4431 + wait_for_alloc = 0;
4432 ++ cond_resched();
4433 + goto again;
4434 + }
4435 +
4436 +diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
4437 +index d4a6eef31854..052973620595 100644
4438 +--- a/fs/btrfs/file.c
4439 ++++ b/fs/btrfs/file.c
4440 +@@ -1861,10 +1861,19 @@ int btrfs_release_file(struct inode *inode, struct file *filp)
4441 + static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
4442 + {
4443 + int ret;
4444 ++ struct blk_plug plug;
4445 +
4446 ++ /*
4447 ++ * This is only called in fsync, which would do synchronous writes, so
4448 ++ * a plug can merge adjacent IOs as much as possible. Esp. in case of
4449 ++ * multiple disks using raid profile, a large IO can be split to
4450 ++ * several segments of stripe length (currently 64K).
4451 ++ */
4452 ++ blk_start_plug(&plug);
4453 + atomic_inc(&BTRFS_I(inode)->sync_writers);
4454 + ret = btrfs_fdatawrite_range(inode, start, end);
4455 + atomic_dec(&BTRFS_I(inode)->sync_writers);
4456 ++ blk_finish_plug(&plug);
4457 +
4458 + return ret;
4459 + }
4460 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
4461 +index 81b5a461d94e..1f01a8172308 100644
4462 +--- a/fs/btrfs/inode.c
4463 ++++ b/fs/btrfs/inode.c
4464 +@@ -6413,8 +6413,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
4465 + goto out_unlock_inode;
4466 + } else {
4467 + btrfs_update_inode(trans, root, inode);
4468 +- unlock_new_inode(inode);
4469 +- d_instantiate(dentry, inode);
4470 ++ d_instantiate_new(dentry, inode);
4471 + }
4472 +
4473 + out_unlock:
4474 +@@ -6489,8 +6488,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
4475 + goto out_unlock_inode;
4476 +
4477 + BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
4478 +- unlock_new_inode(inode);
4479 +- d_instantiate(dentry, inode);
4480 ++ d_instantiate_new(dentry, inode);
4481 +
4482 + out_unlock:
4483 + btrfs_end_transaction(trans, root);
4484 +@@ -6633,12 +6631,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
4485 + if (err)
4486 + goto out_fail_inode;
4487 +
4488 +- d_instantiate(dentry, inode);
4489 +- /*
4490 +- * mkdir is special. We're unlocking after we call d_instantiate
4491 +- * to avoid a race with nfsd calling d_instantiate.
4492 +- */
4493 +- unlock_new_inode(inode);
4494 ++ d_instantiate_new(dentry, inode);
4495 + drop_on_err = 0;
4496 +
4497 + out_fail:
4498 +@@ -9789,8 +9782,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
4499 + goto out_unlock_inode;
4500 + }
4501 +
4502 +- unlock_new_inode(inode);
4503 +- d_instantiate(dentry, inode);
4504 ++ d_instantiate_new(dentry, inode);
4505 +
4506 + out_unlock:
4507 + btrfs_end_transaction(trans, root);
4508 +diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
4509 +index 1a33d3eb36de..b9fa99577bf7 100644
4510 +--- a/fs/btrfs/raid56.c
4511 ++++ b/fs/btrfs/raid56.c
4512 +@@ -2160,11 +2160,21 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
4513 + }
4514 +
4515 + /*
4516 +- * reconstruct from the q stripe if they are
4517 +- * asking for mirror 3
4518 ++ * Loop retry:
4519 ++ * for 'mirror == 2', reconstruct from all other stripes.
4520 ++ * for 'mirror_num > 2', select a stripe to fail on every retry.
4521 + */
4522 +- if (mirror_num == 3)
4523 +- rbio->failb = rbio->real_stripes - 2;
4524 ++ if (mirror_num > 2) {
4525 ++ /*
4526 ++ * 'mirror == 3' is to fail the p stripe and
4527 ++ * reconstruct from the q stripe. 'mirror > 3' is to
4528 ++ * fail a data stripe and reconstruct from p+q stripe.
4529 ++ */
4530 ++ rbio->failb = rbio->real_stripes - (mirror_num - 1);
4531 ++ ASSERT(rbio->failb > 0);
4532 ++ if (rbio->failb <= rbio->faila)
4533 ++ rbio->failb--;
4534 ++ }
4535 +
4536 + ret = lock_stripe_add(rbio);
4537 +
4538 +diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
4539 +index 19b56873b797..83c73738165e 100644
4540 +--- a/fs/btrfs/send.c
4541 ++++ b/fs/btrfs/send.c
4542 +@@ -4674,6 +4674,9 @@ static int send_hole(struct send_ctx *sctx, u64 end)
4543 + u64 len;
4544 + int ret = 0;
4545 +
4546 ++ if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
4547 ++ return send_update_extent(sctx, offset, end - offset);
4548 ++
4549 + p = fs_path_alloc();
4550 + if (!p)
4551 + return -ENOMEM;
4552 +diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
4553 +index 846d277b1901..2b2978c04e80 100644
4554 +--- a/fs/btrfs/tests/qgroup-tests.c
4555 ++++ b/fs/btrfs/tests/qgroup-tests.c
4556 +@@ -70,7 +70,7 @@ static int insert_normal_tree_ref(struct btrfs_root *root, u64 bytenr,
4557 + btrfs_set_extent_generation(leaf, item, 1);
4558 + btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_TREE_BLOCK);
4559 + block_info = (struct btrfs_tree_block_info *)(item + 1);
4560 +- btrfs_set_tree_block_level(leaf, block_info, 1);
4561 ++ btrfs_set_tree_block_level(leaf, block_info, 0);
4562 + iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
4563 + if (parent > 0) {
4564 + btrfs_set_extent_inline_ref_type(leaf, iref,
4565 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
4566 +index 6ba022ed4a52..738f5d6beb95 100644
4567 +--- a/fs/btrfs/tree-log.c
4568 ++++ b/fs/btrfs/tree-log.c
4569 +@@ -2223,8 +2223,10 @@ again:
4570 + nritems = btrfs_header_nritems(path->nodes[0]);
4571 + if (path->slots[0] >= nritems) {
4572 + ret = btrfs_next_leaf(root, path);
4573 +- if (ret)
4574 ++ if (ret == 1)
4575 + break;
4576 ++ else if (ret < 0)
4577 ++ goto out;
4578 + }
4579 + btrfs_item_key_to_cpu(path->nodes[0], &found_key,
4580 + path->slots[0]);
4581 +@@ -3378,8 +3380,11 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
4582 + * from this directory and from this transaction
4583 + */
4584 + ret = btrfs_next_leaf(root, path);
4585 +- if (ret == 1) {
4586 +- last_offset = (u64)-1;
4587 ++ if (ret) {
4588 ++ if (ret == 1)
4589 ++ last_offset = (u64)-1;
4590 ++ else
4591 ++ err = ret;
4592 + goto done;
4593 + }
4594 + btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
4595 +@@ -3830,6 +3835,7 @@ fill_holes:
4596 + ASSERT(ret == 0);
4597 + src = src_path->nodes[0];
4598 + i = 0;
4599 ++ need_find_last_extent = true;
4600 + }
4601 +
4602 + btrfs_item_key_to_cpu(src, &key, i);
4603 +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
4604 +index ed75d70b4bc2..b4d63a9842fa 100644
4605 +--- a/fs/btrfs/volumes.c
4606 ++++ b/fs/btrfs/volumes.c
4607 +@@ -5056,7 +5056,14 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
4608 + else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
4609 + ret = 2;
4610 + else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
4611 +- ret = 3;
4612 ++ /*
4613 ++ * There could be two corrupted data stripes, we need
4614 ++ * to loop retry in order to rebuild the correct data.
4615 ++ *
4616 ++ * Fail a stripe at a time on every retry except the
4617 ++ * stripe under reconstruction.
4618 ++ */
4619 ++ ret = map->num_stripes;
4620 + else
4621 + ret = 1;
4622 + free_extent_map(em);
4623 +diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
4624 +index 0c92af11f4f4..8632380d2b94 100644
4625 +--- a/fs/cifs/cifssmb.c
4626 ++++ b/fs/cifs/cifssmb.c
4627 +@@ -6421,9 +6421,7 @@ SetEARetry:
4628 + pSMB->InformationLevel =
4629 + cpu_to_le16(SMB_SET_FILE_EA);
4630 +
4631 +- parm_data =
4632 +- (struct fealist *) (((char *) &pSMB->hdr.Protocol) +
4633 +- offset);
4634 ++ parm_data = (void *)pSMB + offsetof(struct smb_hdr, Protocol) + offset;
4635 + pSMB->ParameterOffset = cpu_to_le16(param_offset);
4636 + pSMB->DataOffset = cpu_to_le16(offset);
4637 + pSMB->SetupCount = 1;
4638 +diff --git a/fs/dcache.c b/fs/dcache.c
4639 +index 751a0d88f049..250c1222e30c 100644
4640 +--- a/fs/dcache.c
4641 ++++ b/fs/dcache.c
4642 +@@ -1897,6 +1897,28 @@ struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
4643 +
4644 + EXPORT_SYMBOL(d_instantiate_unique);
4645 +
4646 ++/*
4647 ++ * This should be equivalent to d_instantiate() + unlock_new_inode(),
4648 ++ * with lockdep-related part of unlock_new_inode() done before
4649 ++ * anything else. Use that instead of open-coding d_instantiate()/
4650 ++ * unlock_new_inode() combinations.
4651 ++ */
4652 ++void d_instantiate_new(struct dentry *entry, struct inode *inode)
4653 ++{
4654 ++ BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
4655 ++ BUG_ON(!inode);
4656 ++ lockdep_annotate_inode_mutex_key(inode);
4657 ++ security_d_instantiate(entry, inode);
4658 ++ spin_lock(&inode->i_lock);
4659 ++ __d_instantiate(entry, inode);
4660 ++ WARN_ON(!(inode->i_state & I_NEW));
4661 ++ inode->i_state &= ~I_NEW;
4662 ++ smp_mb();
4663 ++ wake_up_bit(&inode->i_state, __I_NEW);
4664 ++ spin_unlock(&inode->i_lock);
4665 ++}
4666 ++EXPORT_SYMBOL(d_instantiate_new);
4667 ++
4668 + /**
4669 + * d_instantiate_no_diralias - instantiate a non-aliased dentry
4670 + * @entry: dentry to complete
4671 +diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
4672 +index e2e47ba5d313..844d0c4da84f 100644
4673 +--- a/fs/ecryptfs/inode.c
4674 ++++ b/fs/ecryptfs/inode.c
4675 +@@ -287,8 +287,7 @@ ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry,
4676 + iput(ecryptfs_inode);
4677 + goto out;
4678 + }
4679 +- unlock_new_inode(ecryptfs_inode);
4680 +- d_instantiate(ecryptfs_dentry, ecryptfs_inode);
4681 ++ d_instantiate_new(ecryptfs_dentry, ecryptfs_inode);
4682 + out:
4683 + return rc;
4684 + }
4685 +diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
4686 +index 3267a80dbbe2..da3d40ef1668 100644
4687 +--- a/fs/ext2/namei.c
4688 ++++ b/fs/ext2/namei.c
4689 +@@ -40,8 +40,7 @@ static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode)
4690 + {
4691 + int err = ext2_add_link(dentry, inode);
4692 + if (!err) {
4693 +- unlock_new_inode(inode);
4694 +- d_instantiate(dentry, inode);
4695 ++ d_instantiate_new(dentry, inode);
4696 + return 0;
4697 + }
4698 + inode_dec_link_count(inode);
4699 +@@ -267,8 +266,7 @@ static int ext2_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
4700 + if (err)
4701 + goto out_fail;
4702 +
4703 +- unlock_new_inode(inode);
4704 +- d_instantiate(dentry, inode);
4705 ++ d_instantiate_new(dentry, inode);
4706 + out:
4707 + return err;
4708 +
4709 +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
4710 +index 32960b3ecd4f..97472088d65a 100644
4711 +--- a/fs/ext4/namei.c
4712 ++++ b/fs/ext4/namei.c
4713 +@@ -2429,8 +2429,7 @@ static int ext4_add_nondir(handle_t *handle,
4714 + int err = ext4_add_entry(handle, dentry, inode);
4715 + if (!err) {
4716 + ext4_mark_inode_dirty(handle, inode);
4717 +- unlock_new_inode(inode);
4718 +- d_instantiate(dentry, inode);
4719 ++ d_instantiate_new(dentry, inode);
4720 + return 0;
4721 + }
4722 + drop_nlink(inode);
4723 +@@ -2669,8 +2668,7 @@ out_clear_inode:
4724 + err = ext4_mark_inode_dirty(handle, dir);
4725 + if (err)
4726 + goto out_clear_inode;
4727 +- unlock_new_inode(inode);
4728 +- d_instantiate(dentry, inode);
4729 ++ d_instantiate_new(dentry, inode);
4730 + if (IS_DIRSYNC(dir))
4731 + ext4_handle_sync(handle);
4732 +
4733 +diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
4734 +index 484df6850747..e5553cd8fe4e 100644
4735 +--- a/fs/f2fs/namei.c
4736 ++++ b/fs/f2fs/namei.c
4737 +@@ -150,8 +150,7 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
4738 +
4739 + alloc_nid_done(sbi, ino);
4740 +
4741 +- d_instantiate(dentry, inode);
4742 +- unlock_new_inode(inode);
4743 ++ d_instantiate_new(dentry, inode);
4744 +
4745 + if (IS_DIRSYNC(dir))
4746 + f2fs_sync_fs(sbi->sb, 1);
4747 +@@ -399,8 +398,7 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
4748 + err = page_symlink(inode, p_str, p_len);
4749 +
4750 + err_out:
4751 +- d_instantiate(dentry, inode);
4752 +- unlock_new_inode(inode);
4753 ++ d_instantiate_new(dentry, inode);
4754 +
4755 + /*
4756 + * Let's flush symlink data in order to avoid broken symlink as much as
4757 +@@ -454,8 +452,7 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
4758 +
4759 + alloc_nid_done(sbi, inode->i_ino);
4760 +
4761 +- d_instantiate(dentry, inode);
4762 +- unlock_new_inode(inode);
4763 ++ d_instantiate_new(dentry, inode);
4764 +
4765 + if (IS_DIRSYNC(dir))
4766 + f2fs_sync_fs(sbi->sb, 1);
4767 +@@ -499,8 +496,7 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
4768 +
4769 + alloc_nid_done(sbi, inode->i_ino);
4770 +
4771 +- d_instantiate(dentry, inode);
4772 +- unlock_new_inode(inode);
4773 ++ d_instantiate_new(dentry, inode);
4774 +
4775 + if (IS_DIRSYNC(dir))
4776 + f2fs_sync_fs(sbi->sb, 1);
4777 +diff --git a/fs/fscache/page.c b/fs/fscache/page.c
4778 +index 6b35fc4860a0..1de16a5a5c4e 100644
4779 +--- a/fs/fscache/page.c
4780 ++++ b/fs/fscache/page.c
4781 +@@ -776,6 +776,7 @@ static void fscache_write_op(struct fscache_operation *_op)
4782 +
4783 + _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));
4784 +
4785 ++again:
4786 + spin_lock(&object->lock);
4787 + cookie = object->cookie;
4788 +
4789 +@@ -816,10 +817,6 @@ static void fscache_write_op(struct fscache_operation *_op)
4790 + goto superseded;
4791 + page = results[0];
4792 + _debug("gang %d [%lx]", n, page->index);
4793 +- if (page->index >= op->store_limit) {
4794 +- fscache_stat(&fscache_n_store_pages_over_limit);
4795 +- goto superseded;
4796 +- }
4797 +
4798 + radix_tree_tag_set(&cookie->stores, page->index,
4799 + FSCACHE_COOKIE_STORING_TAG);
4800 +@@ -829,6 +826,9 @@ static void fscache_write_op(struct fscache_operation *_op)
4801 + spin_unlock(&cookie->stores_lock);
4802 + spin_unlock(&object->lock);
4803 +
4804 ++ if (page->index >= op->store_limit)
4805 ++ goto discard_page;
4806 ++
4807 + fscache_stat(&fscache_n_store_pages);
4808 + fscache_stat(&fscache_n_cop_write_page);
4809 + ret = object->cache->ops->write_page(op, page);
4810 +@@ -844,6 +844,11 @@ static void fscache_write_op(struct fscache_operation *_op)
4811 + _leave("");
4812 + return;
4813 +
4814 ++discard_page:
4815 ++ fscache_stat(&fscache_n_store_pages_over_limit);
4816 ++ fscache_end_page_write(object, page);
4817 ++ goto again;
4818 ++
4819 + superseded:
4820 + /* this writer is going away and there aren't any more things to
4821 + * write */
4822 +diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
4823 +index 1543aa1b2a93..8744bd773823 100644
4824 +--- a/fs/gfs2/file.c
4825 ++++ b/fs/gfs2/file.c
4826 +@@ -806,7 +806,7 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
4827 + struct gfs2_inode *ip = GFS2_I(inode);
4828 + struct gfs2_alloc_parms ap = { .aflags = 0, };
4829 + unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
4830 +- loff_t bytes, max_bytes, max_blks = UINT_MAX;
4831 ++ loff_t bytes, max_bytes, max_blks;
4832 + int error;
4833 + const loff_t pos = offset;
4834 + const loff_t count = len;
4835 +@@ -858,7 +858,8 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
4836 + return error;
4837 + /* ap.allowed tells us how many blocks quota will allow
4838 + * us to write. Check if this reduces max_blks */
4839 +- if (ap.allowed && ap.allowed < max_blks)
4840 ++ max_blks = UINT_MAX;
4841 ++ if (ap.allowed)
4842 + max_blks = ap.allowed;
4843 +
4844 + error = gfs2_inplace_reserve(ip, &ap);
4845 +diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
4846 +index ad04b3acae2b..a81ed38d8442 100644
4847 +--- a/fs/gfs2/quota.h
4848 ++++ b/fs/gfs2/quota.h
4849 +@@ -43,6 +43,8 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip,
4850 + {
4851 + struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
4852 + int ret;
4853 ++
4854 ++ ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
4855 + if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
4856 + return 0;
4857 + ret = gfs2_quota_lock(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
4858 +diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
4859 +index 30c4c9ebb693..e27317169697 100644
4860 +--- a/fs/jffs2/dir.c
4861 ++++ b/fs/jffs2/dir.c
4862 +@@ -207,8 +207,7 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry,
4863 + __func__, inode->i_ino, inode->i_mode, inode->i_nlink,
4864 + f->inocache->pino_nlink, inode->i_mapping->nrpages);
4865 +
4866 +- unlock_new_inode(inode);
4867 +- d_instantiate(dentry, inode);
4868 ++ d_instantiate_new(dentry, inode);
4869 + return 0;
4870 +
4871 + fail:
4872 +@@ -428,8 +427,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
4873 + mutex_unlock(&dir_f->sem);
4874 + jffs2_complete_reservation(c);
4875 +
4876 +- unlock_new_inode(inode);
4877 +- d_instantiate(dentry, inode);
4878 ++ d_instantiate_new(dentry, inode);
4879 + return 0;
4880 +
4881 + fail:
4882 +@@ -573,8 +571,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, umode_t mode
4883 + mutex_unlock(&dir_f->sem);
4884 + jffs2_complete_reservation(c);
4885 +
4886 +- unlock_new_inode(inode);
4887 +- d_instantiate(dentry, inode);
4888 ++ d_instantiate_new(dentry, inode);
4889 + return 0;
4890 +
4891 + fail:
4892 +@@ -745,8 +742,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, umode_t mode
4893 + mutex_unlock(&dir_f->sem);
4894 + jffs2_complete_reservation(c);
4895 +
4896 +- unlock_new_inode(inode);
4897 +- d_instantiate(dentry, inode);
4898 ++ d_instantiate_new(dentry, inode);
4899 + return 0;
4900 +
4901 + fail:
4902 +diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
4903 +index 2caf1682036d..85e2594fe95c 100644
4904 +--- a/fs/jffs2/fs.c
4905 ++++ b/fs/jffs2/fs.c
4906 +@@ -361,7 +361,6 @@ error_io:
4907 + ret = -EIO;
4908 + error:
4909 + mutex_unlock(&f->sem);
4910 +- jffs2_do_clear_inode(c, f);
4911 + iget_failed(inode);
4912 + return ERR_PTR(ret);
4913 + }
4914 +diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
4915 +index 9d7551f5c32a..f217ae750adb 100644
4916 +--- a/fs/jfs/namei.c
4917 ++++ b/fs/jfs/namei.c
4918 +@@ -178,8 +178,7 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, umode_t mode,
4919 + unlock_new_inode(ip);
4920 + iput(ip);
4921 + } else {
4922 +- unlock_new_inode(ip);
4923 +- d_instantiate(dentry, ip);
4924 ++ d_instantiate_new(dentry, ip);
4925 + }
4926 +
4927 + out2:
4928 +@@ -313,8 +312,7 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, umode_t mode)
4929 + unlock_new_inode(ip);
4930 + iput(ip);
4931 + } else {
4932 +- unlock_new_inode(ip);
4933 +- d_instantiate(dentry, ip);
4934 ++ d_instantiate_new(dentry, ip);
4935 + }
4936 +
4937 + out2:
4938 +@@ -1058,8 +1056,7 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
4939 + unlock_new_inode(ip);
4940 + iput(ip);
4941 + } else {
4942 +- unlock_new_inode(ip);
4943 +- d_instantiate(dentry, ip);
4944 ++ d_instantiate_new(dentry, ip);
4945 + }
4946 +
4947 + out2:
4948 +@@ -1443,8 +1440,7 @@ static int jfs_mknod(struct inode *dir, struct dentry *dentry,
4949 + unlock_new_inode(ip);
4950 + iput(ip);
4951 + } else {
4952 +- unlock_new_inode(ip);
4953 +- d_instantiate(dentry, ip);
4954 ++ d_instantiate_new(dentry, ip);
4955 + }
4956 +
4957 + out1:
4958 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
4959 +index 0f397e62de5a..41c8ddbc80dc 100644
4960 +--- a/fs/nfs/nfs4proc.c
4961 ++++ b/fs/nfs/nfs4proc.c
4962 +@@ -1780,7 +1780,7 @@ static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *sta
4963 + return ret;
4964 + }
4965 +
4966 +-static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, int err)
4967 ++static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, struct file_lock *fl, int err)
4968 + {
4969 + switch (err) {
4970 + default:
4971 +@@ -1827,7 +1827,11 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
4972 + return -EAGAIN;
4973 + case -ENOMEM:
4974 + case -NFS4ERR_DENIED:
4975 +- /* kill_proc(fl->fl_pid, SIGLOST, 1); */
4976 ++ if (fl) {
4977 ++ struct nfs4_lock_state *lsp = fl->fl_u.nfs4_fl.owner;
4978 ++ if (lsp)
4979 ++ set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
4980 ++ }
4981 + return 0;
4982 + }
4983 + return err;
4984 +@@ -1863,7 +1867,7 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
4985 + err = nfs4_open_recover_helper(opendata, FMODE_READ);
4986 + }
4987 + nfs4_opendata_put(opendata);
4988 +- return nfs4_handle_delegation_recall_error(server, state, stateid, err);
4989 ++ return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err);
4990 + }
4991 +
4992 + static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
4993 +@@ -6157,7 +6161,7 @@ int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state,
4994 + if (err != 0)
4995 + return err;
4996 + err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
4997 +- return nfs4_handle_delegation_recall_error(server, state, stateid, err);
4998 ++ return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err);
4999 + }
5000 +
5001 + struct nfs_release_lockowner_data {
5002 +diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
5003 +index 83fba40396ae..44f5cea49699 100644
5004 +--- a/fs/nfs/nfs4state.c
5005 ++++ b/fs/nfs/nfs4state.c
5006 +@@ -1386,6 +1386,7 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_
5007 + struct inode *inode = state->inode;
5008 + struct nfs_inode *nfsi = NFS_I(inode);
5009 + struct file_lock *fl;
5010 ++ struct nfs4_lock_state *lsp;
5011 + int status = 0;
5012 + struct file_lock_context *flctx = inode->i_flctx;
5013 + struct list_head *list;
5014 +@@ -1426,7 +1427,9 @@ restart:
5015 + case -NFS4ERR_DENIED:
5016 + case -NFS4ERR_RECLAIM_BAD:
5017 + case -NFS4ERR_RECLAIM_CONFLICT:
5018 +- /* kill_proc(fl->fl_pid, SIGLOST, 1); */
5019 ++ lsp = fl->fl_u.nfs4_fl.owner;
5020 ++ if (lsp)
5021 ++ set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
5022 + status = 0;
5023 + }
5024 + spin_lock(&flctx->flc_lock);
5025 +diff --git a/fs/nfs/nfs4sysctl.c b/fs/nfs/nfs4sysctl.c
5026 +index 0fbd3ab1be22..44a7bbbf92f8 100644
5027 +--- a/fs/nfs/nfs4sysctl.c
5028 ++++ b/fs/nfs/nfs4sysctl.c
5029 +@@ -31,7 +31,7 @@ static struct ctl_table nfs4_cb_sysctls[] = {
5030 + .data = &nfs_idmap_cache_timeout,
5031 + .maxlen = sizeof(int),
5032 + .mode = 0644,
5033 +- .proc_handler = proc_dointvec_jiffies,
5034 ++ .proc_handler = proc_dointvec,
5035 + },
5036 + { }
5037 + };
5038 +diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
5039 +index c9a1a491aa91..cd7f5b0abe84 100644
5040 +--- a/fs/nilfs2/namei.c
5041 ++++ b/fs/nilfs2/namei.c
5042 +@@ -50,8 +50,7 @@ static inline int nilfs_add_nondir(struct dentry *dentry, struct inode *inode)
5043 + {
5044 + int err = nilfs_add_link(dentry, inode);
5045 + if (!err) {
5046 +- d_instantiate(dentry, inode);
5047 +- unlock_new_inode(inode);
5048 ++ d_instantiate_new(dentry, inode);
5049 + return 0;
5050 + }
5051 + inode_dec_link_count(inode);
5052 +@@ -246,8 +245,7 @@ static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
5053 + goto out_fail;
5054 +
5055 + nilfs_mark_inode_dirty(inode);
5056 +- d_instantiate(dentry, inode);
5057 +- unlock_new_inode(inode);
5058 ++ d_instantiate_new(dentry, inode);
5059 + out:
5060 + if (!err)
5061 + err = nilfs_transaction_commit(dir->i_sb);
5062 +diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
5063 +index 164307b99405..1e0d8da0d3cd 100644
5064 +--- a/fs/ocfs2/acl.c
5065 ++++ b/fs/ocfs2/acl.c
5066 +@@ -314,7 +314,9 @@ struct posix_acl *ocfs2_iop_get_acl(struct inode *inode, int type)
5067 + return ERR_PTR(ret);
5068 + }
5069 +
5070 ++ down_read(&OCFS2_I(inode)->ip_xattr_sem);
5071 + acl = ocfs2_get_acl_nolock(inode, type, di_bh);
5072 ++ up_read(&OCFS2_I(inode)->ip_xattr_sem);
5073 +
5074 + ocfs2_inode_unlock(inode, 0);
5075 + brelse(di_bh);
5076 +@@ -333,7 +335,9 @@ int ocfs2_acl_chmod(struct inode *inode, struct buffer_head *bh)
5077 + if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
5078 + return 0;
5079 +
5080 ++ down_read(&OCFS2_I(inode)->ip_xattr_sem);
5081 + acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, bh);
5082 ++ up_read(&OCFS2_I(inode)->ip_xattr_sem);
5083 + if (IS_ERR(acl) || !acl)
5084 + return PTR_ERR(acl);
5085 + ret = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
5086 +@@ -364,8 +368,10 @@ int ocfs2_init_acl(handle_t *handle,
5087 +
5088 + if (!S_ISLNK(inode->i_mode)) {
5089 + if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
5090 ++ down_read(&OCFS2_I(dir)->ip_xattr_sem);
5091 + acl = ocfs2_get_acl_nolock(dir, ACL_TYPE_DEFAULT,
5092 + dir_bh);
5093 ++ up_read(&OCFS2_I(dir)->ip_xattr_sem);
5094 + if (IS_ERR(acl))
5095 + return PTR_ERR(acl);
5096 + }
5097 +diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
5098 +index 2ee7fe747cea..c55a9c47ac17 100644
5099 +--- a/fs/ocfs2/dlm/dlmdomain.c
5100 ++++ b/fs/ocfs2/dlm/dlmdomain.c
5101 +@@ -674,20 +674,6 @@ static void dlm_leave_domain(struct dlm_ctxt *dlm)
5102 + spin_unlock(&dlm->spinlock);
5103 + }
5104 +
5105 +-int dlm_shutting_down(struct dlm_ctxt *dlm)
5106 +-{
5107 +- int ret = 0;
5108 +-
5109 +- spin_lock(&dlm_domain_lock);
5110 +-
5111 +- if (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN)
5112 +- ret = 1;
5113 +-
5114 +- spin_unlock(&dlm_domain_lock);
5115 +-
5116 +- return ret;
5117 +-}
5118 +-
5119 + void dlm_unregister_domain(struct dlm_ctxt *dlm)
5120 + {
5121 + int leave = 0;
5122 +diff --git a/fs/ocfs2/dlm/dlmdomain.h b/fs/ocfs2/dlm/dlmdomain.h
5123 +index fd6122a38dbd..8a9281411c18 100644
5124 +--- a/fs/ocfs2/dlm/dlmdomain.h
5125 ++++ b/fs/ocfs2/dlm/dlmdomain.h
5126 +@@ -28,7 +28,30 @@
5127 + extern spinlock_t dlm_domain_lock;
5128 + extern struct list_head dlm_domains;
5129 +
5130 +-int dlm_shutting_down(struct dlm_ctxt *dlm);
5131 ++static inline int dlm_joined(struct dlm_ctxt *dlm)
5132 ++{
5133 ++ int ret = 0;
5134 ++
5135 ++ spin_lock(&dlm_domain_lock);
5136 ++ if (dlm->dlm_state == DLM_CTXT_JOINED)
5137 ++ ret = 1;
5138 ++ spin_unlock(&dlm_domain_lock);
5139 ++
5140 ++ return ret;
5141 ++}
5142 ++
5143 ++static inline int dlm_shutting_down(struct dlm_ctxt *dlm)
5144 ++{
5145 ++ int ret = 0;
5146 ++
5147 ++ spin_lock(&dlm_domain_lock);
5148 ++ if (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN)
5149 ++ ret = 1;
5150 ++ spin_unlock(&dlm_domain_lock);
5151 ++
5152 ++ return ret;
5153 ++}
5154 ++
5155 + void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm,
5156 + int node_num);
5157 +
5158 +diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
5159 +index 4a338803e7e9..88149b4387c2 100644
5160 +--- a/fs/ocfs2/dlm/dlmrecovery.c
5161 ++++ b/fs/ocfs2/dlm/dlmrecovery.c
5162 +@@ -1377,6 +1377,15 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
5163 + if (!dlm_grab(dlm))
5164 + return -EINVAL;
5165 +
5166 ++ if (!dlm_joined(dlm)) {
5167 ++ mlog(ML_ERROR, "Domain %s not joined! "
5168 ++ "lockres %.*s, master %u\n",
5169 ++ dlm->name, mres->lockname_len,
5170 ++ mres->lockname, mres->master);
5171 ++ dlm_put(dlm);
5172 ++ return -EINVAL;
5173 ++ }
5174 ++
5175 + BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
5176 +
5177 + real_master = mres->master;
5178 +diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
5179 +index 13534f4fe5b5..722eb5bc9b8f 100644
5180 +--- a/fs/ocfs2/journal.c
5181 ++++ b/fs/ocfs2/journal.c
5182 +@@ -666,23 +666,24 @@ static int __ocfs2_journal_access(handle_t *handle,
5183 + /* we can safely remove this assertion after testing. */
5184 + if (!buffer_uptodate(bh)) {
5185 + mlog(ML_ERROR, "giving me a buffer that's not uptodate!\n");
5186 +- mlog(ML_ERROR, "b_blocknr=%llu\n",
5187 +- (unsigned long long)bh->b_blocknr);
5188 ++ mlog(ML_ERROR, "b_blocknr=%llu, b_state=0x%lx\n",
5189 ++ (unsigned long long)bh->b_blocknr, bh->b_state);
5190 +
5191 + lock_buffer(bh);
5192 + /*
5193 +- * A previous attempt to write this buffer head failed.
5194 +- * Nothing we can do but to retry the write and hope for
5195 +- * the best.
5196 ++ * A previous transaction with a couple of buffer heads fail
5197 ++ * to checkpoint, so all the bhs are marked as BH_Write_EIO.
5198 ++ * For current transaction, the bh is just among those error
5199 ++ * bhs which previous transaction handle. We can't just clear
5200 ++ * its BH_Write_EIO and reuse directly, since other bhs are
5201 ++ * not written to disk yet and that will cause metadata
5202 ++ * inconsistency. So we should set fs read-only to avoid
5203 ++ * further damage.
5204 + */
5205 + if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) {
5206 +- clear_buffer_write_io_error(bh);
5207 +- set_buffer_uptodate(bh);
5208 +- }
5209 +-
5210 +- if (!buffer_uptodate(bh)) {
5211 + unlock_buffer(bh);
5212 +- return -EIO;
5213 ++ return ocfs2_error(osb->sb, "A previous attempt to "
5214 ++ "write this buffer head failed\n");
5215 + }
5216 + unlock_buffer(bh);
5217 + }
5218 +diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
5219 +index 2de4c8a9340c..4f5141350af8 100644
5220 +--- a/fs/ocfs2/super.c
5221 ++++ b/fs/ocfs2/super.c
5222 +@@ -477,9 +477,8 @@ static int ocfs2_init_global_system_inodes(struct ocfs2_super *osb)
5223 + new = ocfs2_get_system_file_inode(osb, i, osb->slot_num);
5224 + if (!new) {
5225 + ocfs2_release_system_inodes(osb);
5226 +- status = -EINVAL;
5227 ++ status = ocfs2_is_soft_readonly(osb) ? -EROFS : -EINVAL;
5228 + mlog_errno(status);
5229 +- /* FIXME: Should ERROR_RO_FS */
5230 + mlog(ML_ERROR, "Unable to load system inode %d, "
5231 + "possibly corrupt fs?", i);
5232 + goto bail;
5233 +@@ -508,7 +507,7 @@ static int ocfs2_init_local_system_inodes(struct ocfs2_super *osb)
5234 + new = ocfs2_get_system_file_inode(osb, i, osb->slot_num);
5235 + if (!new) {
5236 + ocfs2_release_system_inodes(osb);
5237 +- status = -EINVAL;
5238 ++ status = ocfs2_is_soft_readonly(osb) ? -EROFS : -EINVAL;
5239 + mlog(ML_ERROR, "status=%d, sysfile=%d, slot=%d\n",
5240 + status, i, osb->slot_num);
5241 + goto bail;
5242 +diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
5243 +index 877830b05e12..4f0788232f2f 100644
5244 +--- a/fs/ocfs2/xattr.c
5245 ++++ b/fs/ocfs2/xattr.c
5246 +@@ -639,9 +639,11 @@ int ocfs2_calc_xattr_init(struct inode *dir,
5247 + si->value_len);
5248 +
5249 + if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
5250 ++ down_read(&OCFS2_I(dir)->ip_xattr_sem);
5251 + acl_len = ocfs2_xattr_get_nolock(dir, dir_bh,
5252 + OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT,
5253 + "", NULL, 0);
5254 ++ up_read(&OCFS2_I(dir)->ip_xattr_sem);
5255 + if (acl_len > 0) {
5256 + a_size = ocfs2_xattr_entry_real_size(0, acl_len);
5257 + if (S_ISDIR(mode))
5258 +diff --git a/fs/proc/base.c b/fs/proc/base.c
5259 +index 4a666ec7fb64..5f9cec2db6c3 100644
5260 +--- a/fs/proc/base.c
5261 ++++ b/fs/proc/base.c
5262 +@@ -94,6 +94,8 @@
5263 + #include "internal.h"
5264 + #include "fd.h"
5265 +
5266 ++#include "../../lib/kstrtox.h"
5267 ++
5268 + /* NOTE:
5269 + * Implementing inode permission operations in /proc is almost
5270 + * certainly an error. Permission checks need to happen during
5271 +@@ -1836,8 +1838,33 @@ end_instantiate:
5272 + static int dname_to_vma_addr(struct dentry *dentry,
5273 + unsigned long *start, unsigned long *end)
5274 + {
5275 +- if (sscanf(dentry->d_name.name, "%lx-%lx", start, end) != 2)
5276 ++ const char *str = dentry->d_name.name;
5277 ++ unsigned long long sval, eval;
5278 ++ unsigned int len;
5279 ++
5280 ++ len = _parse_integer(str, 16, &sval);
5281 ++ if (len & KSTRTOX_OVERFLOW)
5282 ++ return -EINVAL;
5283 ++ if (sval != (unsigned long)sval)
5284 + return -EINVAL;
5285 ++ str += len;
5286 ++
5287 ++ if (*str != '-')
5288 ++ return -EINVAL;
5289 ++ str++;
5290 ++
5291 ++ len = _parse_integer(str, 16, &eval);
5292 ++ if (len & KSTRTOX_OVERFLOW)
5293 ++ return -EINVAL;
5294 ++ if (eval != (unsigned long)eval)
5295 ++ return -EINVAL;
5296 ++ str += len;
5297 ++
5298 ++ if (*str != '\0')
5299 ++ return -EINVAL;
5300 ++
5301 ++ *start = sval;
5302 ++ *end = eval;
5303 +
5304 + return 0;
5305 + }
5306 +diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
5307 +index 4dbe1e2daeca..5e1054f028af 100644
5308 +--- a/fs/proc/proc_sysctl.c
5309 ++++ b/fs/proc/proc_sysctl.c
5310 +@@ -654,7 +654,10 @@ static bool proc_sys_link_fill_cache(struct file *file,
5311 + struct ctl_table *table)
5312 + {
5313 + bool ret = true;
5314 ++
5315 + head = sysctl_head_grab(head);
5316 ++ if (IS_ERR(head))
5317 ++ return false;
5318 +
5319 + if (S_ISLNK(table->mode)) {
5320 + /* It is not an error if we can not follow the link ignore it */
5321 +diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
5322 +index 3ebc70167e41..eb611bdd4725 100644
5323 +--- a/fs/reiserfs/namei.c
5324 ++++ b/fs/reiserfs/namei.c
5325 +@@ -687,8 +687,7 @@ static int reiserfs_create(struct inode *dir, struct dentry *dentry, umode_t mod
5326 + reiserfs_update_inode_transaction(inode);
5327 + reiserfs_update_inode_transaction(dir);
5328 +
5329 +- unlock_new_inode(inode);
5330 +- d_instantiate(dentry, inode);
5331 ++ d_instantiate_new(dentry, inode);
5332 + retval = journal_end(&th);
5333 +
5334 + out_failed:
5335 +@@ -771,8 +770,7 @@ static int reiserfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode
5336 + goto out_failed;
5337 + }
5338 +
5339 +- unlock_new_inode(inode);
5340 +- d_instantiate(dentry, inode);
5341 ++ d_instantiate_new(dentry, inode);
5342 + retval = journal_end(&th);
5343 +
5344 + out_failed:
5345 +@@ -871,8 +869,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
5346 + /* the above add_entry did not update dir's stat data */
5347 + reiserfs_update_sd(&th, dir);
5348 +
5349 +- unlock_new_inode(inode);
5350 +- d_instantiate(dentry, inode);
5351 ++ d_instantiate_new(dentry, inode);
5352 + retval = journal_end(&th);
5353 + out_failed:
5354 + reiserfs_write_unlock(dir->i_sb);
5355 +@@ -1186,8 +1183,7 @@ static int reiserfs_symlink(struct inode *parent_dir,
5356 + goto out_failed;
5357 + }
5358 +
5359 +- unlock_new_inode(inode);
5360 +- d_instantiate(dentry, inode);
5361 ++ d_instantiate_new(dentry, inode);
5362 + retval = journal_end(&th);
5363 + out_failed:
5364 + reiserfs_write_unlock(parent_dir->i_sb);
5365 +diff --git a/fs/udf/namei.c b/fs/udf/namei.c
5366 +index c97b5a8d1e24..f34c545f4e54 100644
5367 +--- a/fs/udf/namei.c
5368 ++++ b/fs/udf/namei.c
5369 +@@ -611,8 +611,7 @@ static int udf_add_nondir(struct dentry *dentry, struct inode *inode)
5370 + if (fibh.sbh != fibh.ebh)
5371 + brelse(fibh.ebh);
5372 + brelse(fibh.sbh);
5373 +- unlock_new_inode(inode);
5374 +- d_instantiate(dentry, inode);
5375 ++ d_instantiate_new(dentry, inode);
5376 +
5377 + return 0;
5378 + }
5379 +@@ -722,8 +721,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
5380 + inc_nlink(dir);
5381 + dir->i_ctime = dir->i_mtime = current_fs_time(dir->i_sb);
5382 + mark_inode_dirty(dir);
5383 +- unlock_new_inode(inode);
5384 +- d_instantiate(dentry, inode);
5385 ++ d_instantiate_new(dentry, inode);
5386 + if (fibh.sbh != fibh.ebh)
5387 + brelse(fibh.ebh);
5388 + brelse(fibh.sbh);
5389 +diff --git a/fs/udf/super.c b/fs/udf/super.c
5390 +index ee09c97f3ab2..159977ec8e54 100644
5391 +--- a/fs/udf/super.c
5392 ++++ b/fs/udf/super.c
5393 +@@ -2073,8 +2073,9 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
5394 + bool lvid_open = false;
5395 +
5396 + uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT);
5397 +- uopt.uid = INVALID_UID;
5398 +- uopt.gid = INVALID_GID;
5399 ++ /* By default we'll use overflow[ug]id when UDF inode [ug]id == -1 */
5400 ++ uopt.uid = make_kuid(current_user_ns(), overflowuid);
5401 ++ uopt.gid = make_kgid(current_user_ns(), overflowgid);
5402 + uopt.umask = 0;
5403 + uopt.fmode = UDF_INVALID_MODE;
5404 + uopt.dmode = UDF_INVALID_MODE;
5405 +diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
5406 +index 47966554317c..2ec7689c25cf 100644
5407 +--- a/fs/ufs/namei.c
5408 ++++ b/fs/ufs/namei.c
5409 +@@ -38,8 +38,7 @@ static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode)
5410 + {
5411 + int err = ufs_add_link(dentry, inode);
5412 + if (!err) {
5413 +- unlock_new_inode(inode);
5414 +- d_instantiate(dentry, inode);
5415 ++ d_instantiate_new(dentry, inode);
5416 + return 0;
5417 + }
5418 + inode_dec_link_count(inode);
5419 +@@ -191,8 +190,7 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
5420 + if (err)
5421 + goto out_fail;
5422 +
5423 +- unlock_new_inode(inode);
5424 +- d_instantiate(dentry, inode);
5425 ++ d_instantiate_new(dentry, inode);
5426 + return 0;
5427 +
5428 + out_fail:
5429 +diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
5430 +index f949818fa1c7..fb9636cc927c 100644
5431 +--- a/fs/xfs/libxfs/xfs_attr.c
5432 ++++ b/fs/xfs/libxfs/xfs_attr.c
5433 +@@ -130,9 +130,6 @@ xfs_attr_get(
5434 + if (XFS_FORCED_SHUTDOWN(ip->i_mount))
5435 + return -EIO;
5436 +
5437 +- if (!xfs_inode_hasattr(ip))
5438 +- return -ENOATTR;
5439 +-
5440 + error = xfs_attr_args_init(&args, ip, name, flags);
5441 + if (error)
5442 + return error;
5443 +@@ -417,9 +414,6 @@ xfs_attr_remove(
5444 + if (XFS_FORCED_SHUTDOWN(dp->i_mount))
5445 + return -EIO;
5446 +
5447 +- if (!xfs_inode_hasattr(dp))
5448 +- return -ENOATTR;
5449 +-
5450 + error = xfs_attr_args_init(&args, dp, name, flags);
5451 + if (error)
5452 + return error;
5453 +diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
5454 +index e85a9519a5ae..64ad05cb831a 100644
5455 +--- a/fs/xfs/xfs_discard.c
5456 ++++ b/fs/xfs/xfs_discard.c
5457 +@@ -50,19 +50,19 @@ xfs_trim_extents(
5458 +
5459 + pag = xfs_perag_get(mp, agno);
5460 +
5461 +- error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
5462 +- if (error || !agbp)
5463 +- goto out_put_perag;
5464 +-
5465 +- cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT);
5466 +-
5467 + /*
5468 + * Force out the log. This means any transactions that might have freed
5469 +- * space before we took the AGF buffer lock are now on disk, and the
5470 ++ * space before we take the AGF buffer lock are now on disk, and the
5471 + * volatile disk cache is flushed.
5472 + */
5473 + xfs_log_force(mp, XFS_LOG_SYNC);
5474 +
5475 ++ error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
5476 ++ if (error || !agbp)
5477 ++ goto out_put_perag;
5478 ++
5479 ++ cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT);
5480 ++
5481 + /*
5482 + * Look up the longest btree in the AGF and start with it.
5483 + */
5484 +diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
5485 +index 4814cf971048..25b793325b09 100644
5486 +--- a/include/asm-generic/pgtable.h
5487 ++++ b/include/asm-generic/pgtable.h
5488 +@@ -237,6 +237,21 @@ extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
5489 + extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
5490 + #endif
5491 +
5492 ++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5493 ++/*
5494 ++ * This is an implementation of pmdp_establish() that is only suitable for an
5495 ++ * architecture that doesn't have hardware dirty/accessed bits. In this case we
5496 ++ * can't race with CPU which sets these bits and non-atomic aproach is fine.
5497 ++ */
5498 ++static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
5499 ++ unsigned long address, pmd_t *pmdp, pmd_t pmd)
5500 ++{
5501 ++ pmd_t old_pmd = *pmdp;
5502 ++ set_pmd_at(vma->vm_mm, address, pmdp, pmd);
5503 ++ return old_pmd;
5504 ++}
5505 ++#endif
5506 ++
5507 + #ifndef __HAVE_ARCH_PMDP_INVALIDATE
5508 + extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
5509 + pmd_t *pmdp);
5510 +diff --git a/include/linux/dcache.h b/include/linux/dcache.h
5511 +index d516847e0fae..11f4334ab177 100644
5512 +--- a/include/linux/dcache.h
5513 ++++ b/include/linux/dcache.h
5514 +@@ -236,6 +236,7 @@ extern seqlock_t rename_lock;
5515 + * These are the low-level FS interfaces to the dcache..
5516 + */
5517 + extern void d_instantiate(struct dentry *, struct inode *);
5518 ++extern void d_instantiate_new(struct dentry *, struct inode *);
5519 + extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
5520 + extern int d_instantiate_no_diralias(struct dentry *, struct inode *);
5521 + extern void __d_drop(struct dentry *dentry);
5522 +diff --git a/include/linux/suspend.h b/include/linux/suspend.h
5523 +index 8b6ec7ef0854..4a69bca7c6ab 100644
5524 +--- a/include/linux/suspend.h
5525 ++++ b/include/linux/suspend.h
5526 +@@ -377,6 +377,8 @@ extern int swsusp_page_is_forbidden(struct page *);
5527 + extern void swsusp_set_page_free(struct page *);
5528 + extern void swsusp_unset_page_free(struct page *);
5529 + extern unsigned long get_safe_page(gfp_t gfp_mask);
5530 ++extern asmlinkage int swsusp_arch_suspend(void);
5531 ++extern asmlinkage int swsusp_arch_resume(void);
5532 +
5533 + extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
5534 + extern int hibernate(void);
5535 +diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
5536 +index 1074b8921a5d..69c728883266 100644
5537 +--- a/include/linux/usb/composite.h
5538 ++++ b/include/linux/usb/composite.h
5539 +@@ -53,6 +53,9 @@
5540 + /* big enough to hold our biggest descriptor */
5541 + #define USB_COMP_EP0_BUFSIZ 1024
5542 +
5543 ++/* OS feature descriptor length <= 4kB */
5544 ++#define USB_COMP_EP0_OS_DESC_BUFSIZ 4096
5545 ++
5546 + #define USB_MS_TO_HS_INTERVAL(x) (ilog2((x * 1000 / 125)) + 1)
5547 + struct usb_configuration;
5548 +
5549 +diff --git a/include/net/ip.h b/include/net/ip.h
5550 +index 639398af273b..0530bcdbc212 100644
5551 +--- a/include/net/ip.h
5552 ++++ b/include/net/ip.h
5553 +@@ -279,6 +279,13 @@ int ip_decrease_ttl(struct iphdr *iph)
5554 + return --iph->ttl;
5555 + }
5556 +
5557 ++static inline int ip_mtu_locked(const struct dst_entry *dst)
5558 ++{
5559 ++ const struct rtable *rt = (const struct rtable *)dst;
5560 ++
5561 ++ return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
5562 ++}
5563 ++
5564 + static inline
5565 + int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
5566 + {
5567 +@@ -286,7 +293,7 @@ int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
5568 +
5569 + return pmtudisc == IP_PMTUDISC_DO ||
5570 + (pmtudisc == IP_PMTUDISC_WANT &&
5571 +- !(dst_metric_locked(dst, RTAX_MTU)));
5572 ++ !ip_mtu_locked(dst));
5573 + }
5574 +
5575 + static inline bool ip_sk_accept_pmtu(const struct sock *sk)
5576 +@@ -312,7 +319,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
5577 + struct net *net = dev_net(dst->dev);
5578 +
5579 + if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
5580 +- dst_metric_locked(dst, RTAX_MTU) ||
5581 ++ ip_mtu_locked(dst) ||
5582 + !forwarding)
5583 + return dst_mtu(dst);
5584 +
5585 +diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
5586 +index bda1721e9622..3afb7c4c7098 100644
5587 +--- a/include/net/ip_fib.h
5588 ++++ b/include/net/ip_fib.h
5589 +@@ -56,6 +56,7 @@ struct fib_nh_exception {
5590 + int fnhe_genid;
5591 + __be32 fnhe_daddr;
5592 + u32 fnhe_pmtu;
5593 ++ bool fnhe_mtu_locked;
5594 + __be32 fnhe_gw;
5595 + unsigned long fnhe_expires;
5596 + struct rtable __rcu *fnhe_rth_input;
5597 +diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
5598 +index ea985aa7a6c5..df528a623548 100644
5599 +--- a/include/net/llc_conn.h
5600 ++++ b/include/net/llc_conn.h
5601 +@@ -104,7 +104,7 @@ void llc_sk_reset(struct sock *sk);
5602 +
5603 + /* Access to a connection */
5604 + int llc_conn_state_process(struct sock *sk, struct sk_buff *skb);
5605 +-void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
5606 ++int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
5607 + void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb);
5608 + void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit);
5609 + void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit);
5610 +diff --git a/include/net/mac80211.h b/include/net/mac80211.h
5611 +index 7a49a31f6ddc..ec11cb1c0d80 100644
5612 +--- a/include/net/mac80211.h
5613 ++++ b/include/net/mac80211.h
5614 +@@ -3898,7 +3898,7 @@ static inline int ieee80211_sta_ps_transition_ni(struct ieee80211_sta *sta,
5615 + * The TX headroom reserved by mac80211 for its own tx_status functions.
5616 + * This is enough for the radiotap header.
5617 + */
5618 +-#define IEEE80211_TX_STATUS_HEADROOM 14
5619 ++#define IEEE80211_TX_STATUS_HEADROOM ALIGN(14, 4)
5620 +
5621 + /**
5622 + * ieee80211_sta_set_buffered - inform mac80211 about driver-buffered frames
5623 +diff --git a/include/net/regulatory.h b/include/net/regulatory.h
5624 +index ebc5a2ed8631..f83cacce3308 100644
5625 +--- a/include/net/regulatory.h
5626 ++++ b/include/net/regulatory.h
5627 +@@ -78,7 +78,7 @@ struct regulatory_request {
5628 + int wiphy_idx;
5629 + enum nl80211_reg_initiator initiator;
5630 + enum nl80211_user_reg_hint_type user_reg_hint_type;
5631 +- char alpha2[2];
5632 ++ char alpha2[3];
5633 + enum nl80211_dfs_regions dfs_region;
5634 + bool intersect;
5635 + bool processed;
5636 +diff --git a/include/net/route.h b/include/net/route.h
5637 +index a3b9ef74a389..d2a92d94ff72 100644
5638 +--- a/include/net/route.h
5639 ++++ b/include/net/route.h
5640 +@@ -64,7 +64,8 @@ struct rtable {
5641 + __be32 rt_gateway;
5642 +
5643 + /* Miscellaneous cached information */
5644 +- u32 rt_pmtu;
5645 ++ u32 rt_mtu_locked:1,
5646 ++ rt_pmtu:31;
5647 +
5648 + u32 rt_table_id;
5649 +
5650 +diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
5651 +index 073b9ac245ba..e844556794dc 100644
5652 +--- a/include/trace/events/timer.h
5653 ++++ b/include/trace/events/timer.h
5654 +@@ -125,6 +125,20 @@ DEFINE_EVENT(timer_class, timer_cancel,
5655 + TP_ARGS(timer)
5656 + );
5657 +
5658 ++#define decode_clockid(type) \
5659 ++ __print_symbolic(type, \
5660 ++ { CLOCK_REALTIME, "CLOCK_REALTIME" }, \
5661 ++ { CLOCK_MONOTONIC, "CLOCK_MONOTONIC" }, \
5662 ++ { CLOCK_BOOTTIME, "CLOCK_BOOTTIME" }, \
5663 ++ { CLOCK_TAI, "CLOCK_TAI" })
5664 ++
5665 ++#define decode_hrtimer_mode(mode) \
5666 ++ __print_symbolic(mode, \
5667 ++ { HRTIMER_MODE_ABS, "ABS" }, \
5668 ++ { HRTIMER_MODE_REL, "REL" }, \
5669 ++ { HRTIMER_MODE_ABS_PINNED, "ABS|PINNED" }, \
5670 ++ { HRTIMER_MODE_REL_PINNED, "REL|PINNED" })
5671 ++
5672 + /**
5673 + * hrtimer_init - called when the hrtimer is initialized
5674 + * @hrtimer: pointer to struct hrtimer
5675 +@@ -151,10 +165,8 @@ TRACE_EVENT(hrtimer_init,
5676 + ),
5677 +
5678 + TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer,
5679 +- __entry->clockid == CLOCK_REALTIME ?
5680 +- "CLOCK_REALTIME" : "CLOCK_MONOTONIC",
5681 +- __entry->mode == HRTIMER_MODE_ABS ?
5682 +- "HRTIMER_MODE_ABS" : "HRTIMER_MODE_REL")
5683 ++ decode_clockid(__entry->clockid),
5684 ++ decode_hrtimer_mode(__entry->mode))
5685 + );
5686 +
5687 + /**
5688 +diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
5689 +index fc9e2d6e5e2f..232367124712 100644
5690 +--- a/include/uapi/drm/virtgpu_drm.h
5691 ++++ b/include/uapi/drm/virtgpu_drm.h
5692 +@@ -60,6 +60,7 @@ struct drm_virtgpu_execbuffer {
5693 + };
5694 +
5695 + #define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
5696 ++#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
5697 +
5698 + struct drm_virtgpu_getparam {
5699 + uint64_t param;
5700 +diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
5701 +index ea9221b0331a..064d2026ab38 100644
5702 +--- a/include/uapi/linux/if_ether.h
5703 ++++ b/include/uapi/linux/if_ether.h
5704 +@@ -29,6 +29,7 @@
5705 + */
5706 +
5707 + #define ETH_ALEN 6 /* Octets in one ethernet addr */
5708 ++#define ETH_TLEN 2 /* Octets in ethernet type field */
5709 + #define ETH_HLEN 14 /* Total octets in header. */
5710 + #define ETH_ZLEN 60 /* Min. octets in frame sans FCS */
5711 + #define ETH_DATA_LEN 1500 /* Max. octets in payload */
5712 +diff --git a/ipc/shm.c b/ipc/shm.c
5713 +index a492dd81cf56..32974cfe5947 100644
5714 +--- a/ipc/shm.c
5715 ++++ b/ipc/shm.c
5716 +@@ -1113,14 +1113,17 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
5717 + goto out;
5718 + else if ((addr = (ulong)shmaddr)) {
5719 + if (addr & (shmlba - 1)) {
5720 +- /*
5721 +- * Round down to the nearest multiple of shmlba.
5722 +- * For sane do_mmap_pgoff() parameters, avoid
5723 +- * round downs that trigger nil-page and MAP_FIXED.
5724 +- */
5725 +- if ((shmflg & SHM_RND) && addr >= shmlba)
5726 +- addr &= ~(shmlba - 1);
5727 +- else
5728 ++ if (shmflg & SHM_RND) {
5729 ++ addr &= ~(shmlba - 1); /* round down */
5730 ++
5731 ++ /*
5732 ++ * Ensure that the round-down is non-nil
5733 ++ * when remapping. This can happen for
5734 ++ * cases when addr < shmlba.
5735 ++ */
5736 ++ if (!addr && (shmflg & SHM_REMAP))
5737 ++ goto out;
5738 ++ } else
5739 + #ifndef __ARCH_FORCE_SHMLBA
5740 + if (addr & ~PAGE_MASK)
5741 + #endif
5742 +diff --git a/kernel/audit.c b/kernel/audit.c
5743 +index 6881b485aa2a..bdf0cf463815 100644
5744 +--- a/kernel/audit.c
5745 ++++ b/kernel/audit.c
5746 +@@ -744,6 +744,8 @@ static void audit_log_feature_change(int which, u32 old_feature, u32 new_feature
5747 + return;
5748 +
5749 + ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_FEATURE_CHANGE);
5750 ++ if (!ab)
5751 ++ return;
5752 + audit_log_task_info(ab, current);
5753 + audit_log_format(ab, " feature=%s old=%u new=%u old_lock=%u new_lock=%u res=%d",
5754 + audit_feature_names[which], !!old_feature, !!new_feature,
5755 +diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
5756 +index 4121345498e0..ebc52c7bd8a6 100644
5757 +--- a/kernel/debug/kdb/kdb_main.c
5758 ++++ b/kernel/debug/kdb/kdb_main.c
5759 +@@ -1564,6 +1564,7 @@ static int kdb_md(int argc, const char **argv)
5760 + int symbolic = 0;
5761 + int valid = 0;
5762 + int phys = 0;
5763 ++ int raw = 0;
5764 +
5765 + kdbgetintenv("MDCOUNT", &mdcount);
5766 + kdbgetintenv("RADIX", &radix);
5767 +@@ -1573,9 +1574,10 @@ static int kdb_md(int argc, const char **argv)
5768 + repeat = mdcount * 16 / bytesperword;
5769 +
5770 + if (strcmp(argv[0], "mdr") == 0) {
5771 +- if (argc != 2)
5772 ++ if (argc == 2 || (argc == 0 && last_addr != 0))
5773 ++ valid = raw = 1;
5774 ++ else
5775 + return KDB_ARGCOUNT;
5776 +- valid = 1;
5777 + } else if (isdigit(argv[0][2])) {
5778 + bytesperword = (int)(argv[0][2] - '0');
5779 + if (bytesperword == 0) {
5780 +@@ -1611,7 +1613,10 @@ static int kdb_md(int argc, const char **argv)
5781 + radix = last_radix;
5782 + bytesperword = last_bytesperword;
5783 + repeat = last_repeat;
5784 +- mdcount = ((repeat * bytesperword) + 15) / 16;
5785 ++ if (raw)
5786 ++ mdcount = repeat;
5787 ++ else
5788 ++ mdcount = ((repeat * bytesperword) + 15) / 16;
5789 + }
5790 +
5791 + if (argc) {
5792 +@@ -1628,7 +1633,10 @@ static int kdb_md(int argc, const char **argv)
5793 + diag = kdbgetularg(argv[nextarg], &val);
5794 + if (!diag) {
5795 + mdcount = (int) val;
5796 +- repeat = mdcount * 16 / bytesperword;
5797 ++ if (raw)
5798 ++ repeat = mdcount;
5799 ++ else
5800 ++ repeat = mdcount * 16 / bytesperword;
5801 + }
5802 + }
5803 + if (argc >= nextarg+1) {
5804 +@@ -1638,8 +1646,15 @@ static int kdb_md(int argc, const char **argv)
5805 + }
5806 + }
5807 +
5808 +- if (strcmp(argv[0], "mdr") == 0)
5809 +- return kdb_mdr(addr, mdcount);
5810 ++ if (strcmp(argv[0], "mdr") == 0) {
5811 ++ int ret;
5812 ++ last_addr = addr;
5813 ++ ret = kdb_mdr(addr, mdcount);
5814 ++ last_addr += mdcount;
5815 ++ last_repeat = mdcount;
5816 ++ last_bytesperword = bytesperword; // to make REPEAT happy
5817 ++ return ret;
5818 ++ }
5819 +
5820 + switch (radix) {
5821 + case 10:
5822 +diff --git a/kernel/events/core.c b/kernel/events/core.c
5823 +index 92d1f12f4407..990ac41d8a5f 100644
5824 +--- a/kernel/events/core.c
5825 ++++ b/kernel/events/core.c
5826 +@@ -419,9 +419,15 @@ static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
5827 +
5828 + static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
5829 + {
5830 +- struct perf_cgroup *cgrp_out = cpuctx->cgrp;
5831 +- if (cgrp_out)
5832 +- __update_cgrp_time(cgrp_out);
5833 ++ struct perf_cgroup *cgrp = cpuctx->cgrp;
5834 ++ struct cgroup_subsys_state *css;
5835 ++
5836 ++ if (cgrp) {
5837 ++ for (css = &cgrp->css; css; css = css->parent) {
5838 ++ cgrp = container_of(css, struct perf_cgroup, css);
5839 ++ __update_cgrp_time(cgrp);
5840 ++ }
5841 ++ }
5842 + }
5843 +
5844 + static inline void update_cgrp_time_from_event(struct perf_event *event)
5845 +@@ -449,6 +455,7 @@ perf_cgroup_set_timestamp(struct task_struct *task,
5846 + {
5847 + struct perf_cgroup *cgrp;
5848 + struct perf_cgroup_info *info;
5849 ++ struct cgroup_subsys_state *css;
5850 +
5851 + /*
5852 + * ctx->lock held by caller
5853 +@@ -459,8 +466,12 @@ perf_cgroup_set_timestamp(struct task_struct *task,
5854 + return;
5855 +
5856 + cgrp = perf_cgroup_from_task(task, ctx);
5857 +- info = this_cpu_ptr(cgrp->info);
5858 +- info->timestamp = ctx->timestamp;
5859 ++
5860 ++ for (css = &cgrp->css; css; css = css->parent) {
5861 ++ cgrp = container_of(css, struct perf_cgroup, css);
5862 ++ info = this_cpu_ptr(cgrp->info);
5863 ++ info->timestamp = ctx->timestamp;
5864 ++ }
5865 + }
5866 +
5867 + #define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */
5868 +@@ -5288,7 +5299,8 @@ static void perf_output_read_group(struct perf_output_handle *handle,
5869 + if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
5870 + values[n++] = running;
5871 +
5872 +- if (leader != event)
5873 ++ if ((leader != event) &&
5874 ++ (leader->state == PERF_EVENT_STATE_ACTIVE))
5875 + leader->pmu->read(leader);
5876 +
5877 + values[n++] = perf_event_count(leader);
5878 +diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
5879 +index 8173bc7fec92..3b40c8809e52 100644
5880 +--- a/kernel/locking/qspinlock.c
5881 ++++ b/kernel/locking/qspinlock.c
5882 +@@ -423,6 +423,14 @@ queue:
5883 + tail = encode_tail(smp_processor_id(), idx);
5884 +
5885 + node += idx;
5886 ++
5887 ++ /*
5888 ++ * Ensure that we increment the head node->count before initialising
5889 ++ * the actual node. If the compiler is kind enough to reorder these
5890 ++ * stores, then an IRQ could overwrite our assignments.
5891 ++ */
5892 ++ barrier();
5893 ++
5894 + node->locked = 0;
5895 + node->next = NULL;
5896 + pv_init_node(node);
5897 +diff --git a/kernel/power/power.h b/kernel/power/power.h
5898 +index caadb566e82b..25367fc0b152 100644
5899 +--- a/kernel/power/power.h
5900 ++++ b/kernel/power/power.h
5901 +@@ -85,9 +85,6 @@ extern int in_suspend;
5902 + extern dev_t swsusp_resume_device;
5903 + extern sector_t swsusp_resume_block;
5904 +
5905 +-extern asmlinkage int swsusp_arch_suspend(void);
5906 +-extern asmlinkage int swsusp_arch_resume(void);
5907 +-
5908 + extern int create_basic_memory_bitmaps(void);
5909 + extern void free_basic_memory_bitmaps(void);
5910 + extern int hibernate_preallocate_memory(void);
5911 +diff --git a/kernel/relay.c b/kernel/relay.c
5912 +index 0b4570cfacae..f6d5f08bdfaa 100644
5913 +--- a/kernel/relay.c
5914 ++++ b/kernel/relay.c
5915 +@@ -163,7 +163,7 @@ static struct rchan_buf *relay_create_buf(struct rchan *chan)
5916 + {
5917 + struct rchan_buf *buf;
5918 +
5919 +- if (chan->n_subbufs > UINT_MAX / sizeof(size_t *))
5920 ++ if (chan->n_subbufs > KMALLOC_MAX_SIZE / sizeof(size_t *))
5921 + return NULL;
5922 +
5923 + buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
5924 +diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
5925 +index 2ef31c93e195..801b4ec40702 100644
5926 +--- a/kernel/sched/rt.c
5927 ++++ b/kernel/sched/rt.c
5928 +@@ -822,6 +822,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
5929 + struct rq *rq = rq_of_rt_rq(rt_rq);
5930 +
5931 + raw_spin_lock(&rq->lock);
5932 ++ update_rq_clock(rq);
5933 ++
5934 + if (rt_rq->rt_time) {
5935 + u64 runtime;
5936 +
5937 +diff --git a/kernel/signal.c b/kernel/signal.c
5938 +index 7d75bc2d042f..8bfbc47f0a23 100644
5939 +--- a/kernel/signal.c
5940 ++++ b/kernel/signal.c
5941 +@@ -1392,6 +1392,10 @@ static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
5942 + return ret;
5943 + }
5944 +
5945 ++ /* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */
5946 ++ if (pid == INT_MIN)
5947 ++ return -ESRCH;
5948 ++
5949 + read_lock(&tasklist_lock);
5950 + if (pid != -1) {
5951 + ret = __kill_pgrp_info(sig, info,
5952 +diff --git a/kernel/sys.c b/kernel/sys.c
5953 +index 78947de6f969..6624919ef0e7 100644
5954 +--- a/kernel/sys.c
5955 ++++ b/kernel/sys.c
5956 +@@ -53,6 +53,8 @@
5957 + #include <linux/uidgid.h>
5958 + #include <linux/cred.h>
5959 +
5960 ++#include <linux/nospec.h>
5961 ++
5962 + #include <linux/kmsg_dump.h>
5963 + /* Move somewhere else to avoid recompiling? */
5964 + #include <generated/utsrelease.h>
5965 +@@ -1311,6 +1313,7 @@ SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
5966 + if (resource >= RLIM_NLIMITS)
5967 + return -EINVAL;
5968 +
5969 ++ resource = array_index_nospec(resource, RLIM_NLIMITS);
5970 + task_lock(current->group_leader);
5971 + x = current->signal->rlim[resource];
5972 + task_unlock(current->group_leader);
5973 +diff --git a/kernel/workqueue.c b/kernel/workqueue.c
5974 +index 8df77ed6aa99..d8a2084b88db 100644
5975 +--- a/kernel/workqueue.c
5976 ++++ b/kernel/workqueue.c
5977 +@@ -5199,7 +5199,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
5978 +
5979 + ret = device_register(&wq_dev->dev);
5980 + if (ret) {
5981 +- kfree(wq_dev);
5982 ++ put_device(&wq_dev->dev);
5983 + wq->wq_dev = NULL;
5984 + return ret;
5985 + }
5986 +diff --git a/lib/test_bpf.c b/lib/test_bpf.c
5987 +index b7908d949a5f..b1495f586f29 100644
5988 +--- a/lib/test_bpf.c
5989 ++++ b/lib/test_bpf.c
5990 +@@ -83,6 +83,7 @@ struct bpf_test {
5991 + __u32 result;
5992 + } test[MAX_SUBTESTS];
5993 + int (*fill_helper)(struct bpf_test *self);
5994 ++ int expected_errcode; /* used when FLAG_EXPECTED_FAIL is set in the aux */
5995 + __u8 frag_data[MAX_DATA];
5996 + };
5997 +
5998 +@@ -1780,7 +1781,9 @@ static struct bpf_test tests[] = {
5999 + },
6000 + CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
6001 + { },
6002 +- { }
6003 ++ { },
6004 ++ .fill_helper = NULL,
6005 ++ .expected_errcode = -EINVAL,
6006 + },
6007 + {
6008 + "check: div_k_0",
6009 +@@ -1790,7 +1793,9 @@ static struct bpf_test tests[] = {
6010 + },
6011 + CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
6012 + { },
6013 +- { }
6014 ++ { },
6015 ++ .fill_helper = NULL,
6016 ++ .expected_errcode = -EINVAL,
6017 + },
6018 + {
6019 + "check: unknown insn",
6020 +@@ -1801,7 +1806,9 @@ static struct bpf_test tests[] = {
6021 + },
6022 + CLASSIC | FLAG_EXPECTED_FAIL,
6023 + { },
6024 +- { }
6025 ++ { },
6026 ++ .fill_helper = NULL,
6027 ++ .expected_errcode = -EINVAL,
6028 + },
6029 + {
6030 + "check: out of range spill/fill",
6031 +@@ -1811,7 +1818,9 @@ static struct bpf_test tests[] = {
6032 + },
6033 + CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
6034 + { },
6035 +- { }
6036 ++ { },
6037 ++ .fill_helper = NULL,
6038 ++ .expected_errcode = -EINVAL,
6039 + },
6040 + {
6041 + "JUMPS + HOLES",
6042 +@@ -1903,6 +1912,8 @@ static struct bpf_test tests[] = {
6043 + CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
6044 + { },
6045 + { },
6046 ++ .fill_helper = NULL,
6047 ++ .expected_errcode = -EINVAL,
6048 + },
6049 + {
6050 + "check: LDX + RET X",
6051 +@@ -1913,6 +1924,8 @@ static struct bpf_test tests[] = {
6052 + CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
6053 + { },
6054 + { },
6055 ++ .fill_helper = NULL,
6056 ++ .expected_errcode = -EINVAL,
6057 + },
6058 + { /* Mainly checking JIT here. */
6059 + "M[]: alt STX + LDX",
6060 +@@ -2087,6 +2100,8 @@ static struct bpf_test tests[] = {
6061 + CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
6062 + { },
6063 + { },
6064 ++ .fill_helper = NULL,
6065 ++ .expected_errcode = -EINVAL,
6066 + },
6067 + { /* Passes checker but fails during runtime. */
6068 + "LD [SKF_AD_OFF-1]",
6069 +@@ -4462,6 +4477,7 @@ static struct bpf_test tests[] = {
6070 + { },
6071 + { },
6072 + .fill_helper = bpf_fill_maxinsns4,
6073 ++ .expected_errcode = -EINVAL,
6074 + },
6075 + { /* Mainly checking JIT here. */
6076 + "BPF_MAXINSNS: Very long jump",
6077 +@@ -4517,10 +4533,15 @@ static struct bpf_test tests[] = {
6078 + {
6079 + "BPF_MAXINSNS: Jump, gap, jump, ...",
6080 + { },
6081 ++#ifdef CONFIG_BPF_JIT_ALWAYS_ON
6082 ++ CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
6083 ++#else
6084 + CLASSIC | FLAG_NO_DATA,
6085 ++#endif
6086 + { },
6087 + { { 0, 0xababcbac } },
6088 + .fill_helper = bpf_fill_maxinsns11,
6089 ++ .expected_errcode = -ENOTSUPP,
6090 + },
6091 + {
6092 + "BPF_MAXINSNS: ld_abs+get_processor_id",
6093 +@@ -5290,7 +5311,7 @@ static struct bpf_prog *generate_filter(int which, int *err)
6094 +
6095 + *err = bpf_prog_create(&fp, &fprog);
6096 + if (tests[which].aux & FLAG_EXPECTED_FAIL) {
6097 +- if (*err == -EINVAL) {
6098 ++ if (*err == tests[which].expected_errcode) {
6099 + pr_cont("PASS\n");
6100 + /* Verifier rejected filter as expected. */
6101 + *err = 0;
6102 +diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
6103 +index bc0a8d8b8f42..ba9adce1422a 100644
6104 +--- a/mm/kasan/kasan.c
6105 ++++ b/mm/kasan/kasan.c
6106 +@@ -548,5 +548,5 @@ static int __init kasan_memhotplug_init(void)
6107 + return 0;
6108 + }
6109 +
6110 +-module_init(kasan_memhotplug_init);
6111 ++core_initcall(kasan_memhotplug_init);
6112 + #endif
6113 +diff --git a/mm/kmemleak.c b/mm/kmemleak.c
6114 +index 1914ab9009d9..84c93879aa5d 100644
6115 +--- a/mm/kmemleak.c
6116 ++++ b/mm/kmemleak.c
6117 +@@ -1524,8 +1524,7 @@ static void start_scan_thread(void)
6118 + }
6119 +
6120 + /*
6121 +- * Stop the automatic memory scanning thread. This function must be called
6122 +- * with the scan_mutex held.
6123 ++ * Stop the automatic memory scanning thread.
6124 + */
6125 + static void stop_scan_thread(void)
6126 + {
6127 +@@ -1788,12 +1787,15 @@ static void kmemleak_do_cleanup(struct work_struct *work)
6128 + {
6129 + stop_scan_thread();
6130 +
6131 ++ mutex_lock(&scan_mutex);
6132 + /*
6133 +- * Once the scan thread has stopped, it is safe to no longer track
6134 +- * object freeing. Ordering of the scan thread stopping and the memory
6135 +- * accesses below is guaranteed by the kthread_stop() function.
6136 ++ * Once it is made sure that kmemleak_scan has stopped, it is safe to no
6137 ++ * longer track object freeing. Ordering of the scan thread stopping and
6138 ++ * the memory accesses below is guaranteed by the kthread_stop()
6139 ++ * function.
6140 + */
6141 + kmemleak_free_enabled = 0;
6142 ++ mutex_unlock(&scan_mutex);
6143 +
6144 + if (!kmemleak_found_leaks)
6145 + __kmemleak_do_cleanup();
6146 +diff --git a/mm/ksm.c b/mm/ksm.c
6147 +index 2f028e6d0831..0b496edc704b 100644
6148 +--- a/mm/ksm.c
6149 ++++ b/mm/ksm.c
6150 +@@ -1494,8 +1494,22 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
6151 + tree_rmap_item =
6152 + unstable_tree_search_insert(rmap_item, page, &tree_page);
6153 + if (tree_rmap_item) {
6154 ++ bool split;
6155 ++
6156 + kpage = try_to_merge_two_pages(rmap_item, page,
6157 + tree_rmap_item, tree_page);
6158 ++ /*
6159 ++ * If both pages we tried to merge belong to the same compound
6160 ++ * page, then we actually ended up increasing the reference
6161 ++ * count of the same compound page twice, and split_huge_page
6162 ++ * failed.
6163 ++ * Here we set a flag if that happened, and we use it later to
6164 ++ * try split_huge_page again. Since we call put_page right
6165 ++ * afterwards, the reference count will be correct and
6166 ++ * split_huge_page should succeed.
6167 ++ */
6168 ++ split = PageTransCompound(page)
6169 ++ && compound_head(page) == compound_head(tree_page);
6170 + put_page(tree_page);
6171 + if (kpage) {
6172 + /*
6173 +@@ -1520,6 +1534,20 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
6174 + break_cow(tree_rmap_item);
6175 + break_cow(rmap_item);
6176 + }
6177 ++ } else if (split) {
6178 ++ /*
6179 ++ * We are here if we tried to merge two pages and
6180 ++ * failed because they both belonged to the same
6181 ++ * compound page. We will split the page now, but no
6182 ++ * merging will take place.
6183 ++ * We do not want to add the cost of a full lock; if
6184 ++ * the page is locked, it is better to skip it and
6185 ++ * perhaps try again later.
6186 ++ */
6187 ++ if (!trylock_page(page))
6188 ++ return;
6189 ++ split_huge_page(page);
6190 ++ unlock_page(page);
6191 + }
6192 + }
6193 + }
6194 +diff --git a/mm/mempolicy.c b/mm/mempolicy.c
6195 +index c947014d128a..b777590c3e13 100644
6196 +--- a/mm/mempolicy.c
6197 ++++ b/mm/mempolicy.c
6198 +@@ -1232,6 +1232,7 @@ static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
6199 + unsigned long maxnode)
6200 + {
6201 + unsigned long k;
6202 ++ unsigned long t;
6203 + unsigned long nlongs;
6204 + unsigned long endmask;
6205 +
6206 +@@ -1248,13 +1249,19 @@ static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
6207 + else
6208 + endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
6209 +
6210 +- /* When the user specified more nodes than supported just check
6211 +- if the non supported part is all zero. */
6212 ++ /*
6213 ++ * When the user specified more nodes than supported just check
6214 ++ * if the non supported part is all zero.
6215 ++ *
6216 ++ * If maxnode have more longs than MAX_NUMNODES, check
6217 ++ * the bits in that area first. And then go through to
6218 ++ * check the rest bits which equal or bigger than MAX_NUMNODES.
6219 ++ * Otherwise, just check bits [MAX_NUMNODES, maxnode).
6220 ++ */
6221 + if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
6222 + if (nlongs > PAGE_SIZE/sizeof(long))
6223 + return -EINVAL;
6224 + for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
6225 +- unsigned long t;
6226 + if (get_user(t, nmask + k))
6227 + return -EFAULT;
6228 + if (k == nlongs - 1) {
6229 +@@ -1267,6 +1274,16 @@ static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
6230 + endmask = ~0UL;
6231 + }
6232 +
6233 ++ if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
6234 ++ unsigned long valid_mask = endmask;
6235 ++
6236 ++ valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
6237 ++ if (get_user(t, nmask + nlongs - 1))
6238 ++ return -EFAULT;
6239 ++ if (t & valid_mask)
6240 ++ return -EINVAL;
6241 ++ }
6242 ++
6243 + if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
6244 + return -EFAULT;
6245 + nodes_addr(*nodes)[nlongs-1] &= endmask;
6246 +@@ -1393,10 +1410,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
6247 + goto out_put;
6248 + }
6249 +
6250 +- if (!nodes_subset(*new, node_states[N_MEMORY])) {
6251 +- err = -EINVAL;
6252 ++ task_nodes = cpuset_mems_allowed(current);
6253 ++ nodes_and(*new, *new, task_nodes);
6254 ++ if (nodes_empty(*new))
6255 ++ goto out_put;
6256 ++
6257 ++ nodes_and(*new, *new, node_states[N_MEMORY]);
6258 ++ if (nodes_empty(*new))
6259 + goto out_put;
6260 +- }
6261 +
6262 + err = security_task_movememory(task);
6263 + if (err)
6264 +@@ -2121,6 +2142,9 @@ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
6265 + case MPOL_INTERLEAVE:
6266 + return !!nodes_equal(a->v.nodes, b->v.nodes);
6267 + case MPOL_PREFERRED:
6268 ++ /* a's ->flags is the same as b's */
6269 ++ if (a->flags & MPOL_F_LOCAL)
6270 ++ return true;
6271 + return a->v.preferred_node == b->v.preferred_node;
6272 + default:
6273 + BUG();
6274 +diff --git a/mm/swapfile.c b/mm/swapfile.c
6275 +index c1a0f3dea8b5..674bf177ce44 100644
6276 +--- a/mm/swapfile.c
6277 ++++ b/mm/swapfile.c
6278 +@@ -2258,6 +2258,10 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
6279 + maxpages = swp_offset(pte_to_swp_entry(
6280 + swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
6281 + last_page = swap_header->info.last_page;
6282 ++ if (!last_page) {
6283 ++ pr_warn("Empty swap-file\n");
6284 ++ return 0;
6285 ++ }
6286 + if (last_page > maxpages) {
6287 + pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
6288 + maxpages << (PAGE_SHIFT - 10),
6289 +diff --git a/mm/vmscan.c b/mm/vmscan.c
6290 +index 12a69e6c10ba..b58ca729f20a 100644
6291 +--- a/mm/vmscan.c
6292 ++++ b/mm/vmscan.c
6293 +@@ -1312,6 +1312,7 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
6294 +
6295 + if (PageDirty(page)) {
6296 + struct address_space *mapping;
6297 ++ bool migrate_dirty;
6298 +
6299 + /* ISOLATE_CLEAN means only clean pages */
6300 + if (mode & ISOLATE_CLEAN)
6301 +@@ -1320,10 +1321,19 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
6302 + /*
6303 + * Only pages without mappings or that have a
6304 + * ->migratepage callback are possible to migrate
6305 +- * without blocking
6306 ++ * without blocking. However, we can be racing with
6307 ++ * truncation so it's necessary to lock the page
6308 ++ * to stabilise the mapping as truncation holds
6309 ++ * the page lock until after the page is removed
6310 ++ * from the page cache.
6311 + */
6312 ++ if (!trylock_page(page))
6313 ++ return ret;
6314 ++
6315 + mapping = page_mapping(page);
6316 +- if (mapping && !mapping->a_ops->migratepage)
6317 ++ migrate_dirty = mapping && mapping->a_ops->migratepage;
6318 ++ unlock_page(page);
6319 ++ if (!migrate_dirty)
6320 + return ret;
6321 + }
6322 + }
6323 +@@ -3831,7 +3841,13 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
6324 + */
6325 + int page_evictable(struct page *page)
6326 + {
6327 +- return !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
6328 ++ int ret;
6329 ++
6330 ++ /* Prevent address_space of inode and swap cache from being freed */
6331 ++ rcu_read_lock();
6332 ++ ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
6333 ++ rcu_read_unlock();
6334 ++ return ret;
6335 + }
6336 +
6337 + #ifdef CONFIG_SHMEM
6338 +diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
6339 +index 5f19133c5530..c2dff7c6e960 100644
6340 +--- a/net/batman-adv/distributed-arp-table.c
6341 ++++ b/net/batman-adv/distributed-arp-table.c
6342 +@@ -374,7 +374,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
6343 + batadv_arp_hw_src(skb, hdr_size), &ip_src,
6344 + batadv_arp_hw_dst(skb, hdr_size), &ip_dst);
6345 +
6346 +- if (hdr_size == 0)
6347 ++ if (hdr_size < sizeof(struct batadv_unicast_packet))
6348 + return;
6349 +
6350 + unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
6351 +diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
6352 +index 700c96c82a15..5d2f9d4879b2 100644
6353 +--- a/net/batman-adv/fragmentation.c
6354 ++++ b/net/batman-adv/fragmentation.c
6355 +@@ -278,7 +278,8 @@ batadv_frag_merge_packets(struct hlist_head *chain)
6356 + /* Move the existing MAC header to just before the payload. (Override
6357 + * the fragment header.)
6358 + */
6359 +- skb_pull_rcsum(skb_out, hdr_size);
6360 ++ skb_pull(skb_out, hdr_size);
6361 ++ skb_out->ip_summed = CHECKSUM_NONE;
6362 + memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN);
6363 + skb_set_mac_header(skb_out, -ETH_HLEN);
6364 + skb_reset_network_header(skb_out);
6365 +diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
6366 +index e6c8382c79ba..6abfba1e227f 100644
6367 +--- a/net/batman-adv/gateway_client.c
6368 ++++ b/net/batman-adv/gateway_client.c
6369 +@@ -798,6 +798,9 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
6370 +
6371 + vid = batadv_get_vid(skb, 0);
6372 +
6373 ++ if (is_multicast_ether_addr(ethhdr->h_dest))
6374 ++ goto out;
6375 ++
6376 + orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
6377 + ethhdr->h_dest, vid);
6378 + if (!orig_dst_node)
6379 +diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
6380 +index eb76386f8d4b..8aa2d65df86f 100644
6381 +--- a/net/batman-adv/multicast.c
6382 ++++ b/net/batman-adv/multicast.c
6383 +@@ -428,8 +428,8 @@ static struct batadv_orig_node *
6384 + batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv,
6385 + struct ethhdr *ethhdr)
6386 + {
6387 +- return batadv_transtable_search(bat_priv, ethhdr->h_source,
6388 +- ethhdr->h_dest, BATADV_NO_FLAGS);
6389 ++ return batadv_transtable_search(bat_priv, NULL, ethhdr->h_dest,
6390 ++ BATADV_NO_FLAGS);
6391 + }
6392 +
6393 + /**
6394 +diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
6395 +index 720f1a5b81ac..9f1fe6169bef 100644
6396 +--- a/net/batman-adv/soft-interface.c
6397 ++++ b/net/batman-adv/soft-interface.c
6398 +@@ -430,13 +430,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
6399 +
6400 + /* skb->dev & skb->pkt_type are set here */
6401 + skb->protocol = eth_type_trans(skb, soft_iface);
6402 +-
6403 +- /* should not be necessary anymore as we use skb_pull_rcsum()
6404 +- * TODO: please verify this and remove this TODO
6405 +- * -- Dec 21st 2009, Simon Wunderlich
6406 +- */
6407 +-
6408 +- /* skb->ip_summed = CHECKSUM_UNNECESSARY; */
6409 ++ skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
6410 +
6411 + batadv_inc_counter(bat_priv, BATADV_CNT_RX);
6412 + batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
6413 +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
6414 +index 50b76011f470..51eab9b5baa1 100644
6415 +--- a/net/bridge/netfilter/ebtables.c
6416 ++++ b/net/bridge/netfilter/ebtables.c
6417 +@@ -1614,7 +1614,8 @@ static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
6418 + int off = ebt_compat_match_offset(match, m->match_size);
6419 + compat_uint_t msize = m->match_size - off;
6420 +
6421 +- BUG_ON(off >= m->match_size);
6422 ++ if (WARN_ON(off >= m->match_size))
6423 ++ return -EINVAL;
6424 +
6425 + if (copy_to_user(cm->u.name, match->name,
6426 + strlen(match->name) + 1) || put_user(msize, &cm->match_size))
6427 +@@ -1641,7 +1642,8 @@ static int compat_target_to_user(struct ebt_entry_target *t,
6428 + int off = xt_compat_target_offset(target);
6429 + compat_uint_t tsize = t->target_size - off;
6430 +
6431 +- BUG_ON(off >= t->target_size);
6432 ++ if (WARN_ON(off >= t->target_size))
6433 ++ return -EINVAL;
6434 +
6435 + if (copy_to_user(cm->u.name, target->name,
6436 + strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
6437 +@@ -1869,7 +1871,8 @@ static int ebt_buf_add(struct ebt_entries_buf_state *state,
6438 + if (state->buf_kern_start == NULL)
6439 + goto count_only;
6440 +
6441 +- BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);
6442 ++ if (WARN_ON(state->buf_kern_offset + sz > state->buf_kern_len))
6443 ++ return -EINVAL;
6444 +
6445 + memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
6446 +
6447 +@@ -1882,7 +1885,8 @@ static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
6448 + {
6449 + char *b = state->buf_kern_start;
6450 +
6451 +- BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);
6452 ++ if (WARN_ON(b && state->buf_kern_offset > state->buf_kern_len))
6453 ++ return -EINVAL;
6454 +
6455 + if (b != NULL && sz > 0)
6456 + memset(b + state->buf_kern_offset, 0, sz);
6457 +@@ -1959,8 +1963,10 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
6458 + pad = XT_ALIGN(size_kern) - size_kern;
6459 +
6460 + if (pad > 0 && dst) {
6461 +- BUG_ON(state->buf_kern_len <= pad);
6462 +- BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
6463 ++ if (WARN_ON(state->buf_kern_len <= pad))
6464 ++ return -EINVAL;
6465 ++ if (WARN_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad))
6466 ++ return -EINVAL;
6467 + memset(dst + size_kern, 0, pad);
6468 + }
6469 + return off + match_size;
6470 +@@ -2011,7 +2017,8 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
6471 + if (ret < 0)
6472 + return ret;
6473 +
6474 +- BUG_ON(ret < match32->match_size);
6475 ++ if (WARN_ON(ret < match32->match_size))
6476 ++ return -EINVAL;
6477 + growth += ret - match32->match_size;
6478 + growth += ebt_compat_entry_padsize();
6479 +
6480 +@@ -2081,8 +2088,12 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
6481 + * offsets are relative to beginning of struct ebt_entry (i.e., 0).
6482 + */
6483 + for (i = 0; i < 4 ; ++i) {
6484 +- if (offsets[i] >= *total)
6485 ++ if (offsets[i] > *total)
6486 ++ return -EINVAL;
6487 ++
6488 ++ if (i < 3 && offsets[i] == *total)
6489 + return -EINVAL;
6490 ++
6491 + if (i == 0)
6492 + continue;
6493 + if (offsets[i-1] > offsets[i])
6494 +@@ -2121,7 +2132,8 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
6495 +
6496 + startoff = state->buf_user_offset - startoff;
6497 +
6498 +- BUG_ON(*total < startoff);
6499 ++ if (WARN_ON(*total < startoff))
6500 ++ return -EINVAL;
6501 + *total -= startoff;
6502 + return 0;
6503 + }
6504 +@@ -2249,7 +2261,8 @@ static int compat_do_replace(struct net *net, void __user *user,
6505 + state.buf_kern_len = size64;
6506 +
6507 + ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
6508 +- BUG_ON(ret < 0); /* parses same data again */
6509 ++ if (WARN_ON(ret < 0))
6510 ++ goto out_unlock;
6511 +
6512 + vfree(entries_tmp);
6513 + tmp.entries_size = size64;
6514 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
6515 +index 5668dd3f9969..fa02c680eebc 100644
6516 +--- a/net/core/skbuff.c
6517 ++++ b/net/core/skbuff.c
6518 +@@ -4295,13 +4295,18 @@ EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
6519 +
6520 + static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
6521 + {
6522 ++ int mac_len;
6523 ++
6524 + if (skb_cow(skb, skb_headroom(skb)) < 0) {
6525 + kfree_skb(skb);
6526 + return NULL;
6527 + }
6528 +
6529 +- memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len - VLAN_HLEN,
6530 +- 2 * ETH_ALEN);
6531 ++ mac_len = skb->data - skb_mac_header(skb);
6532 ++ if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) {
6533 ++ memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
6534 ++ mac_len - VLAN_HLEN - ETH_TLEN);
6535 ++ }
6536 + skb->mac_header += VLAN_HLEN;
6537 + return skb;
6538 + }
6539 +diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
6540 +index a03f834f16d5..fa79e8118b9b 100644
6541 +--- a/net/ipv4/ip_vti.c
6542 ++++ b/net/ipv4/ip_vti.c
6543 +@@ -366,8 +366,6 @@ static int vti_tunnel_init(struct net_device *dev)
6544 + memcpy(dev->dev_addr, &iph->saddr, 4);
6545 + memcpy(dev->broadcast, &iph->daddr, 4);
6546 +
6547 +- dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr);
6548 +- dev->mtu = ETH_DATA_LEN;
6549 + dev->flags = IFF_NOARP;
6550 + dev->addr_len = 4;
6551 + dev->features |= NETIF_F_LLTX;
6552 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
6553 +index f0020260b0d4..3251dede1815 100644
6554 +--- a/net/ipv4/route.c
6555 ++++ b/net/ipv4/route.c
6556 +@@ -612,6 +612,7 @@ static inline u32 fnhe_hashfun(__be32 daddr)
6557 + static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
6558 + {
6559 + rt->rt_pmtu = fnhe->fnhe_pmtu;
6560 ++ rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
6561 + rt->dst.expires = fnhe->fnhe_expires;
6562 +
6563 + if (fnhe->fnhe_gw) {
6564 +@@ -622,7 +623,7 @@ static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnh
6565 + }
6566 +
6567 + static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
6568 +- u32 pmtu, unsigned long expires)
6569 ++ u32 pmtu, bool lock, unsigned long expires)
6570 + {
6571 + struct fnhe_hash_bucket *hash;
6572 + struct fib_nh_exception *fnhe;
6573 +@@ -659,8 +660,10 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
6574 + fnhe->fnhe_genid = genid;
6575 + if (gw)
6576 + fnhe->fnhe_gw = gw;
6577 +- if (pmtu)
6578 ++ if (pmtu) {
6579 + fnhe->fnhe_pmtu = pmtu;
6580 ++ fnhe->fnhe_mtu_locked = lock;
6581 ++ }
6582 + fnhe->fnhe_expires = max(1UL, expires);
6583 + /* Update all cached dsts too */
6584 + rt = rcu_dereference(fnhe->fnhe_rth_input);
6585 +@@ -684,6 +687,7 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
6586 + fnhe->fnhe_daddr = daddr;
6587 + fnhe->fnhe_gw = gw;
6588 + fnhe->fnhe_pmtu = pmtu;
6589 ++ fnhe->fnhe_mtu_locked = lock;
6590 + fnhe->fnhe_expires = expires;
6591 +
6592 + /* Exception created; mark the cached routes for the nexthop
6593 +@@ -765,7 +769,8 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
6594 + struct fib_nh *nh = &FIB_RES_NH(res);
6595 +
6596 + update_or_create_fnhe(nh, fl4->daddr, new_gw,
6597 +- 0, jiffies + ip_rt_gc_timeout);
6598 ++ 0, false,
6599 ++ jiffies + ip_rt_gc_timeout);
6600 + }
6601 + if (kill_route)
6602 + rt->dst.obsolete = DST_OBSOLETE_KILL;
6603 +@@ -977,15 +982,18 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
6604 + {
6605 + struct dst_entry *dst = &rt->dst;
6606 + struct fib_result res;
6607 ++ bool lock = false;
6608 +
6609 +- if (dst_metric_locked(dst, RTAX_MTU))
6610 ++ if (ip_mtu_locked(dst))
6611 + return;
6612 +
6613 + if (ipv4_mtu(dst) < mtu)
6614 + return;
6615 +
6616 +- if (mtu < ip_rt_min_pmtu)
6617 ++ if (mtu < ip_rt_min_pmtu) {
6618 ++ lock = true;
6619 + mtu = ip_rt_min_pmtu;
6620 ++ }
6621 +
6622 + if (rt->rt_pmtu == mtu &&
6623 + time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
6624 +@@ -995,7 +1003,7 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
6625 + if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
6626 + struct fib_nh *nh = &FIB_RES_NH(res);
6627 +
6628 +- update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
6629 ++ update_or_create_fnhe(nh, fl4->daddr, 0, mtu, lock,
6630 + jiffies + ip_rt_mtu_expires);
6631 + }
6632 + rcu_read_unlock();
6633 +@@ -1250,7 +1258,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
6634 +
6635 + mtu = READ_ONCE(dst->dev->mtu);
6636 +
6637 +- if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
6638 ++ if (unlikely(ip_mtu_locked(dst))) {
6639 + if (rt->rt_uses_gateway && mtu > 576)
6640 + mtu = 576;
6641 + }
6642 +@@ -1473,6 +1481,7 @@ static struct rtable *rt_dst_alloc(struct net_device *dev,
6643 + rt->rt_is_input = 0;
6644 + rt->rt_iif = 0;
6645 + rt->rt_pmtu = 0;
6646 ++ rt->rt_mtu_locked = 0;
6647 + rt->rt_gateway = 0;
6648 + rt->rt_uses_gateway = 0;
6649 + rt->rt_table_id = 0;
6650 +@@ -2393,6 +2402,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
6651 + rt->rt_is_input = ort->rt_is_input;
6652 + rt->rt_iif = ort->rt_iif;
6653 + rt->rt_pmtu = ort->rt_pmtu;
6654 ++ rt->rt_mtu_locked = ort->rt_mtu_locked;
6655 +
6656 + rt->rt_genid = rt_genid_ipv4(net);
6657 + rt->rt_flags = ort->rt_flags;
6658 +@@ -2495,6 +2505,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id,
6659 + memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
6660 + if (rt->rt_pmtu && expires)
6661 + metrics[RTAX_MTU - 1] = rt->rt_pmtu;
6662 ++ if (rt->rt_mtu_locked && expires)
6663 ++ metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU);
6664 + if (rtnetlink_put_metrics(skb, metrics) < 0)
6665 + goto nla_put_failure;
6666 +
6667 +diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
6668 +index 2ab9bbb6faff..5ed6a89894fd 100644
6669 +--- a/net/ipv4/tcp_illinois.c
6670 ++++ b/net/ipv4/tcp_illinois.c
6671 +@@ -6,7 +6,7 @@
6672 + * The algorithm is described in:
6673 + * "TCP-Illinois: A Loss and Delay-Based Congestion Control Algorithm
6674 + * for High-Speed Networks"
6675 +- * http://www.ifp.illinois.edu/~srikant/Papers/liubassri06perf.pdf
6676 ++ * http://tamerbasar.csl.illinois.edu/LiuBasarSrikantPerfEvalArtJun2008.pdf
6677 + *
6678 + * Implemented from description in paper and ns-2 simulation.
6679 + * Copyright (C) 2007 Stephen Hemminger <shemminger@××××××××××××××××.org>
6680 +diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
6681 +index 7b0edb37a115..fddae0164b91 100644
6682 +--- a/net/ipv4/xfrm4_policy.c
6683 ++++ b/net/ipv4/xfrm4_policy.c
6684 +@@ -97,6 +97,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
6685 + xdst->u.rt.rt_gateway = rt->rt_gateway;
6686 + xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway;
6687 + xdst->u.rt.rt_pmtu = rt->rt_pmtu;
6688 ++ xdst->u.rt.rt_mtu_locked = rt->rt_mtu_locked;
6689 + xdst->u.rt.rt_table_id = rt->rt_table_id;
6690 + INIT_LIST_HEAD(&xdst->u.rt.rt_uncached);
6691 +
6692 +diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
6693 +index 51f7c32f04d7..dec4e7bda5f3 100644
6694 +--- a/net/ipv6/sit.c
6695 ++++ b/net/ipv6/sit.c
6696 +@@ -1574,6 +1574,13 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev,
6697 + if (err < 0)
6698 + return err;
6699 +
6700 ++ if (tb[IFLA_MTU]) {
6701 ++ u32 mtu = nla_get_u32(tb[IFLA_MTU]);
6702 ++
6703 ++ if (mtu >= IPV6_MIN_MTU && mtu <= 0xFFF8 - dev->hard_header_len)
6704 ++ dev->mtu = mtu;
6705 ++ }
6706 ++
6707 + #ifdef CONFIG_IPV6_SIT_6RD
6708 + if (ipip6_netlink_6rd_parms(data, &ip6rd))
6709 + err = ipip6_tunnel_update_6rd(nt, &ip6rd);
6710 +diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
6711 +index f8d4ab8ca1a5..4b60f68cb492 100644
6712 +--- a/net/llc/llc_c_ac.c
6713 ++++ b/net/llc/llc_c_ac.c
6714 +@@ -389,7 +389,7 @@ static int llc_conn_ac_send_i_cmd_p_set_0(struct sock *sk, struct sk_buff *skb)
6715 + llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR);
6716 + rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
6717 + if (likely(!rc)) {
6718 +- llc_conn_send_pdu(sk, skb);
6719 ++ rc = llc_conn_send_pdu(sk, skb);
6720 + llc_conn_ac_inc_vs_by_1(sk, skb);
6721 + }
6722 + return rc;
6723 +@@ -916,7 +916,7 @@ static int llc_conn_ac_send_i_rsp_f_set_ackpf(struct sock *sk,
6724 + llc_pdu_init_as_i_cmd(skb, llc->ack_pf, llc->vS, llc->vR);
6725 + rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
6726 + if (likely(!rc)) {
6727 +- llc_conn_send_pdu(sk, skb);
6728 ++ rc = llc_conn_send_pdu(sk, skb);
6729 + llc_conn_ac_inc_vs_by_1(sk, skb);
6730 + }
6731 + return rc;
6732 +@@ -935,14 +935,17 @@ static int llc_conn_ac_send_i_rsp_f_set_ackpf(struct sock *sk,
6733 + int llc_conn_ac_send_i_as_ack(struct sock *sk, struct sk_buff *skb)
6734 + {
6735 + struct llc_sock *llc = llc_sk(sk);
6736 ++ int ret;
6737 +
6738 + if (llc->ack_must_be_send) {
6739 +- llc_conn_ac_send_i_rsp_f_set_ackpf(sk, skb);
6740 ++ ret = llc_conn_ac_send_i_rsp_f_set_ackpf(sk, skb);
6741 + llc->ack_must_be_send = 0 ;
6742 + llc->ack_pf = 0;
6743 +- } else
6744 +- llc_conn_ac_send_i_cmd_p_set_0(sk, skb);
6745 +- return 0;
6746 ++ } else {
6747 ++ ret = llc_conn_ac_send_i_cmd_p_set_0(sk, skb);
6748 ++ }
6749 ++
6750 ++ return ret;
6751 + }
6752 +
6753 + /**
6754 +diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
6755 +index d861b74ad068..79c346fd859b 100644
6756 +--- a/net/llc/llc_conn.c
6757 ++++ b/net/llc/llc_conn.c
6758 +@@ -30,7 +30,7 @@
6759 + #endif
6760 +
6761 + static int llc_find_offset(int state, int ev_type);
6762 +-static void llc_conn_send_pdus(struct sock *sk);
6763 ++static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *skb);
6764 + static int llc_conn_service(struct sock *sk, struct sk_buff *skb);
6765 + static int llc_exec_conn_trans_actions(struct sock *sk,
6766 + struct llc_conn_state_trans *trans,
6767 +@@ -193,11 +193,11 @@ out_skb_put:
6768 + return rc;
6769 + }
6770 +
6771 +-void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb)
6772 ++int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb)
6773 + {
6774 + /* queue PDU to send to MAC layer */
6775 + skb_queue_tail(&sk->sk_write_queue, skb);
6776 +- llc_conn_send_pdus(sk);
6777 ++ return llc_conn_send_pdus(sk, skb);
6778 + }
6779 +
6780 + /**
6781 +@@ -255,7 +255,7 @@ void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit)
6782 + if (howmany_resend > 0)
6783 + llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO;
6784 + /* any PDUs to re-send are queued up; start sending to MAC */
6785 +- llc_conn_send_pdus(sk);
6786 ++ llc_conn_send_pdus(sk, NULL);
6787 + out:;
6788 + }
6789 +
6790 +@@ -296,7 +296,7 @@ void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit)
6791 + if (howmany_resend > 0)
6792 + llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO;
6793 + /* any PDUs to re-send are queued up; start sending to MAC */
6794 +- llc_conn_send_pdus(sk);
6795 ++ llc_conn_send_pdus(sk, NULL);
6796 + out:;
6797 + }
6798 +
6799 +@@ -340,12 +340,16 @@ out:
6800 + /**
6801 + * llc_conn_send_pdus - Sends queued PDUs
6802 + * @sk: active connection
6803 ++ * @hold_skb: the skb held by caller, or NULL if does not care
6804 + *
6805 +- * Sends queued pdus to MAC layer for transmission.
6806 ++ * Sends queued pdus to MAC layer for transmission. When @hold_skb is
6807 ++ * NULL, always return 0. Otherwise, return 0 if @hold_skb is sent
6808 ++ * successfully, or 1 for failure.
6809 + */
6810 +-static void llc_conn_send_pdus(struct sock *sk)
6811 ++static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *hold_skb)
6812 + {
6813 + struct sk_buff *skb;
6814 ++ int ret = 0;
6815 +
6816 + while ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL) {
6817 + struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
6818 +@@ -357,10 +361,20 @@ static void llc_conn_send_pdus(struct sock *sk)
6819 + skb_queue_tail(&llc_sk(sk)->pdu_unack_q, skb);
6820 + if (!skb2)
6821 + break;
6822 +- skb = skb2;
6823 ++ dev_queue_xmit(skb2);
6824 ++ } else {
6825 ++ bool is_target = skb == hold_skb;
6826 ++ int rc;
6827 ++
6828 ++ if (is_target)
6829 ++ skb_get(skb);
6830 ++ rc = dev_queue_xmit(skb);
6831 ++ if (is_target)
6832 ++ ret = rc;
6833 + }
6834 +- dev_queue_xmit(skb);
6835 + }
6836 ++
6837 ++ return ret;
6838 + }
6839 +
6840 + /**
6841 +diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
6842 +index b0380927f05f..3f33ec44bd28 100644
6843 +--- a/net/netlabel/netlabel_unlabeled.c
6844 ++++ b/net/netlabel/netlabel_unlabeled.c
6845 +@@ -1469,6 +1469,16 @@ int netlbl_unlabel_getattr(const struct sk_buff *skb,
6846 + iface = rcu_dereference(netlbl_unlhsh_def);
6847 + if (iface == NULL || !iface->valid)
6848 + goto unlabel_getattr_nolabel;
6849 ++
6850 ++#if IS_ENABLED(CONFIG_IPV6)
6851 ++ /* When resolving a fallback label, check the sk_buff version as
6852 ++ * it is possible (e.g. SCTP) to have family = PF_INET6 while
6853 ++ * receiving ip_hdr(skb)->version = 4.
6854 ++ */
6855 ++ if (family == PF_INET6 && ip_hdr(skb)->version == 4)
6856 ++ family = PF_INET;
6857 ++#endif /* IPv6 */
6858 ++
6859 + switch (family) {
6860 + case PF_INET: {
6861 + struct iphdr *hdr4;
6862 +diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c
6863 +index 3621a902cb6e..d25212b135ea 100644
6864 +--- a/net/nfc/llcp_commands.c
6865 ++++ b/net/nfc/llcp_commands.c
6866 +@@ -149,6 +149,10 @@ struct nfc_llcp_sdp_tlv *nfc_llcp_build_sdreq_tlv(u8 tid, char *uri,
6867 +
6868 + pr_debug("uri: %s, len: %zu\n", uri, uri_len);
6869 +
6870 ++ /* sdreq->tlv_len is u8, takes uri_len, + 3 for header, + 1 for NULL */
6871 ++ if (WARN_ON_ONCE(uri_len > U8_MAX - 4))
6872 ++ return NULL;
6873 ++
6874 + sdreq = kzalloc(sizeof(struct nfc_llcp_sdp_tlv), GFP_KERNEL);
6875 + if (sdreq == NULL)
6876 + return NULL;
6877 +diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
6878 +index 12dfb457275d..32cb0c87e852 100644
6879 +--- a/net/nfc/netlink.c
6880 ++++ b/net/nfc/netlink.c
6881 +@@ -68,7 +68,8 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
6882 + };
6883 +
6884 + static const struct nla_policy nfc_sdp_genl_policy[NFC_SDP_ATTR_MAX + 1] = {
6885 +- [NFC_SDP_ATTR_URI] = { .type = NLA_STRING },
6886 ++ [NFC_SDP_ATTR_URI] = { .type = NLA_STRING,
6887 ++ .len = U8_MAX - 4 },
6888 + [NFC_SDP_ATTR_SAP] = { .type = NLA_U8 },
6889 + };
6890 +
6891 +diff --git a/net/rds/ib.c b/net/rds/ib.c
6892 +index f222885ac0c7..ed51ccc84b3a 100644
6893 +--- a/net/rds/ib.c
6894 ++++ b/net/rds/ib.c
6895 +@@ -336,7 +336,8 @@ static int rds_ib_laddr_check(struct net *net, __be32 addr)
6896 + /* Create a CMA ID and try to bind it. This catches both
6897 + * IB and iWARP capable NICs.
6898 + */
6899 +- cm_id = rdma_create_id(&init_net, NULL, NULL, RDMA_PS_TCP, IB_QPT_RC);
6900 ++ cm_id = rdma_create_id(&init_net, rds_rdma_cm_event_handler,
6901 ++ NULL, RDMA_PS_TCP, IB_QPT_RC);
6902 + if (IS_ERR(cm_id))
6903 + return PTR_ERR(cm_id);
6904 +
6905 +diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c
6906 +index cbf4996dd9c1..ed29bad1f03a 100644
6907 +--- a/scripts/kconfig/expr.c
6908 ++++ b/scripts/kconfig/expr.c
6909 +@@ -113,7 +113,7 @@ void expr_free(struct expr *e)
6910 + break;
6911 + case E_NOT:
6912 + expr_free(e->left.expr);
6913 +- return;
6914 ++ break;
6915 + case E_EQUAL:
6916 + case E_GEQ:
6917 + case E_GTH:
6918 +diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
6919 +index b05cc3d4a9be..8360feaf51ce 100644
6920 +--- a/scripts/kconfig/menu.c
6921 ++++ b/scripts/kconfig/menu.c
6922 +@@ -364,6 +364,7 @@ void menu_finalize(struct menu *parent)
6923 + menu->parent = parent;
6924 + last_menu = menu;
6925 + }
6926 ++ expr_free(basedep);
6927 + if (last_menu) {
6928 + parent->list = parent->next;
6929 + parent->next = last_menu->next;
6930 +diff --git a/scripts/kconfig/zconf.y b/scripts/kconfig/zconf.y
6931 +index 71bf8bff696a..5122ed2d839a 100644
6932 +--- a/scripts/kconfig/zconf.y
6933 ++++ b/scripts/kconfig/zconf.y
6934 +@@ -107,7 +107,27 @@ static struct menu *current_menu, *current_entry;
6935 + %%
6936 + input: nl start | start;
6937 +
6938 +-start: mainmenu_stmt stmt_list | stmt_list;
6939 ++start: mainmenu_stmt stmt_list | no_mainmenu_stmt stmt_list;
6940 ++
6941 ++/* mainmenu entry */
6942 ++
6943 ++mainmenu_stmt: T_MAINMENU prompt nl
6944 ++{
6945 ++ menu_add_prompt(P_MENU, $2, NULL);
6946 ++};
6947 ++
6948 ++/* Default main menu, if there's no mainmenu entry */
6949 ++
6950 ++no_mainmenu_stmt: /* empty */
6951 ++{
6952 ++ /*
6953 ++ * Hack: Keep the main menu title on the heap so we can safely free it
6954 ++ * later regardless of whether it comes from the 'prompt' in
6955 ++ * mainmenu_stmt or here
6956 ++ */
6957 ++ menu_add_prompt(P_MENU, strdup("Linux Kernel Configuration"), NULL);
6958 ++};
6959 ++
6960 +
6961 + stmt_list:
6962 + /* empty */
6963 +@@ -344,13 +364,6 @@ if_block:
6964 + | if_block choice_stmt
6965 + ;
6966 +
6967 +-/* mainmenu entry */
6968 +-
6969 +-mainmenu_stmt: T_MAINMENU prompt nl
6970 +-{
6971 +- menu_add_prompt(P_MENU, $2, NULL);
6972 +-};
6973 +-
6974 + /* menu entry */
6975 +
6976 + menu: T_MENU prompt T_EOL
6977 +@@ -495,6 +508,7 @@ word_opt: /* empty */ { $$ = NULL; }
6978 +
6979 + void conf_parse(const char *name)
6980 + {
6981 ++ const char *tmp;
6982 + struct symbol *sym;
6983 + int i;
6984 +
6985 +@@ -502,7 +516,6 @@ void conf_parse(const char *name)
6986 +
6987 + sym_init();
6988 + _menu_init();
6989 +- rootmenu.prompt = menu_add_prompt(P_MENU, "Linux Kernel Configuration", NULL);
6990 +
6991 + if (getenv("ZCONF_DEBUG"))
6992 + zconfdebug = 1;
6993 +@@ -512,8 +525,10 @@ void conf_parse(const char *name)
6994 + if (!modules_sym)
6995 + modules_sym = sym_find( "n" );
6996 +
6997 ++ tmp = rootmenu.prompt->text;
6998 + rootmenu.prompt->text = _(rootmenu.prompt->text);
6999 + rootmenu.prompt->text = sym_expand_string_value(rootmenu.prompt->text);
7000 ++ free((char*)tmp);
7001 +
7002 + menu_finalize(&rootmenu);
7003 + for_all_symbols(i, sym) {
7004 +diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
7005 +index df303346029b..648a0461f8ed 100644
7006 +--- a/security/integrity/ima/Kconfig
7007 ++++ b/security/integrity/ima/Kconfig
7008 +@@ -10,6 +10,7 @@ config IMA
7009 + select CRYPTO_HASH_INFO
7010 + select TCG_TPM if HAS_IOMEM && !UML
7011 + select TCG_TIS if TCG_TPM && X86
7012 ++ select TCG_CRB if TCG_TPM && ACPI
7013 + select TCG_IBMVTPM if TCG_TPM && PPC_PSERIES
7014 + help
7015 + The Trusted Computing Group(TCG) runtime Integrity
7016 +diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
7017 +index 6eb62936c672..a29209fa5674 100644
7018 +--- a/security/integrity/ima/ima_crypto.c
7019 ++++ b/security/integrity/ima/ima_crypto.c
7020 +@@ -78,6 +78,8 @@ int __init ima_init_crypto(void)
7021 + hash_algo_name[ima_hash_algo], rc);
7022 + return rc;
7023 + }
7024 ++ pr_info("Allocated hash algorithm: %s\n",
7025 ++ hash_algo_name[ima_hash_algo]);
7026 + return 0;
7027 + }
7028 +
7029 +diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
7030 +index 98289ba2a2e6..236dce30e517 100644
7031 +--- a/security/integrity/ima/ima_main.c
7032 ++++ b/security/integrity/ima/ima_main.c
7033 +@@ -16,6 +16,9 @@
7034 + * implements the IMA hooks: ima_bprm_check, ima_file_mmap,
7035 + * and ima_file_check.
7036 + */
7037 ++
7038 ++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7039 ++
7040 + #include <linux/module.h>
7041 + #include <linux/file.h>
7042 + #include <linux/binfmts.h>
7043 +@@ -353,6 +356,16 @@ static int __init init_ima(void)
7044 +
7045 + hash_setup(CONFIG_IMA_DEFAULT_HASH);
7046 + error = ima_init();
7047 ++
7048 ++ if (error && strcmp(hash_algo_name[ima_hash_algo],
7049 ++ CONFIG_IMA_DEFAULT_HASH) != 0) {
7050 ++ pr_info("Allocating %s failed, going to use default hash algorithm %s\n",
7051 ++ hash_algo_name[ima_hash_algo], CONFIG_IMA_DEFAULT_HASH);
7052 ++ hash_setup_done = 0;
7053 ++ hash_setup(CONFIG_IMA_DEFAULT_HASH);
7054 ++ error = ima_init();
7055 ++ }
7056 ++
7057 + if (!error) {
7058 + ima_initialized = 1;
7059 + ima_update_policy_flag();
7060 +diff --git a/sound/core/timer.c b/sound/core/timer.c
7061 +index 5a718b2d3c9a..ef850a99d64a 100644
7062 +--- a/sound/core/timer.c
7063 ++++ b/sound/core/timer.c
7064 +@@ -548,7 +548,7 @@ static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop)
7065 + }
7066 + timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
7067 + snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
7068 +- SNDRV_TIMER_EVENT_CONTINUE);
7069 ++ SNDRV_TIMER_EVENT_PAUSE);
7070 + unlock:
7071 + spin_unlock_irqrestore(&timer->lock, flags);
7072 + return result;
7073 +@@ -570,7 +570,7 @@ static int snd_timer_stop_slave(struct snd_timer_instance *timeri, bool stop)
7074 + list_del_init(&timeri->ack_list);
7075 + list_del_init(&timeri->active_list);
7076 + snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
7077 +- SNDRV_TIMER_EVENT_CONTINUE);
7078 ++ SNDRV_TIMER_EVENT_PAUSE);
7079 + spin_unlock(&timeri->timer->lock);
7080 + }
7081 + spin_unlock_irqrestore(&slave_active_lock, flags);
7082 +diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c
7083 +index 6c58e6f73a01..7c6ef879c520 100644
7084 +--- a/sound/core/vmaster.c
7085 ++++ b/sound/core/vmaster.c
7086 +@@ -68,10 +68,13 @@ static int slave_update(struct link_slave *slave)
7087 + return -ENOMEM;
7088 + uctl->id = slave->slave.id;
7089 + err = slave->slave.get(&slave->slave, uctl);
7090 ++ if (err < 0)
7091 ++ goto error;
7092 + for (ch = 0; ch < slave->info.count; ch++)
7093 + slave->vals[ch] = uctl->value.integer.value[ch];
7094 ++ error:
7095 + kfree(uctl);
7096 +- return 0;
7097 ++ return err < 0 ? err : 0;
7098 + }
7099 +
7100 + /* get the slave ctl info and save the initial values */
7101 +diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
7102 +index e94cfd5c69f7..ebec1a1ae543 100644
7103 +--- a/sound/pci/hda/Kconfig
7104 ++++ b/sound/pci/hda/Kconfig
7105 +@@ -84,7 +84,6 @@ config SND_HDA_PATCH_LOADER
7106 + config SND_HDA_CODEC_REALTEK
7107 + tristate "Build Realtek HD-audio codec support"
7108 + select SND_HDA_GENERIC
7109 +- select INPUT
7110 + help
7111 + Say Y or M here to include Realtek HD-audio codec support in
7112 + snd-hda-intel driver, such as ALC880.
7113 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
7114 +index 6a789278970e..580b8943b965 100644
7115 +--- a/sound/pci/hda/patch_realtek.c
7116 ++++ b/sound/pci/hda/patch_realtek.c
7117 +@@ -3495,6 +3495,7 @@ static void alc280_fixup_hp_gpio4(struct hda_codec *codec,
7118 + }
7119 + }
7120 +
7121 ++#if IS_REACHABLE(INPUT)
7122 + static void gpio2_mic_hotkey_event(struct hda_codec *codec,
7123 + struct hda_jack_callback *event)
7124 + {
7125 +@@ -3627,6 +3628,10 @@ static void alc233_fixup_lenovo_line2_mic_hotkey(struct hda_codec *codec,
7126 + spec->kb_dev = NULL;
7127 + }
7128 + }
7129 ++#else /* INPUT */
7130 ++#define alc280_fixup_hp_gpio2_mic_hotkey NULL
7131 ++#define alc233_fixup_lenovo_line2_mic_hotkey NULL
7132 ++#endif /* INPUT */
7133 +
7134 + static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec,
7135 + const struct hda_fixup *fix, int action)
7136 +diff --git a/sound/soc/au1x/ac97c.c b/sound/soc/au1x/ac97c.c
7137 +index 29a97d52e8ad..66d6c52e7761 100644
7138 +--- a/sound/soc/au1x/ac97c.c
7139 ++++ b/sound/soc/au1x/ac97c.c
7140 +@@ -91,8 +91,8 @@ static unsigned short au1xac97c_ac97_read(struct snd_ac97 *ac97,
7141 + do {
7142 + mutex_lock(&ctx->lock);
7143 +
7144 +- tmo = 5;
7145 +- while ((RD(ctx, AC97_STATUS) & STAT_CP) && tmo--)
7146 ++ tmo = 6;
7147 ++ while ((RD(ctx, AC97_STATUS) & STAT_CP) && --tmo)
7148 + udelay(21); /* wait an ac97 frame time */
7149 + if (!tmo) {
7150 + pr_debug("ac97rd timeout #1\n");
7151 +@@ -105,7 +105,7 @@ static unsigned short au1xac97c_ac97_read(struct snd_ac97 *ac97,
7152 + * poll, Forrest, poll...
7153 + */
7154 + tmo = 0x10000;
7155 +- while ((RD(ctx, AC97_STATUS) & STAT_CP) && tmo--)
7156 ++ while ((RD(ctx, AC97_STATUS) & STAT_CP) && --tmo)
7157 + asm volatile ("nop");
7158 + data = RD(ctx, AC97_CMDRESP);
7159 +
7160 +diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
7161 +index fd6e247d9fd8..91bad6731c9d 100644
7162 +--- a/sound/soc/samsung/i2s.c
7163 ++++ b/sound/soc/samsung/i2s.c
7164 +@@ -640,8 +640,12 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
7165 + tmp |= mod_slave;
7166 + break;
7167 + case SND_SOC_DAIFMT_CBS_CFS:
7168 +- /* Set default source clock in Master mode */
7169 +- if (i2s->rclk_srcrate == 0)
7170 ++ /*
7171 ++ * Set default source clock in Master mode, only when the
7172 ++ * CLK_I2S_RCLK_SRC clock is not exposed so we ensure any
7173 ++ * clock configuration assigned in DT is not overwritten.
7174 ++ */
7175 ++ if (i2s->rclk_srcrate == 0 && i2s->clk_data.clks == NULL)
7176 + i2s_set_sysclk(dai, SAMSUNG_I2S_RCLKSRC_0,
7177 + 0, SND_SOC_CLOCK_IN);
7178 + break;
7179 +@@ -856,6 +860,11 @@ static int config_setup(struct i2s_dai *i2s)
7180 + return 0;
7181 +
7182 + if (!(i2s->quirks & QUIRK_NO_MUXPSR)) {
7183 ++ struct clk *rclksrc = i2s->clk_table[CLK_I2S_RCLK_SRC];
7184 ++
7185 ++ if (i2s->rclk_srcrate == 0 && rclksrc && !IS_ERR(rclksrc))
7186 ++ i2s->rclk_srcrate = clk_get_rate(rclksrc);
7187 ++
7188 + psr = i2s->rclk_srcrate / i2s->frmclk / rfs;
7189 + writel(((psr - 1) << 8) | PSR_PSREN, i2s->addr + I2SPSR);
7190 + dev_dbg(&i2s->pdev->dev,
7191 +diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
7192 +index e3f34a86413c..c1e76feb3529 100644
7193 +--- a/sound/soc/soc-topology.c
7194 ++++ b/sound/soc/soc-topology.c
7195 +@@ -1188,6 +1188,9 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dmixer_create(
7196 + kfree(sm);
7197 + continue;
7198 + }
7199 ++
7200 ++ /* create any TLV data */
7201 ++ soc_tplg_create_tlv(tplg, &kc[i], &mc->hdr);
7202 + }
7203 + return kc;
7204 +
7205 +diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
7206 +index e176bad19bcb..ca080a129b33 100644
7207 +--- a/tools/lib/bpf/libbpf.c
7208 ++++ b/tools/lib/bpf/libbpf.c
7209 +@@ -487,6 +487,24 @@ bpf_object__init_maps(struct bpf_object *obj, void *data,
7210 + return 0;
7211 + }
7212 +
7213 ++static bool section_have_execinstr(struct bpf_object *obj, int idx)
7214 ++{
7215 ++ Elf_Scn *scn;
7216 ++ GElf_Shdr sh;
7217 ++
7218 ++ scn = elf_getscn(obj->efile.elf, idx);
7219 ++ if (!scn)
7220 ++ return false;
7221 ++
7222 ++ if (gelf_getshdr(scn, &sh) != &sh)
7223 ++ return false;
7224 ++
7225 ++ if (sh.sh_flags & SHF_EXECINSTR)
7226 ++ return true;
7227 ++
7228 ++ return false;
7229 ++}
7230 ++
7231 + static int bpf_object__elf_collect(struct bpf_object *obj)
7232 + {
7233 + Elf *elf = obj->efile.elf;
7234 +@@ -567,6 +585,14 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
7235 + } else if (sh.sh_type == SHT_REL) {
7236 + void *reloc = obj->efile.reloc;
7237 + int nr_reloc = obj->efile.nr_reloc + 1;
7238 ++ int sec = sh.sh_info; /* points to other section */
7239 ++
7240 ++ /* Only do relo for section with exec instructions */
7241 ++ if (!section_have_execinstr(obj, sec)) {
7242 ++ pr_debug("skip relo %s(%d) for section(%d)\n",
7243 ++ name, idx, sec);
7244 ++ continue;
7245 ++ }
7246 +
7247 + reloc = realloc(reloc,
7248 + sizeof(*obj->efile.reloc) * nr_reloc);
7249 +diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
7250 +index 68276f35e323..6e4a10fe9dd0 100644
7251 +--- a/tools/lib/traceevent/event-parse.c
7252 ++++ b/tools/lib/traceevent/event-parse.c
7253 +@@ -4905,21 +4905,22 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
7254 + else
7255 + ls = 2;
7256 +
7257 +- if (*(ptr+1) == 'F' || *(ptr+1) == 'f' ||
7258 +- *(ptr+1) == 'S' || *(ptr+1) == 's') {
7259 ++ if (isalnum(ptr[1]))
7260 + ptr++;
7261 ++
7262 ++ if (*ptr == 'F' || *ptr == 'f' ||
7263 ++ *ptr == 'S' || *ptr == 's') {
7264 + show_func = *ptr;
7265 +- } else if (*(ptr+1) == 'M' || *(ptr+1) == 'm') {
7266 +- print_mac_arg(s, *(ptr+1), data, size, event, arg);
7267 +- ptr++;
7268 ++ } else if (*ptr == 'M' || *ptr == 'm') {
7269 ++ print_mac_arg(s, *ptr, data, size, event, arg);
7270 + arg = arg->next;
7271 + break;
7272 +- } else if (*(ptr+1) == 'I' || *(ptr+1) == 'i') {
7273 ++ } else if (*ptr == 'I' || *ptr == 'i') {
7274 + int n;
7275 +
7276 +- n = print_ip_arg(s, ptr+1, data, size, event, arg);
7277 ++ n = print_ip_arg(s, ptr, data, size, event, arg);
7278 + if (n > 0) {
7279 +- ptr += n;
7280 ++ ptr += n - 1;
7281 + arg = arg->next;
7282 + break;
7283 + }
7284 +diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
7285 +index 88cccea3ca99..64309d73921b 100644
7286 +--- a/tools/lib/traceevent/parse-filter.c
7287 ++++ b/tools/lib/traceevent/parse-filter.c
7288 +@@ -1867,17 +1867,25 @@ static const char *get_field_str(struct filter_arg *arg, struct pevent_record *r
7289 + struct pevent *pevent;
7290 + unsigned long long addr;
7291 + const char *val = NULL;
7292 ++ unsigned int size;
7293 + char hex[64];
7294 +
7295 + /* If the field is not a string convert it */
7296 + if (arg->str.field->flags & FIELD_IS_STRING) {
7297 + val = record->data + arg->str.field->offset;
7298 ++ size = arg->str.field->size;
7299 ++
7300 ++ if (arg->str.field->flags & FIELD_IS_DYNAMIC) {
7301 ++ addr = *(unsigned int *)val;
7302 ++ val = record->data + (addr & 0xffff);
7303 ++ size = addr >> 16;
7304 ++ }
7305 +
7306 + /*
7307 + * We need to copy the data since we can't be sure the field
7308 + * is null terminated.
7309 + */
7310 +- if (*(val + arg->str.field->size - 1)) {
7311 ++ if (*(val + size - 1)) {
7312 + /* copy it */
7313 + memcpy(arg->str.buffer, val, arg->str.field->size);
7314 + /* the buffer is already NULL terminated */
7315 +diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
7316 +index d677e018e504..bf907c50fcae 100644
7317 +--- a/tools/perf/tests/vmlinux-kallsyms.c
7318 ++++ b/tools/perf/tests/vmlinux-kallsyms.c
7319 +@@ -126,7 +126,7 @@ int test__vmlinux_matches_kallsyms(void)
7320 +
7321 + if (pair && UM(pair->start) == mem_start) {
7322 + next_pair:
7323 +- if (strcmp(sym->name, pair->name) == 0) {
7324 ++ if (arch__compare_symbol_names(sym->name, pair->name) == 0) {
7325 + /*
7326 + * kallsyms don't have the symbol end, so we
7327 + * set that by using the next symbol start - 1,
7328 +diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
7329 +index 397fb4ed3c97..f0bd4825f95a 100644
7330 +--- a/tools/perf/util/evsel.c
7331 ++++ b/tools/perf/util/evsel.c
7332 +@@ -624,13 +624,13 @@ static void apply_config_terms(struct perf_evsel *evsel,
7333 + struct perf_evsel_config_term *term;
7334 + struct list_head *config_terms = &evsel->config_terms;
7335 + struct perf_event_attr *attr = &evsel->attr;
7336 +- struct callchain_param param;
7337 ++ /* callgraph default */
7338 ++ struct callchain_param param = {
7339 ++ .record_mode = callchain_param.record_mode,
7340 ++ };
7341 + u32 dump_size = 0;
7342 + char *callgraph_buf = NULL;
7343 +
7344 +- /* callgraph default */
7345 +- param.record_mode = callchain_param.record_mode;
7346 +-
7347 + list_for_each_entry(term, config_terms, list) {
7348 + switch (term->type) {
7349 + case PERF_EVSEL__CONFIG_TERM_PERIOD:
7350 +diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
7351 +index 4fd37d6708cb..f6720afa9f34 100644
7352 +--- a/tools/perf/util/hist.c
7353 ++++ b/tools/perf/util/hist.c
7354 +@@ -720,7 +720,7 @@ iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
7355 + * cumulated only one time to prevent entries more than 100%
7356 + * overhead.
7357 + */
7358 +- he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
7359 ++ he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
7360 + if (he_cache == NULL)
7361 + return -ENOMEM;
7362 +
7363 +@@ -881,8 +881,6 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
7364 + if (err)
7365 + return err;
7366 +
7367 +- iter->max_stack = max_stack_depth;
7368 +-
7369 + err = iter->ops->prepare_entry(iter, al);
7370 + if (err)
7371 + goto out;
7372 +diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
7373 +index a48a2078d288..46b7591acd9c 100644
7374 +--- a/tools/perf/util/hist.h
7375 ++++ b/tools/perf/util/hist.h
7376 +@@ -91,7 +91,6 @@ struct hist_entry_iter {
7377 + int curr;
7378 +
7379 + bool hide_unresolved;
7380 +- int max_stack;
7381 +
7382 + struct perf_evsel *evsel;
7383 + struct perf_sample *sample;
7384 +diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
7385 +index 24ebd3e3eb7d..5d2e479430d1 100644
7386 +--- a/tools/testing/selftests/Makefile
7387 ++++ b/tools/testing/selftests/Makefile
7388 +@@ -90,6 +90,7 @@ ifdef INSTALL_PATH
7389 + for TARGET in $(TARGETS); do \
7390 + echo "echo ; echo Running tests in $$TARGET" >> $(ALL_SCRIPT); \
7391 + echo "echo ========================================" >> $(ALL_SCRIPT); \
7392 ++ echo "[ -w /dev/kmsg ] && echo \"kselftest: Running tests in $$TARGET\" >> /dev/kmsg" >> $(ALL_SCRIPT); \
7393 + echo "cd $$TARGET" >> $(ALL_SCRIPT); \
7394 + make -s --no-print-directory -C $$TARGET emit_tests >> $(ALL_SCRIPT); \
7395 + echo "cd \$$ROOT" >> $(ALL_SCRIPT); \
7396 +diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
7397 +new file mode 100644
7398 +index 000000000000..5ba73035e1d9
7399 +--- /dev/null
7400 ++++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
7401 +@@ -0,0 +1,46 @@
7402 ++#!/bin/sh
7403 ++# SPDX-License-Identifier: GPL-2.0
7404 ++# description: Kprobe event string type argument
7405 ++
7406 ++[ -f kprobe_events ] || exit_unsupported # this is configurable
7407 ++
7408 ++echo 0 > events/enable
7409 ++echo > kprobe_events
7410 ++
7411 ++case `uname -m` in
7412 ++x86_64)
7413 ++ ARG2=%si
7414 ++ OFFS=8
7415 ++;;
7416 ++i[3456]86)
7417 ++ ARG2=%cx
7418 ++ OFFS=4
7419 ++;;
7420 ++aarch64)
7421 ++ ARG2=%x1
7422 ++ OFFS=8
7423 ++;;
7424 ++arm*)
7425 ++ ARG2=%r1
7426 ++ OFFS=4
7427 ++;;
7428 ++*)
7429 ++ echo "Please implement other architecture here"
7430 ++ exit_untested
7431 ++esac
7432 ++
7433 ++: "Test get argument (1)"
7434 ++echo "p:testprobe create_trace_kprobe arg1=+0(+0(${ARG2})):string" > kprobe_events
7435 ++echo 1 > events/kprobes/testprobe/enable
7436 ++! echo test >> kprobe_events
7437 ++tail -n 1 trace | grep -qe "testprobe.* arg1=\"test\""
7438 ++
7439 ++echo 0 > events/kprobes/testprobe/enable
7440 ++: "Test get argument (2)"
7441 ++echo "p:testprobe create_trace_kprobe arg1=+0(+0(${ARG2})):string arg2=+0(+${OFFS}(${ARG2})):string" > kprobe_events
7442 ++echo 1 > events/kprobes/testprobe/enable
7443 ++! echo test1 test2 >> kprobe_events
7444 ++tail -n 1 trace | grep -qe "testprobe.* arg1=\"test1\" arg2=\"test2\""
7445 ++
7446 ++echo 0 > events/enable
7447 ++echo > kprobe_events
7448 +diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc
7449 +new file mode 100644
7450 +index 000000000000..231bcd2c4eb5
7451 +--- /dev/null
7452 ++++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc
7453 +@@ -0,0 +1,97 @@
7454 ++#!/bin/sh
7455 ++# SPDX-License-Identifier: GPL-2.0
7456 ++# description: Kprobe event argument syntax
7457 ++
7458 ++[ -f kprobe_events ] || exit_unsupported # this is configurable
7459 ++
7460 ++grep "x8/16/32/64" README > /dev/null || exit_unsupported # version issue
7461 ++
7462 ++echo 0 > events/enable
7463 ++echo > kprobe_events
7464 ++
7465 ++PROBEFUNC="vfs_read"
7466 ++GOODREG=
7467 ++BADREG=
7468 ++GOODSYM="_sdata"
7469 ++if ! grep -qw ${GOODSYM} /proc/kallsyms ; then
7470 ++ GOODSYM=$PROBEFUNC
7471 ++fi
7472 ++BADSYM="deaqswdefr"
7473 ++SYMADDR=0x`grep -w ${GOODSYM} /proc/kallsyms | cut -f 1 -d " "`
7474 ++GOODTYPE="x16"
7475 ++BADTYPE="y16"
7476 ++
7477 ++case `uname -m` in
7478 ++x86_64|i[3456]86)
7479 ++ GOODREG=%ax
7480 ++ BADREG=%ex
7481 ++;;
7482 ++aarch64)
7483 ++ GOODREG=%x0
7484 ++ BADREG=%ax
7485 ++;;
7486 ++arm*)
7487 ++ GOODREG=%r0
7488 ++ BADREG=%ax
7489 ++;;
7490 ++esac
7491 ++
7492 ++test_goodarg() # Good-args
7493 ++{
7494 ++ while [ "$1" ]; do
7495 ++ echo "p ${PROBEFUNC} $1" > kprobe_events
7496 ++ shift 1
7497 ++ done;
7498 ++}
7499 ++
7500 ++test_badarg() # Bad-args
7501 ++{
7502 ++ while [ "$1" ]; do
7503 ++ ! echo "p ${PROBEFUNC} $1" > kprobe_events
7504 ++ shift 1
7505 ++ done;
7506 ++}
7507 ++
7508 ++echo > kprobe_events
7509 ++
7510 ++: "Register access"
7511 ++test_goodarg ${GOODREG}
7512 ++test_badarg ${BADREG}
7513 ++
7514 ++: "Symbol access"
7515 ++test_goodarg "@${GOODSYM}" "@${SYMADDR}" "@${GOODSYM}+10" "@${GOODSYM}-10"
7516 ++test_badarg "@" "@${BADSYM}" "@${GOODSYM}*10" "@${GOODSYM}/10" \
7517 ++ "@${GOODSYM}%10" "@${GOODSYM}&10" "@${GOODSYM}|10"
7518 ++
7519 ++: "Stack access"
7520 ++test_goodarg "\$stack" "\$stack0" "\$stack1"
7521 ++test_badarg "\$stackp" "\$stack0+10" "\$stack1-10"
7522 ++
7523 ++: "Retval access"
7524 ++echo "r ${PROBEFUNC} \$retval" > kprobe_events
7525 ++! echo "p ${PROBEFUNC} \$retval" > kprobe_events
7526 ++
7527 ++: "Comm access"
7528 ++test_goodarg "\$comm"
7529 ++
7530 ++: "Indirect memory access"
7531 ++test_goodarg "+0(${GOODREG})" "-0(${GOODREG})" "+10(\$stack)" \
7532 ++ "+0(\$stack1)" "+10(@${GOODSYM}-10)" "+0(+10(+20(\$stack)))"
7533 ++test_badarg "+(${GOODREG})" "(${GOODREG}+10)" "-(${GOODREG})" "(${GOODREG})" \
7534 ++ "+10(\$comm)" "+0(${GOODREG})+10"
7535 ++
7536 ++: "Name assignment"
7537 ++test_goodarg "varname=${GOODREG}"
7538 ++test_badarg "varname=varname2=${GOODREG}"
7539 ++
7540 ++: "Type syntax"
7541 ++test_goodarg "${GOODREG}:${GOODTYPE}"
7542 ++test_badarg "${GOODREG}::${GOODTYPE}" "${GOODREG}:${BADTYPE}" \
7543 ++ "${GOODTYPE}:${GOODREG}"
7544 ++
7545 ++: "Combination check"
7546 ++
7547 ++test_goodarg "\$comm:string" "+0(\$stack):string"
7548 ++test_badarg "\$comm:x64" "\$stack:string" "${GOODREG}:string"
7549 ++
7550 ++echo > kprobe_events
7551 +diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc b/tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc
7552 +new file mode 100644
7553 +index 000000000000..4fda01a08da4
7554 +--- /dev/null
7555 ++++ b/tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc
7556 +@@ -0,0 +1,43 @@
7557 ++#!/bin/sh
7558 ++# SPDX-License-Identifier: GPL-2.0
7559 ++# description: Kprobe events - probe points
7560 ++
7561 ++[ -f kprobe_events ] || exit_unsupported # this is configurable
7562 ++
7563 ++TARGET_FUNC=create_trace_kprobe
7564 ++
7565 ++dec_addr() { # hexaddr
7566 ++ printf "%d" "0x"`echo $1 | tail -c 8`
7567 ++}
7568 ++
7569 ++set_offs() { # prev target next
7570 ++ A1=`dec_addr $1`
7571 ++ A2=`dec_addr $2`
7572 ++ A3=`dec_addr $3`
7573 ++ TARGET="0x$2" # an address
7574 ++ PREV=`expr $A1 - $A2` # offset to previous symbol
7575 ++ NEXT=+`expr $A3 - $A2` # offset to next symbol
7576 ++ OVERFLOW=+`printf "0x%x" ${PREV}` # overflow offset to previous symbol
7577 ++}
7578 ++
7579 ++# We have to decode symbol addresses to get correct offsets.
7580 ++# If the offset is not an instruction boundary, it cause -EILSEQ.
7581 ++set_offs `grep -A1 -B1 ${TARGET_FUNC} /proc/kallsyms | cut -f 1 -d " " | xargs`
7582 ++
7583 ++UINT_TEST=no
7584 ++# printf "%x" -1 returns (unsigned long)-1.
7585 ++if [ `printf "%x" -1 | wc -c` != 9 ]; then
7586 ++ UINT_TEST=yes
7587 ++fi
7588 ++
7589 ++echo 0 > events/enable
7590 ++echo > kprobe_events
7591 ++echo "p:testprobe ${TARGET_FUNC}" > kprobe_events
7592 ++echo "p:testprobe ${TARGET}" > kprobe_events
7593 ++echo "p:testprobe ${TARGET_FUNC}${NEXT}" > kprobe_events
7594 ++! echo "p:testprobe ${TARGET_FUNC}${PREV}" > kprobe_events
7595 ++if [ "${UINT_TEST}" = yes ]; then
7596 ++! echo "p:testprobe ${TARGET_FUNC}${OVERFLOW}" > kprobe_events
7597 ++fi
7598 ++echo > kprobe_events
7599 ++clear_trace
7600 +diff --git a/tools/testing/selftests/memfd/config b/tools/testing/selftests/memfd/config
7601 +new file mode 100644
7602 +index 000000000000..835c7f4dadcd
7603 +--- /dev/null
7604 ++++ b/tools/testing/selftests/memfd/config
7605 +@@ -0,0 +1 @@
7606 ++CONFIG_FUSE_FS=m
7607 +diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c
7608 +index 412459369686..9b654a070e7d 100644
7609 +--- a/tools/testing/selftests/net/psock_fanout.c
7610 ++++ b/tools/testing/selftests/net/psock_fanout.c
7611 +@@ -97,6 +97,8 @@ static int sock_fanout_open(uint16_t typeflags, int num_packets)
7612 +
7613 + static void sock_fanout_set_ebpf(int fd)
7614 + {
7615 ++ static char log_buf[65536];
7616 ++
7617 + const int len_off = __builtin_offsetof(struct __sk_buff, len);
7618 + struct bpf_insn prog[] = {
7619 + { BPF_ALU64 | BPF_MOV | BPF_X, 6, 1, 0, 0 },
7620 +@@ -109,7 +111,6 @@ static void sock_fanout_set_ebpf(int fd)
7621 + { BPF_ALU | BPF_MOV | BPF_K, 0, 0, 0, 0 },
7622 + { BPF_JMP | BPF_EXIT, 0, 0, 0, 0 }
7623 + };
7624 +- char log_buf[512];
7625 + union bpf_attr attr;
7626 + int pfd;
7627 +
7628 +diff --git a/tools/testing/selftests/powerpc/mm/subpage_prot.c b/tools/testing/selftests/powerpc/mm/subpage_prot.c
7629 +index 440180ff8089..ca29f5872817 100644
7630 +--- a/tools/testing/selftests/powerpc/mm/subpage_prot.c
7631 ++++ b/tools/testing/selftests/powerpc/mm/subpage_prot.c
7632 +@@ -135,6 +135,16 @@ static int run_test(void *addr, unsigned long size)
7633 + return 0;
7634 + }
7635 +
7636 ++static int syscall_available(void)
7637 ++{
7638 ++ int rc;
7639 ++
7640 ++ errno = 0;
7641 ++ rc = syscall(__NR_subpage_prot, 0, 0, 0);
7642 ++
7643 ++ return rc == 0 || (errno != ENOENT && errno != ENOSYS);
7644 ++}
7645 ++
7646 + int test_anon(void)
7647 + {
7648 + unsigned long align;
7649 +@@ -145,6 +155,8 @@ int test_anon(void)
7650 + void *mallocblock;
7651 + unsigned long mallocsize;
7652 +
7653 ++ SKIP_IF(!syscall_available());
7654 ++
7655 + if (getpagesize() != 0x10000) {
7656 + fprintf(stderr, "Kernel page size must be 64K!\n");
7657 + return 1;
7658 +@@ -180,6 +192,8 @@ int test_file(void)
7659 + off_t filesize;
7660 + int fd;
7661 +
7662 ++ SKIP_IF(!syscall_available());
7663 ++
7664 + fd = open(file_name, O_RDWR);
7665 + if (fd == -1) {
7666 + perror("failed to open file");
7667 +diff --git a/tools/thermal/tmon/sysfs.c b/tools/thermal/tmon/sysfs.c
7668 +index 1c12536f2081..18f523557983 100644
7669 +--- a/tools/thermal/tmon/sysfs.c
7670 ++++ b/tools/thermal/tmon/sysfs.c
7671 +@@ -486,6 +486,7 @@ int zone_instance_to_index(int zone_inst)
7672 + int update_thermal_data()
7673 + {
7674 + int i;
7675 ++ int next_thermal_record = cur_thermal_record + 1;
7676 + char tz_name[256];
7677 + static unsigned long samples;
7678 +
7679 +@@ -495,9 +496,9 @@ int update_thermal_data()
7680 + }
7681 +
7682 + /* circular buffer for keeping historic data */
7683 +- if (cur_thermal_record >= NR_THERMAL_RECORDS)
7684 +- cur_thermal_record = 0;
7685 +- gettimeofday(&trec[cur_thermal_record].tv, NULL);
7686 ++ if (next_thermal_record >= NR_THERMAL_RECORDS)
7687 ++ next_thermal_record = 0;
7688 ++ gettimeofday(&trec[next_thermal_record].tv, NULL);
7689 + if (tmon_log) {
7690 + fprintf(tmon_log, "%lu ", ++samples);
7691 + fprintf(tmon_log, "%3.1f ", p_param.t_target);
7692 +@@ -507,11 +508,12 @@ int update_thermal_data()
7693 + snprintf(tz_name, 256, "%s/%s%d", THERMAL_SYSFS, TZONE,
7694 + ptdata.tzi[i].instance);
7695 + sysfs_get_ulong(tz_name, "temp",
7696 +- &trec[cur_thermal_record].temp[i]);
7697 ++ &trec[next_thermal_record].temp[i]);
7698 + if (tmon_log)
7699 + fprintf(tmon_log, "%lu ",
7700 +- trec[cur_thermal_record].temp[i]/1000);
7701 ++ trec[next_thermal_record].temp[i] / 1000);
7702 + }
7703 ++ cur_thermal_record = next_thermal_record;
7704 + for (i = 0; i < ptdata.nr_cooling_dev; i++) {
7705 + char cdev_name[256];
7706 + unsigned long val;
7707 +diff --git a/tools/thermal/tmon/tmon.c b/tools/thermal/tmon/tmon.c
7708 +index 9aa19652e8e8..b43138f8b862 100644
7709 +--- a/tools/thermal/tmon/tmon.c
7710 ++++ b/tools/thermal/tmon/tmon.c
7711 +@@ -336,7 +336,6 @@ int main(int argc, char **argv)
7712 + show_data_w();
7713 + show_cooling_device();
7714 + }
7715 +- cur_thermal_record++;
7716 + time_elapsed += ticktime;
7717 + controller_handler(trec[0].temp[target_tz_index] / 1000,
7718 + &yk);