
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.1 commit in: /
Date: Sun, 06 Aug 2017 18:01:24
Message-Id: 1502042470.5a402b476904049d5733853f1cee6216b9a21424.mpagano@gentoo
1 commit: 5a402b476904049d5733853f1cee6216b9a21424
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Sun Aug 6 18:01:10 2017 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Sun Aug 6 18:01:10 2017 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=5a402b47
7
8 Linux patch 4.1.43
9
10 0000_README | 4 +
11 1042_linux-4.1.43.patch | 5611 +++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 5615 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index 85eb55c..60f0ad1 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -211,6 +211,10 @@ Patch: 1041_linux-4.1.42.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.1.42
21
22 +Patch: 1042_linux-4.1.43.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.1.43
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1042_linux-4.1.43.patch b/1042_linux-4.1.43.patch
31 new file mode 100644
32 index 0000000..ad0b111
33 --- /dev/null
34 +++ b/1042_linux-4.1.43.patch
35 @@ -0,0 +1,5611 @@
36 +diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
37 +index c831001c45f1..4c88aa047790 100644
38 +--- a/Documentation/sysctl/kernel.txt
39 ++++ b/Documentation/sysctl/kernel.txt
40 +@@ -798,14 +798,13 @@ via the /proc/sys interface:
41 + Each write syscall must fully contain the sysctl value to be
42 + written, and multiple writes on the same sysctl file descriptor
43 + will rewrite the sysctl value, regardless of file position.
44 +- 0 - (default) Same behavior as above, but warn about processes that
45 +- perform writes to a sysctl file descriptor when the file position
46 +- is not 0.
47 +- 1 - Respect file position when writing sysctl strings. Multiple writes
48 +- will append to the sysctl value buffer. Anything past the max length
49 +- of the sysctl value buffer will be ignored. Writes to numeric sysctl
50 +- entries must always be at file position 0 and the value must be
51 +- fully contained in the buffer sent in the write syscall.
52 ++ 0 - Same behavior as above, but warn about processes that perform writes
53 ++ to a sysctl file descriptor when the file position is not 0.
54 ++ 1 - (default) Respect file position when writing sysctl strings. Multiple
55 ++ writes will append to the sysctl value buffer. Anything past the max
56 ++ length of the sysctl value buffer will be ignored. Writes to numeric
57 ++ sysctl entries must always be at file position 0 and the value must
58 ++ be fully contained in the buffer sent in the write syscall.
59 +
60 + ==============================================================
61 +
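
For illustration, a minimal userspace sketch (not part of the patch) of what the position-respecting default means for sysctl string writes; it assumes root and a kernel where sysctl_writes_strict is 1:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/sys/kernel/hostname", O_WRONLY);

	if (fd < 0)
		return 1;
	/* With sysctl_writes_strict == 1 the second write() appends at
	 * file position 3 rather than rewriting the whole value, so the
	 * hostname becomes "foobar", not "bar". */
	write(fd, "foo", 3);
	write(fd, "bar", 3);
	close(fd);
	return 0;
}
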
62 +diff --git a/Makefile b/Makefile
63 +index 0c3313f14ff0..50d0a93fa343 100644
64 +--- a/Makefile
65 ++++ b/Makefile
66 +@@ -1,6 +1,6 @@
67 + VERSION = 4
68 + PATCHLEVEL = 1
69 +-SUBLEVEL = 42
70 ++SUBLEVEL = 43
71 + EXTRAVERSION =
72 + NAME = Series 4800
73 +
74 +@@ -622,6 +622,12 @@ endif
75 + # Tell gcc to never replace conditional load with a non-conditional one
76 + KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
77 +
78 ++# check for 'asm goto'
79 ++ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
80 ++ KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
81 ++ KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
82 ++endif
83 ++
84 + ifdef CONFIG_READABLE_ASM
85 + # Disable optimizations that make assembler listings hard to read.
86 + # reorder blocks reorders the control in the function
87 +@@ -777,12 +783,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=date-time)
88 + # use the deterministic mode of AR if available
89 + KBUILD_ARFLAGS := $(call ar-option,D)
90 +
91 +-# check for 'asm goto'
92 +-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
93 +- KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
94 +- KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
95 +-endif
96 +-
97 + include scripts/Makefile.kasan
98 + include scripts/Makefile.extrawarn
99 +
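
The relocated check probes the compiler for 'asm goto' support, presumably so the resulting define is already in KBUILD_CFLAGS when later flag handling runs. Roughly, scripts/gcc-goto.sh tries to build a construct like this sketch (a paraphrase, not the script's exact source) and CC_HAVE_ASM_GOTO is defined when it succeeds:

/* If this translation unit compiles, the toolchain supports 'asm goto'. */
int main(void)
{
entry:
	asm goto("" : : : : entry);	/* declares 'entry' as a jump target */
	return 0;
}
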
100 +diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
101 +index 78aec6270c2f..90fbda066122 100644
102 +--- a/arch/arm/boot/dts/bcm5301x.dtsi
103 ++++ b/arch/arm/boot/dts/bcm5301x.dtsi
104 +@@ -54,14 +54,14 @@
105 + timer@0200 {
106 + compatible = "arm,cortex-a9-global-timer";
107 + reg = <0x0200 0x100>;
108 +- interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
109 ++ interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
110 + clocks = <&clk_periph>;
111 + };
112 +
113 + local-timer@0600 {
114 + compatible = "arm,cortex-a9-twd-timer";
115 + reg = <0x0600 0x100>;
116 +- interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
117 ++ interrupts = <GIC_PPI 13 IRQ_TYPE_EDGE_RISING>;
118 + clocks = <&clk_periph>;
119 + };
120 +
121 +diff --git a/arch/arm/boot/dts/imx6dl.dtsi b/arch/arm/boot/dts/imx6dl.dtsi
122 +index f94bf72832af..d6da4cc23920 100644
123 +--- a/arch/arm/boot/dts/imx6dl.dtsi
124 ++++ b/arch/arm/boot/dts/imx6dl.dtsi
125 +@@ -30,7 +30,7 @@
126 + /* kHz uV */
127 + 996000 1250000
128 + 792000 1175000
129 +- 396000 1075000
130 ++ 396000 1150000
131 + >;
132 + fsl,soc-operating-points = <
133 + /* ARM kHz SOC-PU uV */
134 +diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
135 +index d2315ffd8f12..f13ae153fb24 100644
136 +--- a/arch/arm/include/asm/elf.h
137 ++++ b/arch/arm/include/asm/elf.h
138 +@@ -112,12 +112,8 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
139 + #define CORE_DUMP_USE_REGSET
140 + #define ELF_EXEC_PAGESIZE 4096
141 +
142 +-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
143 +- use of this is to invoke "./ld.so someprog" to test out a new version of
144 +- the loader. We need to make sure that it is out of the way of the program
145 +- that it will "exec", and that there is sufficient room for the brk. */
146 +-
147 +-#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
148 ++/* This is the base location for PIE (ET_DYN with INTERP) loads. */
149 ++#define ELF_ET_DYN_BASE 0x400000UL
150 +
151 + /* When the program starts, a1 contains a pointer to a function to be
152 + registered with atexit, as per the SVR4 ABI. A value of 0 means we
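
ELF_ET_DYN_BASE is only the deterministic fallback; with ASLR active a random offset is added on top. A hypothetical way to observe the effect (the symbol __executable_start comes from the default linker script, not from this patch):

#include <stdio.h>

extern char __executable_start;	/* start of the loaded image */

int main(void)
{
	/* Built as PIE with ASLR disabled, this should print an address
	 * at or near the arch's ELF_ET_DYN_BASE. */
	printf("loaded at %p\n", (void *)&__executable_start);
	return 0;
}
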
153 +diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
154 +index 7186382672b5..d89d35b40e47 100644
155 +--- a/arch/arm/mm/mmu.c
156 ++++ b/arch/arm/mm/mmu.c
157 +@@ -1136,15 +1136,15 @@ void __init sanity_check_meminfo(void)
158 +
159 + high_memory = __va(arm_lowmem_limit - 1) + 1;
160 +
161 ++ if (!memblock_limit)
162 ++ memblock_limit = arm_lowmem_limit;
163 ++
164 + /*
165 + * Round the memblock limit down to a pmd size. This
166 + * helps to ensure that we will allocate memory from the
167 + * last full pmd, which should be mapped.
168 + */
169 +- if (memblock_limit)
170 +- memblock_limit = round_down(memblock_limit, PMD_SIZE);
171 +- if (!memblock_limit)
172 +- memblock_limit = arm_lowmem_limit;
173 ++ memblock_limit = round_down(memblock_limit, PMD_SIZE);
174 +
175 + memblock_set_current_limit(memblock_limit);
176 + }
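
The reorder makes the PMD round-down apply to the arm_lowmem_limit fallback as well, instead of only to an explicitly set limit. round_down() is the usual power-of-two mask; a made-up example:

#include <stdio.h>

#define PMD_SIZE		(2UL << 20)		/* typical 2 MiB section */
#define round_down(x, y)	((x) & ~((y) - 1))	/* y: power of two */

int main(void)
{
	unsigned long limit = 0x2f900123UL;	/* illustrative lowmem limit */

	printf("%#lx -> %#lx\n", limit, round_down(limit, PMD_SIZE));
	return 0;
}
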
177 +diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
178 +index 71f19c4dc0de..ffe7850afdbd 100644
179 +--- a/arch/arm64/include/asm/barrier.h
180 ++++ b/arch/arm64/include/asm/barrier.h
181 +@@ -63,23 +63,33 @@ do { \
182 +
183 + #define smp_store_release(p, v) \
184 + do { \
185 ++ union { typeof(*p) __val; char __c[1]; } __u = \
186 ++ { .__val = (__force typeof(*p)) (v) }; \
187 + compiletime_assert_atomic_type(*p); \
188 + switch (sizeof(*p)) { \
189 + case 1: \
190 + asm volatile ("stlrb %w1, %0" \
191 +- : "=Q" (*p) : "r" (v) : "memory"); \
192 ++ : "=Q" (*p) \
193 ++ : "r" (*(__u8 *)__u.__c) \
194 ++ : "memory"); \
195 + break; \
196 + case 2: \
197 + asm volatile ("stlrh %w1, %0" \
198 +- : "=Q" (*p) : "r" (v) : "memory"); \
199 ++ : "=Q" (*p) \
200 ++ : "r" (*(__u16 *)__u.__c) \
201 ++ : "memory"); \
202 + break; \
203 + case 4: \
204 + asm volatile ("stlr %w1, %0" \
205 +- : "=Q" (*p) : "r" (v) : "memory"); \
206 ++ : "=Q" (*p) \
207 ++ : "r" (*(__u32 *)__u.__c) \
208 ++ : "memory"); \
209 + break; \
210 + case 8: \
211 + asm volatile ("stlr %1, %0" \
212 +- : "=Q" (*p) : "r" (v) : "memory"); \
213 ++ : "=Q" (*p) \
214 ++ : "r" (*(__u64 *)__u.__c) \
215 ++ : "memory"); \
216 + break; \
217 + } \
218 + } while (0)
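
The union launders the value into a byte buffer so each case can hand the asm a register operand of exactly the right width without pointer-type complaints. The same pattern works in plain C, as in this userspace sketch (the kernel macro feeds the reread value to stlr/stlrb/... instead of a plain store):

#include <stdio.h>

#define STORE_VAL(p, v) do {						\
	union { __typeof__(*(p)) __val; char __c[1]; } __u =		\
		{ .__val = (v) };					\
	*(p) = *(__typeof__(p))__u.__c;					\
} while (0)

int main(void)
{
	long x = 0;

	STORE_VAL(&x, 42L);
	printf("%ld\n", x);
	return 0;
}
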
219 +diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
220 +index 7ac3920b1356..802dd71ed0b3 100644
221 +--- a/arch/arm64/kernel/armv8_deprecated.c
222 ++++ b/arch/arm64/kernel/armv8_deprecated.c
223 +@@ -298,7 +298,8 @@ static void register_insn_emulation_sysctl(struct ctl_table *table)
224 + " .quad 1b, 4b\n" \
225 + " .popsection\n" \
226 + : "=&r" (res), "+r" (data), "=&r" (temp) \
227 +- : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \
228 ++ : "r" ((unsigned long)addr), "i" (-EAGAIN), \
229 ++ "i" (-EFAULT) \
230 + : "memory")
231 +
232 + #define __user_swp_asm(data, addr, res, temp) \
233 +diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
234 +index 7791840cf22c..db07793f7b43 100644
235 +--- a/arch/mips/kernel/entry.S
236 ++++ b/arch/mips/kernel/entry.S
237 +@@ -11,6 +11,7 @@
238 + #include <asm/asm.h>
239 + #include <asm/asmmacro.h>
240 + #include <asm/compiler.h>
241 ++#include <asm/irqflags.h>
242 + #include <asm/regdef.h>
243 + #include <asm/mipsregs.h>
244 + #include <asm/stackframe.h>
245 +@@ -137,6 +138,7 @@ work_pending:
246 + andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
247 + beqz t0, work_notifysig
248 + work_resched:
249 ++ TRACE_IRQS_OFF
250 + jal schedule
251 +
252 + local_irq_disable # make sure need_resched and
253 +@@ -173,6 +175,7 @@ syscall_exit_work:
254 + beqz t0, work_pending # trace bit set?
255 + local_irq_enable # could let syscall_trace_leave()
256 + # call schedule() instead
257 ++ TRACE_IRQS_ON
258 + move a0, sp
259 + jal syscall_trace_leave
260 + b resume_userspace
261 +diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
262 +index 06147179a175..cd25b616075d 100644
263 +--- a/arch/mips/kernel/pm-cps.c
264 ++++ b/arch/mips/kernel/pm-cps.c
265 +@@ -55,7 +55,6 @@ DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);
266 + * state. Actually per-core rather than per-CPU.
267 + */
268 + static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
269 +-static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc);
270 +
271 + /* Indicates online CPUs coupled with the current CPU */
272 + static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
273 +@@ -624,7 +623,6 @@ static int __init cps_gen_core_entries(unsigned cpu)
274 + {
275 + enum cps_pm_state state;
276 + unsigned core = cpu_data[cpu].core;
277 +- unsigned dlinesz = cpu_data[cpu].dcache.linesz;
278 + void *entry_fn, *core_rc;
279 +
280 + for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
281 +@@ -644,16 +642,11 @@ static int __init cps_gen_core_entries(unsigned cpu)
282 + }
283 +
284 + if (!per_cpu(ready_count, core)) {
285 +- core_rc = kmalloc(dlinesz * 2, GFP_KERNEL);
286 ++ core_rc = kmalloc(sizeof(u32), GFP_KERNEL);
287 + if (!core_rc) {
288 + pr_err("Failed allocate core %u ready_count\n", core);
289 + return -ENOMEM;
290 + }
291 +- per_cpu(ready_count_alloc, core) = core_rc;
292 +-
293 +- /* Ensure ready_count is aligned to a cacheline boundary */
294 +- core_rc += dlinesz - 1;
295 +- core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1));
296 + per_cpu(ready_count, core) = core_rc;
297 + }
298 +
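
The manual cache-line round-up could be dropped because kmalloc() already returns memory aligned to ARCH_KMALLOC_MINALIGN, which on MIPS covers the dcache line size (my reading of the change, not stated in the hunk). Userspace allocators make a similar minimum-alignment guarantee:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	void *p = malloc(sizeof(uint32_t));

	/* malloc(), like kmalloc(), guarantees a minimum alignment even
	 * for tiny requests; the low address bits stay zero. */
	printf("%p, low bits %#lx\n", p, (unsigned long)(uintptr_t)p & 0xf);
	free(p);
	return 0;
}
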
299 +diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
300 +index 74403953e407..2e29b1aed924 100644
301 +--- a/arch/mips/kernel/traps.c
302 ++++ b/arch/mips/kernel/traps.c
303 +@@ -193,6 +193,8 @@ void show_stack(struct task_struct *task, unsigned long *sp)
304 + {
305 + struct pt_regs regs;
306 + mm_segment_t old_fs = get_fs();
307 ++
308 ++ regs.cp0_status = KSU_KERNEL;
309 + if (sp) {
310 + regs.regs[29] = (unsigned long)sp;
311 + regs.regs[31] = 0;
312 +diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
313 +index 2ea5ff6dc22e..c57215a66181 100644
314 +--- a/arch/mips/ralink/mt7620.c
315 ++++ b/arch/mips/ralink/mt7620.c
316 +@@ -98,31 +98,31 @@ static struct rt2880_pmx_group mt7620a_pinmux_data[] = {
317 + };
318 +
319 + static struct rt2880_pmx_func pwm1_grp_mt7628[] = {
320 +- FUNC("sdcx", 3, 19, 1),
321 ++ FUNC("sdxc d6", 3, 19, 1),
322 + FUNC("utif", 2, 19, 1),
323 + FUNC("gpio", 1, 19, 1),
324 +- FUNC("pwm", 0, 19, 1),
325 ++ FUNC("pwm1", 0, 19, 1),
326 + };
327 +
328 + static struct rt2880_pmx_func pwm0_grp_mt7628[] = {
329 +- FUNC("sdcx", 3, 18, 1),
330 ++ FUNC("sdxc d7", 3, 18, 1),
331 + FUNC("utif", 2, 18, 1),
332 + FUNC("gpio", 1, 18, 1),
333 +- FUNC("pwm", 0, 18, 1),
334 ++ FUNC("pwm0", 0, 18, 1),
335 + };
336 +
337 + static struct rt2880_pmx_func uart2_grp_mt7628[] = {
338 +- FUNC("sdcx", 3, 20, 2),
339 ++ FUNC("sdxc d5 d4", 3, 20, 2),
340 + FUNC("pwm", 2, 20, 2),
341 + FUNC("gpio", 1, 20, 2),
342 +- FUNC("uart", 0, 20, 2),
343 ++ FUNC("uart2", 0, 20, 2),
344 + };
345 +
346 + static struct rt2880_pmx_func uart1_grp_mt7628[] = {
347 +- FUNC("sdcx", 3, 45, 2),
348 ++ FUNC("sw_r", 3, 45, 2),
349 + FUNC("pwm", 2, 45, 2),
350 + FUNC("gpio", 1, 45, 2),
351 +- FUNC("uart", 0, 45, 2),
352 ++ FUNC("uart1", 0, 45, 2),
353 + };
354 +
355 + static struct rt2880_pmx_func i2c_grp_mt7628[] = {
356 +@@ -134,21 +134,21 @@ static struct rt2880_pmx_func i2c_grp_mt7628[] = {
357 +
358 + static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("reclk", 0, 36, 1) };
359 + static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 37, 1) };
360 +-static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 15, 38) };
361 ++static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 38, 1) };
362 + static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) };
363 +
364 + static struct rt2880_pmx_func sd_mode_grp_mt7628[] = {
365 + FUNC("jtag", 3, 22, 8),
366 + FUNC("utif", 2, 22, 8),
367 + FUNC("gpio", 1, 22, 8),
368 +- FUNC("sdcx", 0, 22, 8),
369 ++ FUNC("sdxc", 0, 22, 8),
370 + };
371 +
372 + static struct rt2880_pmx_func uart0_grp_mt7628[] = {
373 + FUNC("-", 3, 12, 2),
374 + FUNC("-", 2, 12, 2),
375 + FUNC("gpio", 1, 12, 2),
376 +- FUNC("uart", 0, 12, 2),
377 ++ FUNC("uart0", 0, 12, 2),
378 + };
379 +
380 + static struct rt2880_pmx_func i2s_grp_mt7628[] = {
381 +@@ -162,7 +162,7 @@ static struct rt2880_pmx_func spi_cs1_grp_mt7628[] = {
382 + FUNC("-", 3, 6, 1),
383 + FUNC("refclk", 2, 6, 1),
384 + FUNC("gpio", 1, 6, 1),
385 +- FUNC("spi", 0, 6, 1),
386 ++ FUNC("spi cs1", 0, 6, 1),
387 + };
388 +
389 + static struct rt2880_pmx_func spis_grp_mt7628[] = {
390 +@@ -179,28 +179,44 @@ static struct rt2880_pmx_func gpio_grp_mt7628[] = {
391 + FUNC("gpio", 0, 11, 1),
392 + };
393 +
394 +-#define MT7628_GPIO_MODE_MASK 0x3
395 +-
396 +-#define MT7628_GPIO_MODE_PWM1 30
397 +-#define MT7628_GPIO_MODE_PWM0 28
398 +-#define MT7628_GPIO_MODE_UART2 26
399 +-#define MT7628_GPIO_MODE_UART1 24
400 +-#define MT7628_GPIO_MODE_I2C 20
401 +-#define MT7628_GPIO_MODE_REFCLK 18
402 +-#define MT7628_GPIO_MODE_PERST 16
403 +-#define MT7628_GPIO_MODE_WDT 14
404 +-#define MT7628_GPIO_MODE_SPI 12
405 +-#define MT7628_GPIO_MODE_SDMODE 10
406 +-#define MT7628_GPIO_MODE_UART0 8
407 +-#define MT7628_GPIO_MODE_I2S 6
408 +-#define MT7628_GPIO_MODE_CS1 4
409 +-#define MT7628_GPIO_MODE_SPIS 2
410 +-#define MT7628_GPIO_MODE_GPIO 0
411 ++static struct rt2880_pmx_func wled_kn_grp_mt7628[] = {
412 ++ FUNC("rsvd", 3, 35, 1),
413 ++ FUNC("rsvd", 2, 35, 1),
414 ++ FUNC("gpio", 1, 35, 1),
415 ++ FUNC("wled_kn", 0, 35, 1),
416 ++};
417 ++
418 ++static struct rt2880_pmx_func wled_an_grp_mt7628[] = {
419 ++ FUNC("rsvd", 3, 44, 1),
420 ++ FUNC("rsvd", 2, 44, 1),
421 ++ FUNC("gpio", 1, 44, 1),
422 ++ FUNC("wled_an", 0, 44, 1),
423 ++};
424 ++
425 ++#define MT7628_GPIO_MODE_MASK 0x3
426 ++
427 ++#define MT7628_GPIO_MODE_WLED_KN 48
428 ++#define MT7628_GPIO_MODE_WLED_AN 32
429 ++#define MT7628_GPIO_MODE_PWM1 30
430 ++#define MT7628_GPIO_MODE_PWM0 28
431 ++#define MT7628_GPIO_MODE_UART2 26
432 ++#define MT7628_GPIO_MODE_UART1 24
433 ++#define MT7628_GPIO_MODE_I2C 20
434 ++#define MT7628_GPIO_MODE_REFCLK 18
435 ++#define MT7628_GPIO_MODE_PERST 16
436 ++#define MT7628_GPIO_MODE_WDT 14
437 ++#define MT7628_GPIO_MODE_SPI 12
438 ++#define MT7628_GPIO_MODE_SDMODE 10
439 ++#define MT7628_GPIO_MODE_UART0 8
440 ++#define MT7628_GPIO_MODE_I2S 6
441 ++#define MT7628_GPIO_MODE_CS1 4
442 ++#define MT7628_GPIO_MODE_SPIS 2
443 ++#define MT7628_GPIO_MODE_GPIO 0
444 +
445 + static struct rt2880_pmx_group mt7628an_pinmux_data[] = {
446 +- GRP_G("pmw1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK,
447 ++ GRP_G("pwm1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK,
448 + 1, MT7628_GPIO_MODE_PWM1),
449 +- GRP_G("pmw1", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK,
450 ++ GRP_G("pwm0", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK,
451 + 1, MT7628_GPIO_MODE_PWM0),
452 + GRP_G("uart2", uart2_grp_mt7628, MT7628_GPIO_MODE_MASK,
453 + 1, MT7628_GPIO_MODE_UART2),
454 +@@ -224,6 +240,10 @@ static struct rt2880_pmx_group mt7628an_pinmux_data[] = {
455 + 1, MT7628_GPIO_MODE_SPIS),
456 + GRP_G("gpio", gpio_grp_mt7628, MT7628_GPIO_MODE_MASK,
457 + 1, MT7628_GPIO_MODE_GPIO),
458 ++ GRP_G("wled_an", wled_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
459 ++ 1, MT7628_GPIO_MODE_WLED_AN),
460 ++ GRP_G("wled_kn", wled_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
461 ++ 1, MT7628_GPIO_MODE_WLED_KN),
462 + { 0 }
463 + };
464 +
465 +diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
466 +index d0eae5f2bd87..4fb62add2636 100644
467 +--- a/arch/parisc/include/asm/dma-mapping.h
468 ++++ b/arch/parisc/include/asm/dma-mapping.h
469 +@@ -39,6 +39,8 @@ struct hppa_dma_ops {
470 + ** flush/purge and allocate "regular" cacheable pages for everything.
471 + */
472 +
473 ++#define DMA_ERROR_CODE (~(dma_addr_t)0)
474 ++
475 + #ifdef CONFIG_PA11
476 + extern struct hppa_dma_ops pcxl_dma_ops;
477 + extern struct hppa_dma_ops pcx_dma_ops;
478 +@@ -209,12 +211,13 @@ parisc_walk_tree(struct device *dev)
479 + break;
480 + }
481 + }
482 +- BUG_ON(!dev->platform_data);
483 + return dev->platform_data;
484 + }
485 +-
486 +-#define GET_IOC(dev) (HBA_DATA(parisc_walk_tree(dev))->iommu)
487 +-
488 ++
489 ++#define GET_IOC(dev) ({ \
490 ++ void *__pdata = parisc_walk_tree(dev); \
491 ++ __pdata ? HBA_DATA(__pdata)->iommu : NULL; \
492 ++})
493 +
494 + #ifdef CONFIG_IOMMU_CCIO
495 + struct parisc_device;
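
Callers of GET_IOC() must now handle a NULL result instead of relying on the removed BUG_ON(). A self-contained model of the expected calling pattern (stand-in types, not the driver's real ones):

#include <stdio.h>

typedef unsigned long dma_addr_t;
#define DMA_ERROR_CODE	(~(dma_addr_t)0)	/* as added by the patch */

struct ioc { int dummy; };

/* stand-in for GET_IOC(): the lookup may now fail with NULL */
static struct ioc *get_ioc(void *dev) { (void)dev; return NULL; }

static dma_addr_t map_single(void *dev)
{
	struct ioc *ioc = get_ioc(dev);

	if (!ioc)
		return DMA_ERROR_CODE;	/* recoverable, no BUG_ON() */
	return 0;			/* real mapping would happen here */
}

int main(void)
{
	printf("%#lx\n", map_single(NULL));
	return 0;
}
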
496 +diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
497 +index 8eefb12d1d33..3781b8c0fad9 100644
498 +--- a/arch/parisc/kernel/syscall_table.S
499 ++++ b/arch/parisc/kernel/syscall_table.S
500 +@@ -361,7 +361,7 @@
501 + ENTRY_SAME(ni_syscall) /* 263: reserved for vserver */
502 + ENTRY_SAME(add_key)
503 + ENTRY_SAME(request_key) /* 265 */
504 +- ENTRY_SAME(keyctl)
505 ++ ENTRY_COMP(keyctl)
506 + ENTRY_SAME(ioprio_set)
507 + ENTRY_SAME(ioprio_get)
508 + ENTRY_SAME(inotify_init)
509 +diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
510 +index 50d64a7fc672..3b7c02f9b726 100644
511 +--- a/arch/parisc/mm/fault.c
512 ++++ b/arch/parisc/mm/fault.c
513 +@@ -303,7 +303,7 @@ bad_area:
514 + case 15: /* Data TLB miss fault/Data page fault */
515 + /* send SIGSEGV when outside of vma */
516 + if (!vma ||
517 +- address < vma->vm_start || address > vma->vm_end) {
518 ++ address < vma->vm_start || address >= vma->vm_end) {
519 + si.si_signo = SIGSEGV;
520 + si.si_code = SEGV_MAPERR;
521 + break;
522 +diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
523 +index ee46ffef608e..743ad7a400d6 100644
524 +--- a/arch/powerpc/include/asm/elf.h
525 ++++ b/arch/powerpc/include/asm/elf.h
526 +@@ -23,12 +23,13 @@
527 + #define CORE_DUMP_USE_REGSET
528 + #define ELF_EXEC_PAGESIZE PAGE_SIZE
529 +
530 +-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
531 +- use of this is to invoke "./ld.so someprog" to test out a new version of
532 +- the loader. We need to make sure that it is out of the way of the program
533 +- that it will "exec", and that there is sufficient room for the brk. */
534 +-
535 +-#define ELF_ET_DYN_BASE 0x20000000
536 ++/*
537 ++ * This is the base location for PIE (ET_DYN with INTERP) loads. On
538 ++ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
539 ++ * space open for things that want to use the area for 32-bit pointers.
540 ++ */
541 ++#define ELF_ET_DYN_BASE (is_32bit_task() ? 0x000400000UL : \
542 ++ 0x100000000UL)
543 +
544 + #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
545 +
546 +diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
547 +index b264937bba68..9340d05bcdc9 100644
548 +--- a/arch/powerpc/kernel/eeh.c
549 ++++ b/arch/powerpc/kernel/eeh.c
550 +@@ -306,9 +306,17 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
551 + *
552 + * For pHyp, we have to enable IO for log retrieval. Otherwise,
553 + * 0xFF's is always returned from PCI config space.
554 ++ *
555 ++ * When the @severity is EEH_LOG_PERM, the PE is going to be
556 ++ * removed. Prior to that, the drivers for devices included in
557 ++ * the PE will be closed. The drivers rely on working IO path
558 ++ * to bring the devices to quiet state. Otherwise, PCI traffic
559 ++ * from those devices after they are removed is like to cause
560 ++ * another unexpected EEH error.
561 + */
562 + if (!(pe->type & EEH_PE_PHB)) {
563 +- if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG))
564 ++ if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG) ||
565 ++ severity == EEH_LOG_PERM)
566 + eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
567 +
568 + /*
569 +diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
570 +index 0a4f23a070ab..ffca0bf5b8b4 100644
571 +--- a/arch/powerpc/kernel/eeh_driver.c
572 ++++ b/arch/powerpc/kernel/eeh_driver.c
573 +@@ -651,7 +651,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
574 + */
575 + #define MAX_WAIT_FOR_RECOVERY 300
576 +
577 +-static void eeh_handle_normal_event(struct eeh_pe *pe)
578 ++static bool eeh_handle_normal_event(struct eeh_pe *pe)
579 + {
580 + struct pci_bus *frozen_bus;
581 + int rc = 0;
582 +@@ -661,7 +661,7 @@ static void eeh_handle_normal_event(struct eeh_pe *pe)
583 + if (!frozen_bus) {
584 + pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
585 + __func__, pe->phb->global_number, pe->addr);
586 +- return;
587 ++ return false;
588 + }
589 +
590 + eeh_pe_update_time_stamp(pe);
591 +@@ -778,7 +778,7 @@ static void eeh_handle_normal_event(struct eeh_pe *pe)
592 + pr_info("EEH: Notify device driver to resume\n");
593 + eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);
594 +
595 +- return;
596 ++ return false;
597 +
598 + excess_failures:
599 + /*
600 +@@ -819,7 +819,11 @@ perm_error:
601 + pci_lock_rescan_remove();
602 + pcibios_remove_pci_devices(frozen_bus);
603 + pci_unlock_rescan_remove();
604 ++
605 ++ /* The passed PE should no longer be used */
606 ++ return true;
607 + }
608 ++ return false;
609 + }
610 +
611 + static void eeh_handle_special_event(void)
612 +@@ -885,7 +889,14 @@ static void eeh_handle_special_event(void)
613 + */
614 + if (rc == EEH_NEXT_ERR_FROZEN_PE ||
615 + rc == EEH_NEXT_ERR_FENCED_PHB) {
616 +- eeh_handle_normal_event(pe);
617 ++ /*
618 ++ * eeh_handle_normal_event() can make the PE stale if it
619 ++ * determines that the PE cannot possibly be recovered.
620 ++ * Don't modify the PE state if that's the case.
621 ++ */
622 ++ if (eeh_handle_normal_event(pe))
623 ++ continue;
624 ++
625 + eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
626 + } else {
627 + pci_lock_rescan_remove();
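
The new bool return encodes an ownership rule: once the handler tears the PE down, the caller must not touch it again. A minimal model of that contract:

#include <stdbool.h>
#include <stdio.h>

struct pe { int recovering; };

/* returns true when *pe has been removed and is stale */
static bool handle_event(struct pe *pe, bool permanent_failure)
{
	(void)pe;
	if (permanent_failure)
		return true;	/* caller must skip further PE accesses */
	return false;
}

int main(void)
{
	struct pe pe = { .recovering = 1 };

	if (!handle_event(&pe, false))
		pe.recovering = 0;	/* safe: PE still valid */
	printf("%d\n", pe.recovering);
	return 0;
}
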
628 +diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
629 +index 7c053f281406..1138fec3dd65 100644
630 +--- a/arch/powerpc/kernel/kprobes.c
631 ++++ b/arch/powerpc/kernel/kprobes.c
632 +@@ -514,6 +514,15 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
633 + #endif
634 + #endif
635 +
636 ++ /*
637 ++ * jprobes use jprobe_return() which skips the normal return
638 ++ * path of the function, and this messes up the accounting of the
639 ++ * function graph tracer.
640 ++ *
641 ++ * Pause function graph tracing while performing the jprobe function.
642 ++ */
643 ++ pause_graph_tracing();
644 ++
645 + return 1;
646 + }
647 +
648 +@@ -536,6 +545,8 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
649 + * saved regs...
650 + */
651 + memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
652 ++ /* It's OK to start function graph tracing again */
653 ++ unpause_graph_tracing();
654 + preempt_enable_no_resched();
655 + return 1;
656 + }
657 +diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
658 +index 63c37fd2b7a6..c1e10ffadd17 100644
659 +--- a/arch/powerpc/kvm/book3s_hv.c
660 ++++ b/arch/powerpc/kvm/book3s_hv.c
661 +@@ -2238,6 +2238,27 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
662 + return -EINVAL;
663 + }
664 +
665 ++ /*
666 ++ * Don't allow entry with a suspended transaction, because
667 ++ * the guest entry/exit code will lose it.
668 ++ * If the guest has TM enabled, save away their TM-related SPRs
669 ++ * (they will get restored by the TM unavailable interrupt).
670 ++ */
671 ++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
672 ++ if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
673 ++ (current->thread.regs->msr & MSR_TM)) {
674 ++ if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
675 ++ run->exit_reason = KVM_EXIT_FAIL_ENTRY;
676 ++ run->fail_entry.hardware_entry_failure_reason = 0;
677 ++ return -EINVAL;
678 ++ }
679 ++ current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
680 ++ current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
681 ++ current->thread.tm_texasr = mfspr(SPRN_TEXASR);
682 ++ current->thread.regs->msr &= ~MSR_TM;
683 ++ }
684 ++#endif
685 ++
686 + kvmppc_core_prepare_to_enter(vcpu);
687 +
688 + /* No need to go into the guest when all we'll do is come back out */
689 +diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
690 +index d7697ab802f6..8e136b88cdf4 100644
691 +--- a/arch/s390/include/asm/ctl_reg.h
692 ++++ b/arch/s390/include/asm/ctl_reg.h
693 +@@ -15,7 +15,9 @@
694 + BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
695 + asm volatile( \
696 + " lctlg %1,%2,%0\n" \
697 +- : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\
698 ++ : \
699 ++ : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high) \
700 ++ : "memory"); \
701 + }
702 +
703 + #define __ctl_store(array, low, high) { \
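
Without the "memory" clobber the compiler may assume the lctlg instruction neither reads nor writes memory and cache or reorder accesses across it; since loading control registers changes address-translation state, that assumption is unsafe. The clobber turns the asm into a full compiler barrier, as this userspace illustration shows:

#include <stdio.h>

static int flag;	/* global, so the barrier affects it */

int main(void)
{
	flag = 1;
	/* The "memory" clobber forces the store to 'flag' to happen
	 * before this point and forbids caching it across the asm,
	 * which is what the patched __ctl_load() now guarantees. */
	asm volatile("" : : : "memory");
	printf("%d\n", flag);
	return 0;
}
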
704 +diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
705 +index 3ad48f22de78..f133ce08b270 100644
706 +--- a/arch/s390/include/asm/elf.h
707 ++++ b/arch/s390/include/asm/elf.h
708 +@@ -154,14 +154,13 @@ extern unsigned int vdso_enabled;
709 + #define CORE_DUMP_USE_REGSET
710 + #define ELF_EXEC_PAGESIZE 4096
711 +
712 +-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
713 +- use of this is to invoke "./ld.so someprog" to test out a new version of
714 +- the loader. We need to make sure that it is out of the way of the program
715 +- that it will "exec", and that there is sufficient room for the brk. 64-bit
716 +- tasks are aligned to 4GB. */
717 +-#define ELF_ET_DYN_BASE (is_32bit_task() ? \
718 +- (STACK_TOP / 3 * 2) : \
719 +- (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
720 ++/*
721 ++ * This is the base location for PIE (ET_DYN with INTERP) loads. On
722 ++ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
723 ++ * space open for things that want to use the area for 32-bit pointers.
724 ++ */
725 ++#define ELF_ET_DYN_BASE (is_compat_task() ? 0x000400000UL : \
726 ++ 0x100000000UL)
727 +
728 + /* This yields a mask that user programs can use to figure out what
729 + instruction set this CPU supports. */
730 +diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
731 +index ef7d6c8fea66..f354fd84adeb 100644
732 +--- a/arch/s390/mm/vmem.c
733 ++++ b/arch/s390/mm/vmem.c
734 +@@ -372,7 +372,7 @@ void __init vmem_map_init(void)
735 + ro_end = (unsigned long)&_eshared & PAGE_MASK;
736 + for_each_memblock(memory, reg) {
737 + start = reg->base;
738 +- end = reg->base + reg->size - 1;
739 ++ end = reg->base + reg->size;
740 + if (start >= ro_end || end <= ro_start)
741 + vmem_add_mem(start, end - start, 0);
742 + else if (start >= ro_start && end <= ro_end)
743 +diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
744 +index 3b5b7a9c866d..2903ff34174c 100644
745 +--- a/arch/x86/include/asm/elf.h
746 ++++ b/arch/x86/include/asm/elf.h
747 +@@ -245,12 +245,13 @@ extern int force_personality32;
748 + #define CORE_DUMP_USE_REGSET
749 + #define ELF_EXEC_PAGESIZE 4096
750 +
751 +-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
752 +- use of this is to invoke "./ld.so someprog" to test out a new version of
753 +- the loader. We need to make sure that it is out of the way of the program
754 +- that it will "exec", and that there is sufficient room for the brk. */
755 +-
756 +-#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
757 ++/*
758 ++ * This is the base location for PIE (ET_DYN with INTERP) loads. On
759 ++ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
760 ++ * space open for things that want to use the area for 32-bit pointers.
761 ++ */
762 ++#define ELF_ET_DYN_BASE (mmap_is_ia32() ? 0x000400000UL : \
763 ++ 0x100000000UL)
764 +
765 + /* This yields a mask that user programs can use to figure out what
766 + instruction set this CPU supports. This could be done in user space,
767 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
768 +index 99a15e38fa06..32e29f926e5a 100644
769 +--- a/arch/x86/kvm/vmx.c
770 ++++ b/arch/x86/kvm/vmx.c
771 +@@ -2118,7 +2118,7 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr)
772 + if (!(vmcs12->exception_bitmap & (1u << nr)))
773 + return 0;
774 +
775 +- nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason,
776 ++ nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
777 + vmcs_read32(VM_EXIT_INTR_INFO),
778 + vmcs_readl(EXIT_QUALIFICATION));
779 + return 1;
780 +@@ -6153,7 +6153,6 @@ static __init int hardware_setup(void)
781 + vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
782 + vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
783 + vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
784 +- vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
785 +
786 + memcpy(vmx_msr_bitmap_legacy_x2apic,
787 + vmx_msr_bitmap_legacy, PAGE_SIZE);
788 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
789 +index c730e4708c7d..9d7ea42482e3 100644
790 +--- a/arch/x86/kvm/x86.c
791 ++++ b/arch/x86/kvm/x86.c
792 +@@ -4910,6 +4910,8 @@ static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
793 +
794 + if (var.unusable) {
795 + memset(desc, 0, sizeof(*desc));
796 ++ if (base3)
797 ++ *base3 = 0;
798 + return false;
799 + }
800 +
801 +@@ -6049,7 +6051,8 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
802 +
803 + kvm_x86_ops->patch_hypercall(vcpu, instruction);
804 +
805 +- return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
806 ++ return emulator_write_emulated(ctxt, rip, instruction, 3,
807 ++ &ctxt->exception);
808 + }
809 +
810 + /*
811 +diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
812 +index fa997dfaef24..2f1c52e252b0 100644
813 +--- a/arch/x86/lib/copy_user_64.S
814 ++++ b/arch/x86/lib/copy_user_64.S
815 +@@ -112,7 +112,7 @@ ENTRY(copy_user_generic_unrolled)
816 + movl %edx,%ecx
817 + andl $63,%edx
818 + shrl $6,%ecx
819 +- jz 17f
820 ++ jz .L_copy_short_string
821 + 1: movq (%rsi),%r8
822 + 2: movq 1*8(%rsi),%r9
823 + 3: movq 2*8(%rsi),%r10
824 +@@ -133,7 +133,8 @@ ENTRY(copy_user_generic_unrolled)
825 + leaq 64(%rdi),%rdi
826 + decl %ecx
827 + jnz 1b
828 +-17: movl %edx,%ecx
829 ++.L_copy_short_string:
830 ++ movl %edx,%ecx
831 + andl $7,%edx
832 + shrl $3,%ecx
833 + jz 20f
834 +@@ -251,6 +252,8 @@ ENDPROC(copy_user_generic_string)
835 + ENTRY(copy_user_enhanced_fast_string)
836 + CFI_STARTPROC
837 + ASM_STAC
838 ++ cmpl $64,%edx
839 ++	jb .L_copy_short_string /* less than 64 bytes, avoid the costly 'rep' */
840 + movl %edx,%ecx
841 + 1: rep
842 + movsb
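
In C terms the added fast path looks roughly like the model below (the real code is x86-64 assembly): copies under 64 bytes branch to the unrolled tail instead of paying the startup cost of 'rep movsb'.

#include <stdio.h>
#include <string.h>

static void copy_model(char *dst, const char *src, size_t len)
{
	if (len < 64) {			/* jb .L_copy_short_string */
		while (len--)
			*dst++ = *src++;
		return;
	}
	memcpy(dst, src, len);		/* 'rep movsb' path */
}

int main(void)
{
	char src[] = "short copy", dst[sizeof(src)];

	copy_model(dst, src, sizeof(src));
	printf("%s\n", dst);
	return 0;
}
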
843 +diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
844 +index 6a3c774eaff6..c2fea3af515d 100644
845 +--- a/arch/x86/mm/mpx.c
846 ++++ b/arch/x86/mm/mpx.c
847 +@@ -312,7 +312,7 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs,
848 + * We were not able to extract an address from the instruction,
849 + * probably because there was something invalid in it.
850 + */
851 +- if (info->si_addr == (void *)-1) {
852 ++ if (info->si_addr == (void __user *)-1) {
853 + err = -EINVAL;
854 + goto err_out;
855 + }
856 +diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
857 +index 0c2fae8d929d..73eb7fd4aec4 100644
858 +--- a/arch/x86/tools/relocs.c
859 ++++ b/arch/x86/tools/relocs.c
860 +@@ -992,11 +992,12 @@ static void emit_relocs(int as_text, int use_real_mode)
861 + die("Segment relocations found but --realmode not specified\n");
862 +
863 + /* Order the relocations for more efficient processing */
864 +- sort_relocs(&relocs16);
865 + sort_relocs(&relocs32);
866 + #if ELF_BITS == 64
867 + sort_relocs(&relocs32neg);
868 + sort_relocs(&relocs64);
869 ++#else
870 ++ sort_relocs(&relocs16);
871 + #endif
872 +
873 + /* Print the relocations */
874 +diff --git a/drivers/base/core.c b/drivers/base/core.c
875 +index 21d13038534e..ed29f61d1338 100644
876 +--- a/drivers/base/core.c
877 ++++ b/drivers/base/core.c
878 +@@ -1981,7 +1981,11 @@ void device_shutdown(void)
879 + pm_runtime_get_noresume(dev);
880 + pm_runtime_barrier(dev);
881 +
882 +- if (dev->bus && dev->bus->shutdown) {
883 ++ if (dev->class && dev->class->shutdown) {
884 ++ if (initcall_debug)
885 ++ dev_info(dev, "shutdown\n");
886 ++ dev->class->shutdown(dev);
887 ++ } else if (dev->bus && dev->bus->shutdown) {
888 + if (initcall_debug)
889 + dev_info(dev, "shutdown\n");
890 + dev->bus->shutdown(dev);
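
The hunk gives class-level shutdown callbacks priority over bus-level ones. A compact model of the resulting dispatch order (the real function also falls back to dev->driver->shutdown):

#include <stdio.h>

struct dev {
	void (*class_shutdown)(struct dev *);
	void (*bus_shutdown)(struct dev *);
};

static void shutdown_one(struct dev *d)
{
	if (d->class_shutdown)		/* new: class callback wins */
		d->class_shutdown(d);
	else if (d->bus_shutdown)
		d->bus_shutdown(d);
}

static void example_class_shutdown(struct dev *d)
{
	(void)d;
	puts("class shutdown");
}

int main(void)
{
	struct dev d = { .class_shutdown = example_class_shutdown };

	shutdown_one(&d);
	return 0;
}
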
891 +diff --git a/drivers/base/platform.c b/drivers/base/platform.c
892 +index 7403de94832c..29a4ef08e051 100644
893 +--- a/drivers/base/platform.c
894 ++++ b/drivers/base/platform.c
895 +@@ -729,7 +729,7 @@ static ssize_t driver_override_store(struct device *dev,
896 + const char *buf, size_t count)
897 + {
898 + struct platform_device *pdev = to_platform_device(dev);
899 +- char *driver_override, *old = pdev->driver_override, *cp;
900 ++ char *driver_override, *old, *cp;
901 +
902 + if (count > PATH_MAX)
903 + return -EINVAL;
904 +@@ -742,12 +742,15 @@ static ssize_t driver_override_store(struct device *dev,
905 + if (cp)
906 + *cp = '\0';
907 +
908 ++ device_lock(dev);
909 ++ old = pdev->driver_override;
910 + if (strlen(driver_override)) {
911 + pdev->driver_override = driver_override;
912 + } else {
913 + kfree(driver_override);
914 + pdev->driver_override = NULL;
915 + }
916 ++ device_unlock(dev);
917 +
918 + kfree(old);
919 +
920 +@@ -758,8 +761,12 @@ static ssize_t driver_override_show(struct device *dev,
921 + struct device_attribute *attr, char *buf)
922 + {
923 + struct platform_device *pdev = to_platform_device(dev);
924 ++ ssize_t len;
925 +
926 +- return sprintf(buf, "%s\n", pdev->driver_override);
927 ++ device_lock(dev);
928 ++ len = sprintf(buf, "%s\n", pdev->driver_override);
929 ++ device_unlock(dev);
930 ++ return len;
931 + }
932 + static DEVICE_ATTR_RW(driver_override);
933 +
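
Both sysfs handlers now take device_lock(), so a reader can never see a half-swapped driver_override pointer, and the old string is freed only after the lock is dropped. A userspace model of the same discipline, with a pthread mutex standing in for device_lock() (compile with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static char *driver_override;

static void override_store(const char *buf)
{
	char *new = *buf ? strdup(buf) : NULL;
	char *old;

	pthread_mutex_lock(&dev_lock);		/* device_lock(dev) */
	old = driver_override;
	driver_override = new;
	pthread_mutex_unlock(&dev_lock);	/* device_unlock(dev) */
	free(old);				/* freed outside the lock */
}

static void override_show(char *buf, size_t n)
{
	pthread_mutex_lock(&dev_lock);
	snprintf(buf, n, "%s\n", driver_override ? driver_override : "");
	pthread_mutex_unlock(&dev_lock);
}

int main(void)
{
	char buf[64];

	override_store("vfio-platform");
	override_show(buf, sizeof(buf));
	fputs(buf, stdout);
	return 0;
}
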
934 +diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
935 +index d2be3f9c211c..dcc09e3e5778 100644
936 +--- a/drivers/base/power/sysfs.c
937 ++++ b/drivers/base/power/sysfs.c
938 +@@ -268,6 +268,8 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
939 + value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
940 + else if (!strcmp(buf, "any") || !strcmp(buf, "any\n"))
941 + value = PM_QOS_LATENCY_ANY;
942 ++ else
943 ++ return -EINVAL;
944 + }
945 + ret = dev_pm_qos_update_user_latency_tolerance(dev, value);
946 + return ret < 0 ? ret : n;
947 +diff --git a/drivers/char/random.c b/drivers/char/random.c
948 +index 9cd6968e2f92..d55156fc064d 100644
949 +--- a/drivers/char/random.c
950 ++++ b/drivers/char/random.c
951 +@@ -1714,13 +1714,15 @@ int random_int_secret_init(void)
952 + return 0;
953 + }
954 +
955 ++static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash)
956 ++ __aligned(sizeof(unsigned long));
957 ++
958 + /*
959 + * Get a random word for internal kernel use only. Similar to urandom but
960 + * with the goal of minimal entropy pool depletion. As a result, the random
961 + * value is not cryptographically secure but for several uses the cost of
962 + * depleting entropy is too high
963 + */
964 +-static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
965 + unsigned int get_random_int(void)
966 + {
967 + __u32 *hash;
968 +@@ -1741,6 +1743,28 @@ unsigned int get_random_int(void)
969 + EXPORT_SYMBOL(get_random_int);
970 +
971 + /*
972 ++ * Same as get_random_int(), but returns unsigned long.
973 ++ */
974 ++unsigned long get_random_long(void)
975 ++{
976 ++ __u32 *hash;
977 ++ unsigned long ret;
978 ++
979 ++ if (arch_get_random_long(&ret))
980 ++ return ret;
981 ++
982 ++ hash = get_cpu_var(get_random_int_hash);
983 ++
984 ++ hash[0] += current->pid + jiffies + random_get_entropy();
985 ++ md5_transform(hash, random_int_secret);
986 ++ ret = *(unsigned long *)hash;
987 ++ put_cpu_var(get_random_int_hash);
988 ++
989 ++ return ret;
990 ++}
991 ++EXPORT_SYMBOL(get_random_long);
992 ++
993 ++/*
994 + * randomize_range() returns a start address such that
995 + *
996 + * [...... <range> .....]
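
get_random_long() exists because callers randomizing 64-bit quantities (e.g. mmap bases) would silently lose the top half if they merely widened get_random_int(). A userspace sketch of the difference; rand() is a stand-in for the kernel's MD5-based generator and a 64-bit unsigned long is assumed:

#include <stdio.h>
#include <stdlib.h>

static unsigned int model_random_int(void)
{
	return (unsigned int)rand();
}

static unsigned long model_random_long(void)
{
	/* two 32-bit draws stitched together; the real function reads
	 * sizeof(long) bytes straight out of the per-cpu hash */
	return ((unsigned long)rand() << 32) ^ (unsigned long)rand();
}

int main(void)
{
	unsigned long mask = (1UL << 46) - 1;	/* e.g. an address range */

	printf("int-based:  %#lx\n", (unsigned long)model_random_int() & mask);
	printf("long-based: %#lx\n", model_random_long() & mask);
	return 0;
}
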
997 +diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
998 +index 810b171b55b7..374b0006aa7a 100644
999 +--- a/drivers/char/virtio_console.c
1000 ++++ b/drivers/char/virtio_console.c
1001 +@@ -1864,7 +1864,7 @@ static void config_work_handler(struct work_struct *work)
1002 + {
1003 + struct ports_device *portdev;
1004 +
1005 +- portdev = container_of(work, struct ports_device, control_work);
1006 ++ portdev = container_of(work, struct ports_device, config_work);
1007 + if (!use_multiport(portdev)) {
1008 + struct virtio_device *vdev;
1009 + struct port *port;
1010 +diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
1011 +index 25a70d06c5bf..55836a538a68 100644
1012 +--- a/drivers/cpufreq/cpufreq_conservative.c
1013 ++++ b/drivers/cpufreq/cpufreq_conservative.c
1014 +@@ -204,8 +204,8 @@ static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
1015 + int ret;
1016 + ret = sscanf(buf, "%u", &input);
1017 +
1018 +- /* cannot be lower than 11 otherwise freq will not fall */
1019 +- if (ret != 1 || input < 11 || input > 100 ||
1020 ++ /* cannot be lower than 1 otherwise freq will not fall */
1021 ++ if (ret != 1 || input < 1 || input > 100 ||
1022 + input >= cs_tuners->up_threshold)
1023 + return -EINVAL;
1024 +
1025 +diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
1026 +index d6d425773fa4..5b2db3c6568f 100644
1027 +--- a/drivers/cpufreq/s3c2416-cpufreq.c
1028 ++++ b/drivers/cpufreq/s3c2416-cpufreq.c
1029 +@@ -400,7 +400,6 @@ static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
1030 + rate = clk_get_rate(s3c_freq->hclk);
1031 + if (rate < 133 * 1000 * 1000) {
1032 + pr_err("cpufreq: HCLK not at 133MHz\n");
1033 +- clk_put(s3c_freq->hclk);
1034 + ret = -EINVAL;
1035 + goto err_armclk;
1036 + }
1037 +diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
1038 +index 3178f84d2757..6bff78c5c032 100644
1039 +--- a/drivers/crypto/atmel-sha.c
1040 ++++ b/drivers/crypto/atmel-sha.c
1041 +@@ -963,7 +963,9 @@ static int atmel_sha_finup(struct ahash_request *req)
1042 + ctx->flags |= SHA_FLAGS_FINUP;
1043 +
1044 + err1 = atmel_sha_update(req);
1045 +- if (err1 == -EINPROGRESS || err1 == -EBUSY)
1046 ++ if (err1 == -EINPROGRESS ||
1047 ++ (err1 == -EBUSY && (ahash_request_flags(req) &
1048 ++ CRYPTO_TFM_REQ_MAY_BACKLOG)))
1049 + return err1;
1050 +
1051 + /*
1052 +diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
1053 +index 9742b3d66288..fc6d2d568541 100644
1054 +--- a/drivers/crypto/caam/caamhash.c
1055 ++++ b/drivers/crypto/caam/caamhash.c
1056 +@@ -490,7 +490,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
1057 + ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
1058 + if (!ret) {
1059 + /* in progress */
1060 +- wait_for_completion_interruptible(&result.completion);
1061 ++ wait_for_completion(&result.completion);
1062 + ret = result.err;
1063 + #ifdef DEBUG
1064 + print_hex_dump(KERN_ERR,
1065 +diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
1066 +index e1eaf4ff9762..3ce1d5cdcbd2 100644
1067 +--- a/drivers/crypto/caam/key_gen.c
1068 ++++ b/drivers/crypto/caam/key_gen.c
1069 +@@ -103,7 +103,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
1070 + ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
1071 + if (!ret) {
1072 + /* in progress */
1073 +- wait_for_completion_interruptible(&result.completion);
1074 ++ wait_for_completion(&result.completion);
1075 + ret = result.err;
1076 + #ifdef DEBUG
1077 + print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
1078 +diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
1079 +index f062158d4dc9..eb79d49ab88c 100644
1080 +--- a/drivers/crypto/talitos.c
1081 ++++ b/drivers/crypto/talitos.c
1082 +@@ -634,7 +634,7 @@ static void talitos_unregister_rng(struct device *dev)
1083 + * crypto alg
1084 + */
1085 + #define TALITOS_CRA_PRIORITY 3000
1086 +-#define TALITOS_MAX_KEY_SIZE 96
1087 ++#define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
1088 + #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
1089 +
1090 + struct talitos_ctx {
1091 +@@ -1322,6 +1322,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1092 + {
1093 + struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1094 +
1095 ++ if (keylen > TALITOS_MAX_KEY_SIZE) {
1096 ++ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1097 ++ return -EINVAL;
1098 ++ }
1099 ++
1100 + memcpy(&ctx->key, key, keylen);
1101 + ctx->keylen = keylen;
1102 +
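
The added check rejects oversized keys before the memcpy() into the fixed-size ctx->key buffer, which previously could overflow. The pattern in isolation, with buffer sizes per the new TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE 32 + SHA512_BLOCK_SIZE 128):

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define MAX_KEY_SIZE	(32 + 128)	/* AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE */

struct ctx {
	unsigned char key[MAX_KEY_SIZE];
	unsigned int keylen;
};

static int setkey(struct ctx *ctx, const unsigned char *key, unsigned int keylen)
{
	if (keylen > MAX_KEY_SIZE)
		return -EINVAL;		/* was an unchecked overflow */
	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;
	return 0;
}

int main(void)
{
	static struct ctx c;
	unsigned char key[200] = { 0 };

	printf("%d\n", setkey(&c, key, sizeof(key)));	/* prints -22 */
	return 0;
}
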
1103 +diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
1104 +index 1638d39af595..023c08708b56 100644
1105 +--- a/drivers/dma/ep93xx_dma.c
1106 ++++ b/drivers/dma/ep93xx_dma.c
1107 +@@ -201,7 +201,6 @@ struct ep93xx_dma_engine {
1108 + struct dma_device dma_dev;
1109 + bool m2m;
1110 + int (*hw_setup)(struct ep93xx_dma_chan *);
1111 +- void (*hw_synchronize)(struct ep93xx_dma_chan *);
1112 + void (*hw_shutdown)(struct ep93xx_dma_chan *);
1113 + void (*hw_submit)(struct ep93xx_dma_chan *);
1114 + int (*hw_interrupt)(struct ep93xx_dma_chan *);
1115 +@@ -336,27 +335,21 @@ static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
1116 + return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
1117 + }
1118 +
1119 +-static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac)
1120 ++static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
1121 + {
1122 +- unsigned long flags;
1123 + u32 control;
1124 +
1125 +- spin_lock_irqsave(&edmac->lock, flags);
1126 + control = readl(edmac->regs + M2P_CONTROL);
1127 + control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
1128 + m2p_set_control(edmac, control);
1129 +- spin_unlock_irqrestore(&edmac->lock, flags);
1130 +
1131 + while (m2p_channel_state(edmac) >= M2P_STATE_ON)
1132 +- schedule();
1133 +-}
1134 ++ cpu_relax();
1135 +
1136 +-static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
1137 +-{
1138 + m2p_set_control(edmac, 0);
1139 +
1140 +- while (m2p_channel_state(edmac) != M2P_STATE_IDLE)
1141 +- dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n");
1142 ++ while (m2p_channel_state(edmac) == M2P_STATE_STALL)
1143 ++ cpu_relax();
1144 + }
1145 +
1146 + static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
1147 +@@ -1172,26 +1165,6 @@ fail:
1148 + }
1149 +
1150 + /**
1151 +- * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the
1152 +- * current context.
1153 +- * @chan: channel
1154 +- *
1155 +- * Synchronizes the DMA channel termination to the current context. When this
1156 +- * function returns it is guaranteed that all transfers for previously issued
1157 +- * descriptors have stopped and and it is safe to free the memory associated
1158 +- * with them. Furthermore it is guaranteed that all complete callback functions
1159 +- * for a previously submitted descriptor have finished running and it is safe to
1160 +- * free resources accessed from within the complete callbacks.
1161 +- */
1162 +-static void ep93xx_dma_synchronize(struct dma_chan *chan)
1163 +-{
1164 +- struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1165 +-
1166 +- if (edmac->edma->hw_synchronize)
1167 +- edmac->edma->hw_synchronize(edmac);
1168 +-}
1169 +-
1170 +-/**
1171 + * ep93xx_dma_terminate_all - terminate all transactions
1172 + * @chan: channel
1173 + *
1174 +@@ -1354,7 +1327,6 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
1175 + dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
1176 + dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
1177 + dma_dev->device_config = ep93xx_dma_slave_config;
1178 +- dma_dev->device_synchronize = ep93xx_dma_synchronize;
1179 + dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
1180 + dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
1181 + dma_dev->device_tx_status = ep93xx_dma_tx_status;
1182 +@@ -1372,7 +1344,6 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
1183 + } else {
1184 + dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
1185 +
1186 +- edma->hw_synchronize = m2p_hw_synchronize;
1187 + edma->hw_setup = m2p_hw_setup;
1188 + edma->hw_shutdown = m2p_hw_shutdown;
1189 + edma->hw_submit = m2p_hw_submit;
1190 +diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
1191 +index d33ea7ff8614..20e26b3a5a3d 100644
1192 +--- a/drivers/gpu/drm/ast/ast_drv.h
1193 ++++ b/drivers/gpu/drm/ast/ast_drv.h
1194 +@@ -113,7 +113,11 @@ struct ast_private {
1195 + struct ttm_bo_kmap_obj cache_kmap;
1196 + int next_cursor;
1197 + bool support_wide_screen;
1198 +- bool DisableP2A;
1199 ++ enum {
1200 ++ ast_use_p2a,
1201 ++ ast_use_dt,
1202 ++ ast_use_defaults
1203 ++ } config_mode;
1204 +
1205 + enum ast_tx_chip tx_chip_type;
1206 + u8 dp501_maxclk;
1207 +diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
1208 +index dacfe512a93f..fd9a738ff882 100644
1209 +--- a/drivers/gpu/drm/ast/ast_main.c
1210 ++++ b/drivers/gpu/drm/ast/ast_main.c
1211 +@@ -62,13 +62,84 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast,
1212 + return ret;
1213 + }
1214 +
1215 ++static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
1216 ++{
1217 ++ struct device_node *np = dev->pdev->dev.of_node;
1218 ++ struct ast_private *ast = dev->dev_private;
1219 ++ uint32_t data, jregd0, jregd1;
1220 ++
1221 ++ /* Defaults */
1222 ++ ast->config_mode = ast_use_defaults;
1223 ++ *scu_rev = 0xffffffff;
1224 ++
1225 ++ /* Check if we have device-tree properties */
1226 ++ if (np && !of_property_read_u32(np, "aspeed,scu-revision-id",
1227 ++ scu_rev)) {
1228 ++ /* We do, disable P2A access */
1229 ++ ast->config_mode = ast_use_dt;
1230 ++ DRM_INFO("Using device-tree for configuration\n");
1231 ++ return;
1232 ++ }
1233 ++
1234 ++ /* Not all families have a P2A bridge */
1235 ++ if (dev->pdev->device != PCI_CHIP_AST2000)
1236 ++ return;
1237 ++
1238 ++ /*
1239 ++ * The BMC will set SCU 0x40 D[12] to 1 if the P2 bridge
1240 ++ * is disabled. We force using P2A if VGA only mode bit
1241 ++ * is set D[7]
1242 ++ */
1243 ++ jregd0 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
1244 ++ jregd1 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
1245 ++ if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) {
1246 ++ /* Double check it's actually working */
1247 ++ data = ast_read32(ast, 0xf004);
1248 ++ if (data != 0xFFFFFFFF) {
1249 ++ /* P2A works, grab silicon revision */
1250 ++ ast->config_mode = ast_use_p2a;
1251 ++
1252 ++ DRM_INFO("Using P2A bridge for configuration\n");
1253 ++
1254 ++ /* Read SCU7c (silicon revision register) */
1255 ++ ast_write32(ast, 0xf004, 0x1e6e0000);
1256 ++ ast_write32(ast, 0xf000, 0x1);
1257 ++ *scu_rev = ast_read32(ast, 0x1207c);
1258 ++ return;
1259 ++ }
1260 ++ }
1261 ++
1262 ++ /* We have a P2A bridge but it's disabled */
1263 ++ DRM_INFO("P2A bridge disabled, using default configuration\n");
1264 ++}
1265 +
1266 + static int ast_detect_chip(struct drm_device *dev, bool *need_post)
1267 + {
1268 + struct ast_private *ast = dev->dev_private;
1269 +- uint32_t data, jreg;
1270 ++ uint32_t jreg, scu_rev;
1271 ++
1272 ++ /*
1273 ++ * If VGA isn't enabled, we need to enable now or subsequent
1274 ++ * access to the scratch registers will fail. We also inform
1275 ++ * our caller that it needs to POST the chip
1276 ++ * (Assumption: VGA not enabled -> need to POST)
1277 ++ */
1278 ++ if (!ast_is_vga_enabled(dev)) {
1279 ++ ast_enable_vga(dev);
1280 ++ DRM_INFO("VGA not enabled on entry, requesting chip POST\n");
1281 ++ *need_post = true;
1282 ++ } else
1283 ++ *need_post = false;
1284 ++
1285 ++
1286 ++ /* Enable extended register access */
1287 ++ ast_enable_mmio(dev);
1288 + ast_open_key(ast);
1289 +
1290 ++ /* Find out whether P2A works or whether to use device-tree */
1291 ++ ast_detect_config_mode(dev, &scu_rev);
1292 ++
1293 ++ /* Identify chipset */
1294 + if (dev->pdev->device == PCI_CHIP_AST1180) {
1295 + ast->chip = AST1100;
1296 + DRM_INFO("AST 1180 detected\n");
1297 +@@ -80,12 +151,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
1298 + ast->chip = AST2300;
1299 + DRM_INFO("AST 2300 detected\n");
1300 + } else if (dev->pdev->revision >= 0x10) {
1301 +- uint32_t data;
1302 +- ast_write32(ast, 0xf004, 0x1e6e0000);
1303 +- ast_write32(ast, 0xf000, 0x1);
1304 +-
1305 +- data = ast_read32(ast, 0x1207c);
1306 +- switch (data & 0x0300) {
1307 ++ switch (scu_rev & 0x0300) {
1308 + case 0x0200:
1309 + ast->chip = AST1100;
1310 + DRM_INFO("AST 1100 detected\n");
1311 +@@ -110,26 +176,6 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
1312 + }
1313 + }
1314 +
1315 +- /*
1316 +- * If VGA isn't enabled, we need to enable now or subsequent
1317 +- * access to the scratch registers will fail. We also inform
1318 +- * our caller that it needs to POST the chip
1319 +- * (Assumption: VGA not enabled -> need to POST)
1320 +- */
1321 +- if (!ast_is_vga_enabled(dev)) {
1322 +- ast_enable_vga(dev);
1323 +- ast_enable_mmio(dev);
1324 +- DRM_INFO("VGA not enabled on entry, requesting chip POST\n");
1325 +- *need_post = true;
1326 +- } else
1327 +- *need_post = false;
1328 +-
1329 +- /* Check P2A Access */
1330 +- ast->DisableP2A = true;
1331 +- data = ast_read32(ast, 0xf004);
1332 +- if (data != 0xFFFFFFFF)
1333 +- ast->DisableP2A = false;
1334 +-
1335 + /* Check if we support wide screen */
1336 + switch (ast->chip) {
1337 + case AST1180:
1338 +@@ -146,17 +192,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
1339 + ast->support_wide_screen = true;
1340 + else {
1341 + ast->support_wide_screen = false;
1342 +- if (ast->DisableP2A == false) {
1343 +- /* Read SCU7c (silicon revision register) */
1344 +- ast_write32(ast, 0xf004, 0x1e6e0000);
1345 +- ast_write32(ast, 0xf000, 0x1);
1346 +- data = ast_read32(ast, 0x1207c);
1347 +- data &= 0x300;
1348 +- if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
1349 +- ast->support_wide_screen = true;
1350 +- if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
1351 +- ast->support_wide_screen = true;
1352 +- }
1353 ++ if (ast->chip == AST2300 &&
1354 ++ (scu_rev & 0x300) == 0x0) /* ast1300 */
1355 ++ ast->support_wide_screen = true;
1356 ++ if (ast->chip == AST2400 &&
1357 ++ (scu_rev & 0x300) == 0x100) /* ast1400 */
1358 ++ ast->support_wide_screen = true;
1359 + }
1360 + break;
1361 + }
1362 +@@ -220,85 +261,102 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
1363 +
1364 + static int ast_get_dram_info(struct drm_device *dev)
1365 + {
1366 ++ struct device_node *np = dev->pdev->dev.of_node;
1367 + struct ast_private *ast = dev->dev_private;
1368 +- uint32_t data, data2;
1369 +- uint32_t denum, num, div, ref_pll;
1370 ++ uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap;
1371 ++ uint32_t denum, num, div, ref_pll, dsel;
1372 +
1373 +- if (ast->DisableP2A)
1374 +- {
1375 ++ switch (ast->config_mode) {
1376 ++ case ast_use_dt:
1377 ++ /*
1378 ++ * If some properties are missing, use reasonable
1379 ++ * defaults for AST2400
1380 ++ */
1381 ++ if (of_property_read_u32(np, "aspeed,mcr-configuration",
1382 ++ &mcr_cfg))
1383 ++ mcr_cfg = 0x00000577;
1384 ++ if (of_property_read_u32(np, "aspeed,mcr-scu-mpll",
1385 ++ &mcr_scu_mpll))
1386 ++ mcr_scu_mpll = 0x000050C0;
1387 ++ if (of_property_read_u32(np, "aspeed,mcr-scu-strap",
1388 ++ &mcr_scu_strap))
1389 ++ mcr_scu_strap = 0;
1390 ++ break;
1391 ++ case ast_use_p2a:
1392 ++ ast_write32(ast, 0xf004, 0x1e6e0000);
1393 ++ ast_write32(ast, 0xf000, 0x1);
1394 ++ mcr_cfg = ast_read32(ast, 0x10004);
1395 ++ mcr_scu_mpll = ast_read32(ast, 0x10120);
1396 ++ mcr_scu_strap = ast_read32(ast, 0x10170);
1397 ++ break;
1398 ++ case ast_use_defaults:
1399 ++ default:
1400 + ast->dram_bus_width = 16;
1401 + ast->dram_type = AST_DRAM_1Gx16;
1402 + ast->mclk = 396;
1403 ++ return 0;
1404 + }
1405 +- else
1406 +- {
1407 +- ast_write32(ast, 0xf004, 0x1e6e0000);
1408 +- ast_write32(ast, 0xf000, 0x1);
1409 +- data = ast_read32(ast, 0x10004);
1410 +-
1411 +- if (data & 0x40)
1412 +- ast->dram_bus_width = 16;
1413 +- else
1414 +- ast->dram_bus_width = 32;
1415 +
1416 +- if (ast->chip == AST2300 || ast->chip == AST2400) {
1417 +- switch (data & 0x03) {
1418 +- case 0:
1419 +- ast->dram_type = AST_DRAM_512Mx16;
1420 +- break;
1421 +- default:
1422 +- case 1:
1423 +- ast->dram_type = AST_DRAM_1Gx16;
1424 +- break;
1425 +- case 2:
1426 +- ast->dram_type = AST_DRAM_2Gx16;
1427 +- break;
1428 +- case 3:
1429 +- ast->dram_type = AST_DRAM_4Gx16;
1430 +- break;
1431 +- }
1432 +- } else {
1433 +- switch (data & 0x0c) {
1434 +- case 0:
1435 +- case 4:
1436 +- ast->dram_type = AST_DRAM_512Mx16;
1437 +- break;
1438 +- case 8:
1439 +- if (data & 0x40)
1440 +- ast->dram_type = AST_DRAM_1Gx16;
1441 +- else
1442 +- ast->dram_type = AST_DRAM_512Mx32;
1443 +- break;
1444 +- case 0xc:
1445 +- ast->dram_type = AST_DRAM_1Gx32;
1446 +- break;
1447 +- }
1448 +- }
1449 ++ if (mcr_cfg & 0x40)
1450 ++ ast->dram_bus_width = 16;
1451 ++ else
1452 ++ ast->dram_bus_width = 32;
1453 +
1454 +- data = ast_read32(ast, 0x10120);
1455 +- data2 = ast_read32(ast, 0x10170);
1456 +- if (data2 & 0x2000)
1457 +- ref_pll = 14318;
1458 +- else
1459 +- ref_pll = 12000;
1460 +-
1461 +- denum = data & 0x1f;
1462 +- num = (data & 0x3fe0) >> 5;
1463 +- data = (data & 0xc000) >> 14;
1464 +- switch (data) {
1465 +- case 3:
1466 +- div = 0x4;
1467 ++ if (ast->chip == AST2300 || ast->chip == AST2400) {
1468 ++ switch (mcr_cfg & 0x03) {
1469 ++ case 0:
1470 ++ ast->dram_type = AST_DRAM_512Mx16;
1471 + break;
1472 +- case 2:
1473 ++ default:
1474 + case 1:
1475 +- div = 0x2;
1476 ++ ast->dram_type = AST_DRAM_1Gx16;
1477 + break;
1478 +- default:
1479 +- div = 0x1;
1480 ++ case 2:
1481 ++ ast->dram_type = AST_DRAM_2Gx16;
1482 ++ break;
1483 ++ case 3:
1484 ++ ast->dram_type = AST_DRAM_4Gx16;
1485 ++ break;
1486 ++ }
1487 ++ } else {
1488 ++ switch (mcr_cfg & 0x0c) {
1489 ++ case 0:
1490 ++ case 4:
1491 ++ ast->dram_type = AST_DRAM_512Mx16;
1492 ++ break;
1493 ++ case 8:
1494 ++ if (mcr_cfg & 0x40)
1495 ++ ast->dram_type = AST_DRAM_1Gx16;
1496 ++ else
1497 ++ ast->dram_type = AST_DRAM_512Mx32;
1498 ++ break;
1499 ++ case 0xc:
1500 ++ ast->dram_type = AST_DRAM_1Gx32;
1501 + break;
1502 + }
1503 +- ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
1504 + }
1505 ++
1506 ++ if (mcr_scu_strap & 0x2000)
1507 ++ ref_pll = 14318;
1508 ++ else
1509 ++ ref_pll = 12000;
1510 ++
1511 ++ denum = mcr_scu_mpll & 0x1f;
1512 ++ num = (mcr_scu_mpll & 0x3fe0) >> 5;
1513 ++ dsel = (mcr_scu_mpll & 0xc000) >> 14;
1514 ++ switch (dsel) {
1515 ++ case 3:
1516 ++ div = 0x4;
1517 ++ break;
1518 ++ case 2:
1519 ++ case 1:
1520 ++ div = 0x2;
1521 ++ break;
1522 ++ default:
1523 ++ div = 0x1;
1524 ++ break;
1525 ++ }
1526 ++ ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
1527 + return 0;
1528 + }
1529 +
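The MPLL decode above packs three fields into one SCU register: bits [4:0] hold the denominator, bits [13:5] the numerator, and bits [15:14] select the output divider. A minimal userspace sketch of that extraction, fed with the 0x000050C0 default the new device-tree path programs (the register layout is taken from the hunk above, not from an Aspeed datasheet):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t mpll = 0x000050C0;              /* dt-path default from the patch */
	uint32_t denum = mpll & 0x1f;            /* bits [4:0]   */
	uint32_t num = (mpll & 0x3fe0) >> 5;     /* bits [13:5]  */
	uint32_t dsel = (mpll & 0xc000) >> 14;   /* bits [15:14] */
	uint32_t div = (dsel == 3) ? 4 : (dsel >= 1) ? 2 : 1;

	printf("denum=%u num=%u dsel=%u div=%u\n", denum, num, dsel, div);
	return 0;
}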
1530 +diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
1531 +index 270e8fb2803f..c7c58becb25d 100644
1532 +--- a/drivers/gpu/drm/ast/ast_post.c
1533 ++++ b/drivers/gpu/drm/ast/ast_post.c
1534 +@@ -375,17 +375,14 @@ void ast_post_gpu(struct drm_device *dev)
1535 + ast_enable_mmio(dev);
1536 + ast_set_def_ext_reg(dev);
1537 +
1538 +- if (ast->DisableP2A == false)
1539 +- {
1540 ++ if (ast->config_mode == ast_use_p2a) {
1541 + if (ast->chip == AST2300 || ast->chip == AST2400)
1542 + ast_init_dram_2300(dev);
1543 + else
1544 + ast_init_dram_reg(dev);
1545 +
1546 + ast_init_3rdtx(dev);
1547 +- }
1548 +- else
1549 +- {
1550 ++ } else {
1551 + if (ast->tx_chip_type != AST_TX_NONE)
1552 + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */
1553 + }
1554 +diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
1555 +index a9b01bcf7d0a..fcecaf5b5526 100644
1556 +--- a/drivers/gpu/drm/radeon/radeon_combios.c
1557 ++++ b/drivers/gpu/drm/radeon/radeon_combios.c
1558 +@@ -3394,6 +3394,13 @@ void radeon_combios_asic_init(struct drm_device *dev)
1559 + rdev->pdev->subsystem_vendor == 0x103c &&
1560 + rdev->pdev->subsystem_device == 0x280a)
1561 + return;
1562 ++ /* quirk for rs4xx Toshiba Satellite L20-183 laptop to make it resume
1563 ++ * - it hangs on resume inside the dynclk 1 table.
1564 ++ */
1565 ++ if (rdev->family == CHIP_RS400 &&
1566 ++ rdev->pdev->subsystem_vendor == 0x1179 &&
1567 ++ rdev->pdev->subsystem_device == 0xff31)
1568 ++ return;
1569 +
1570 + /* DYN CLK 1 */
1571 + table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
1572 +diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
1573 +index 83b3eb2e444a..3c74c60fb8ea 100644
1574 +--- a/drivers/gpu/drm/radeon/radeon_device.c
1575 ++++ b/drivers/gpu/drm/radeon/radeon_device.c
1576 +@@ -127,6 +127,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = {
1577 + * https://bugzilla.kernel.org/show_bug.cgi?id=51381
1578 + */
1579 + { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
1580 ++ /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
1581 ++ * https://bugs.freedesktop.org/show_bug.cgi?id=101491
1582 ++ */
1583 ++ { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
1584 + /* macbook pro 8.2 */
1585 + { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
1586 + { 0, 0, 0, 0, 0 },
1587 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
1588 +index 21e9b7f8dad0..c3b8ebac18c2 100644
1589 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
1590 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
1591 +@@ -317,6 +317,7 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man)
1592 + list_for_each_entry_safe(entry, next, &man->list, head)
1593 + vmw_cmdbuf_res_free(man, entry);
1594 +
1595 ++ drm_ht_remove(&man->resources);
1596 + kfree(man);
1597 + }
1598 +
1599 +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
1600 +index bf039dbaa7eb..07a963039b60 100644
1601 +--- a/drivers/hid/hid-core.c
1602 ++++ b/drivers/hid/hid-core.c
1603 +@@ -1228,6 +1228,7 @@ static void hid_input_field(struct hid_device *hid, struct hid_field *field,
1604 + /* Ignore report if ErrorRollOver */
1605 + if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
1606 + value[n] >= min && value[n] <= max &&
1607 ++ value[n] - min < field->maxusage &&
1608 + field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1)
1609 + goto exit;
1610 + }
1611 +@@ -1240,11 +1241,13 @@ static void hid_input_field(struct hid_device *hid, struct hid_field *field,
1612 + }
1613 +
1614 + if (field->value[n] >= min && field->value[n] <= max
1615 ++ && field->value[n] - min < field->maxusage
1616 + && field->usage[field->value[n] - min].hid
1617 + && search(value, field->value[n], count))
1618 + hid_process_event(hid, field, &field->usage[field->value[n] - min], 0, interrupt);
1619 +
1620 + if (value[n] >= min && value[n] <= max
1621 ++ && value[n] - min < field->maxusage
1622 + && field->usage[value[n] - min].hid
1623 + && search(field->value, value[n], count))
1624 + hid_process_event(hid, field, &field->usage[value[n] - min], 1, interrupt);
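The three added `... - min < field->maxusage` guards all close the same hole: the device itself declares the logical min/max range, so a hostile or malformed report can supply a value whose index runs past the end of the allocated usage array. A standalone sketch of the guard with toy sizes (not the real HID structures):

#include <stdio.h>

#define MAXUSAGE 4

int main(void)
{
	int usage[MAXUSAGE] = { 10, 11, 12, 13 };
	int min = 0, max = 255;   /* device-declared logical range */
	int value = 200;          /* device-supplied, inside min..max */

	if (value >= min && value <= max && value - min < MAXUSAGE)
		printf("usage=%d\n", usage[value - min]);
	else
		printf("rejected out-of-range index %d\n", value - min);
	return 0;
}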
1625 +diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
1626 +index c4c9d9523694..1ec738292a1a 100644
1627 +--- a/drivers/hid/i2c-hid/i2c-hid.c
1628 ++++ b/drivers/hid/i2c-hid/i2c-hid.c
1629 +@@ -362,6 +362,15 @@ static int i2c_hid_hwreset(struct i2c_client *client)
1630 + if (ret)
1631 + return ret;
1632 +
1633 ++ /*
1634 ++ * The HID over I2C specification states that if a DEVICE needs time
1635 ++ * after the PWR_ON request, it should utilise CLOCK stretching.
1636 ++ * However, it has been observed that the Windows driver provides a
1637 ++ * 1ms sleep between the PWR_ON and RESET requests and that some devices
1638 ++ * rely on this.
1639 ++ */
1640 ++ usleep_range(1000, 5000);
1641 ++
1642 + i2c_hid_dbg(ihid, "resetting...\n");
1643 +
1644 + ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0);
1645 +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
1646 +index 785570272505..1f40cdc1b357 100644
1647 +--- a/drivers/input/serio/i8042-x86ia64io.h
1648 ++++ b/drivers/input/serio/i8042-x86ia64io.h
1649 +@@ -698,6 +698,13 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
1650 + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"),
1651 + },
1652 + },
1653 ++ {
1654 ++ /* Fujitsu UH554 laptop */
1655 ++ .matches = {
1656 ++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
1657 ++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK UH544"),
1658 ++ },
1659 ++ },
1660 + { }
1661 + };
1662 +
1663 +diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
1664 +index 45087c3e5c57..d29c499375cb 100644
1665 +--- a/drivers/iommu/amd_iommu_v2.c
1666 ++++ b/drivers/iommu/amd_iommu_v2.c
1667 +@@ -675,9 +675,9 @@ out_clear_state:
1668 +
1669 + out_unregister:
1670 + mmu_notifier_unregister(&pasid_state->mn, mm);
1671 ++ mmput(mm);
1672 +
1673 + out_free:
1674 +- mmput(mm);
1675 + free_pasid_state(pasid_state);
1676 +
1677 + out:
1678 +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
1679 +index b85a8614c128..275f59071f56 100644
1680 +--- a/drivers/iommu/intel-iommu.c
1681 ++++ b/drivers/iommu/intel-iommu.c
1682 +@@ -965,7 +965,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level,
1683 + if (!dma_pte_present(pte) || dma_pte_superpage(pte))
1684 + goto next;
1685 +
1686 +- level_pfn = pfn & level_mask(level - 1);
1687 ++ level_pfn = pfn & level_mask(level);
1688 + level_pte = phys_to_virt(dma_pte_addr(pte));
1689 +
1690 + if (level > 2)
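For context on the one-character fix above: this driver's page tables use a 9-bit index per level, and level_mask(l) is meant to clear the index bits below level l. Masking with level - 1 leaves the current level's own index bits set, so level_pfn could land in the middle of the range the PTE covers. A rough illustration, assuming the 9-bit stride (pfn space, page offset already stripped):

#include <stdio.h>

/* assumed shape of the driver's helper: clear index bits below level l */
static unsigned long level_mask(int level)
{
	return ~0UL << ((level - 1) * 9);
}

int main(void)
{
	unsigned long pfn = 0x12345;

	printf("mask(level=2):   0x%lx\n", pfn & level_mask(2));
	printf("mask(level-1=1): 0x%lx\n", pfn & level_mask(1));
	return 0;
}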
1691 +diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
1692 +index f1b15a0b3774..9976c37b9c64 100644
1693 +--- a/drivers/irqchip/irq-gic-v3.c
1694 ++++ b/drivers/irqchip/irq-gic-v3.c
1695 +@@ -612,6 +612,9 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
1696 + int enabled;
1697 + u64 val;
1698 +
1699 ++ if (cpu >= nr_cpu_ids)
1700 ++ return -EINVAL;
1701 ++
1702 + if (gic_irq_in_rdist(d))
1703 + return -EINVAL;
1704 +
1705 +diff --git a/drivers/md/md.c b/drivers/md/md.c
1706 +index 7453c3ed4b8f..1fdcd5735418 100644
1707 +--- a/drivers/md/md.c
1708 ++++ b/drivers/md/md.c
1709 +@@ -1852,7 +1852,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1710 + }
1711 + sb = page_address(rdev->sb_page);
1712 + sb->data_size = cpu_to_le64(num_sectors);
1713 +- sb->super_offset = rdev->sb_start;
1714 ++ sb->super_offset = cpu_to_le64(rdev->sb_start);
1715 + sb->sb_csum = calc_sb_1_csum(sb);
1716 + md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1717 + rdev->sb_page);
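The md change matters because super_offset is a little-endian on-disk field; assigning a raw sector_t only works by accident on little-endian hosts and writes byte-swapped garbage on big-endian ones. The same conversion in userspace, with glibc's htole64() standing in for cpu_to_le64():

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t sb_start = 8;                 /* CPU-endian sector number */
	uint64_t on_disk = htole64(sb_start);  /* fixed little-endian layout */

	printf("cpu=%llu, first on-disk byte=0x%02x\n",
	       (unsigned long long)sb_start,
	       ((unsigned char *)&on_disk)[0]);
	return 0;
}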
1718 +diff --git a/drivers/media/pci/saa7134/saa7134-i2c.c b/drivers/media/pci/saa7134/saa7134-i2c.c
1719 +index f4da674e7f26..ae651007ee0f 100644
1720 +--- a/drivers/media/pci/saa7134/saa7134-i2c.c
1721 ++++ b/drivers/media/pci/saa7134/saa7134-i2c.c
1722 +@@ -350,12 +350,43 @@ static struct i2c_client saa7134_client_template = {
1723 +
1724 + /* ----------------------------------------------------------- */
1725 +
1726 ++/* On Medion 7134 reading EEPROM needs DVB-T demod i2c gate open */
1727 ++static void saa7134_i2c_eeprom_md7134_gate(struct saa7134_dev *dev)
1728 ++{
1729 ++ u8 subaddr = 0x7, dmdregval;
1730 ++ u8 data[2];
1731 ++ int ret;
1732 ++ struct i2c_msg i2cgatemsg_r[] = { {.addr = 0x08, .flags = 0,
1733 ++ .buf = &subaddr, .len = 1},
1734 ++ {.addr = 0x08,
1735 ++ .flags = I2C_M_RD,
1736 ++ .buf = &dmdregval, .len = 1}
1737 ++ };
1738 ++ struct i2c_msg i2cgatemsg_w[] = { {.addr = 0x08, .flags = 0,
1739 ++ .buf = data, .len = 2} };
1740 ++
1741 ++ ret = i2c_transfer(&dev->i2c_adap, i2cgatemsg_r, 2);
1742 ++ if ((ret == 2) && (dmdregval & 0x2)) {
1743 ++ pr_debug("%s: DVB-T demod i2c gate was left closed\n",
1744 ++ dev->name);
1745 ++
1746 ++ data[0] = subaddr;
1747 ++ data[1] = (dmdregval & ~0x2);
1748 ++ if (i2c_transfer(&dev->i2c_adap, i2cgatemsg_w, 1) != 1)
1749 ++ pr_err("%s: EEPROM i2c gate open failure\n",
1750 ++ dev->name);
1751 ++ }
1752 ++}
1753 ++
1754 + static int
1755 + saa7134_i2c_eeprom(struct saa7134_dev *dev, unsigned char *eedata, int len)
1756 + {
1757 + unsigned char buf;
1758 + int i,err;
1759 +
1760 ++ if (dev->board == SAA7134_BOARD_MD7134)
1761 ++ saa7134_i2c_eeprom_md7134_gate(dev);
1762 ++
1763 + dev->i2c_client.addr = 0xa0 >> 1;
1764 + buf = 0;
1765 + if (1 != (err = i2c_master_send(&dev->i2c_client,&buf,1))) {
1766 +diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
1767 +index 5abab8800891..9190057535e6 100644
1768 +--- a/drivers/mtd/bcm47xxpart.c
1769 ++++ b/drivers/mtd/bcm47xxpart.c
1770 +@@ -66,11 +66,13 @@ static const char *bcm47xxpart_trx_data_part_name(struct mtd_info *master,
1771 + {
1772 + uint32_t buf;
1773 + size_t bytes_read;
1774 ++ int err;
1775 +
1776 +- if (mtd_read(master, offset, sizeof(buf), &bytes_read,
1777 +- (uint8_t *)&buf) < 0) {
1778 +- pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
1779 +- offset);
1780 ++ err = mtd_read(master, offset, sizeof(buf), &bytes_read,
1781 ++ (uint8_t *)&buf);
1782 ++ if (err && !mtd_is_bitflip(err)) {
1783 ++ pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
1784 ++ offset, err);
1785 + goto out_default;
1786 + }
1787 +
1788 +@@ -95,6 +97,7 @@ static int bcm47xxpart_parse(struct mtd_info *master,
1789 + int trx_part = -1;
1790 + int last_trx_part = -1;
1791 + int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, };
1792 ++ int err;
1793 +
1794 + /*
1795 + * Some really old flashes (like AT45DB*) had smaller erasesize-s, but
1796 +@@ -118,8 +121,8 @@ static int bcm47xxpart_parse(struct mtd_info *master,
1797 + /* Parse block by block looking for magics */
1798 + for (offset = 0; offset <= master->size - blocksize;
1799 + offset += blocksize) {
1800 +- /* Nothing more in higher memory */
1801 +- if (offset >= 0x2000000)
1802 ++ /* Nothing more in higher memory on BCM47XX (MIPS) */
1803 ++ if (config_enabled(CONFIG_BCM47XX) && offset >= 0x2000000)
1804 + break;
1805 +
1806 + if (curr_part >= BCM47XXPART_MAX_PARTS) {
1807 +@@ -128,10 +131,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
1808 + }
1809 +
1810 + /* Read beginning of the block */
1811 +- if (mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ,
1812 +- &bytes_read, (uint8_t *)buf) < 0) {
1813 +- pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
1814 +- offset);
1815 ++ err = mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ,
1816 ++ &bytes_read, (uint8_t *)buf);
1817 ++ if (err && !mtd_is_bitflip(err)) {
1818 ++ pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
1819 ++ offset, err);
1820 + continue;
1821 + }
1822 +
1823 +@@ -252,10 +256,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
1824 + }
1825 +
1826 + /* Read middle of the block */
1827 +- if (mtd_read(master, offset + 0x8000, 0x4,
1828 +- &bytes_read, (uint8_t *)buf) < 0) {
1829 +- pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
1830 +- offset);
1831 ++ err = mtd_read(master, offset + 0x8000, 0x4, &bytes_read,
1832 ++ (uint8_t *)buf);
1833 ++ if (err && !mtd_is_bitflip(err)) {
1834 ++ pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
1835 ++ offset, err);
1836 + continue;
1837 + }
1838 +
1839 +@@ -275,10 +280,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
1840 + }
1841 +
1842 + offset = master->size - possible_nvram_sizes[i];
1843 +- if (mtd_read(master, offset, 0x4, &bytes_read,
1844 +- (uint8_t *)buf) < 0) {
1845 +- pr_err("mtd_read error while reading at offset 0x%X!\n",
1846 +- offset);
1847 ++ err = mtd_read(master, offset, 0x4, &bytes_read,
1848 ++ (uint8_t *)buf);
1849 ++ if (err && !mtd_is_bitflip(err)) {
1850 ++ pr_err("mtd_read error while reading (offset 0x%X): %d\n",
1851 ++ offset, err);
1852 + continue;
1853 + }
1854 +
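All four hunks in this parser apply the same rule: -EUCLEAN (what mtd_is_bitflip() tests for) means ECC corrected bitflips but the data read back is valid, so parsing should continue; only hard errors abort. The dispatch in miniature:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* mirrors mtd_is_bitflip(): corrected bitflips, data still valid */
static bool is_bitflip(int err)
{
	return err == -EUCLEAN;
}

static void handle(int err)
{
	if (err && !is_bitflip(err))
		printf("err=%d: hard failure, give up on this block\n", err);
	else
		printf("err=%d: data usable, keep parsing\n", err);
}

int main(void)
{
	handle(0);
	handle(-EUCLEAN);
	handle(-EIO);
	return 0;
}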
1855 +diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
1856 +index 14a5d2325dac..ac2d68d6d446 100644
1857 +--- a/drivers/mtd/spi-nor/spi-nor.c
1858 ++++ b/drivers/mtd/spi-nor/spi-nor.c
1859 +@@ -900,6 +900,13 @@ static int spansion_quad_enable(struct spi_nor *nor)
1860 + return -EINVAL;
1861 + }
1862 +
1863 ++ ret = spi_nor_wait_till_ready(nor);
1864 ++ if (ret) {
1865 ++ dev_err(nor->dev,
1866 ++ "timeout while writing configuration register\n");
1867 ++ return ret;
1868 ++ }
1869 ++
1870 + /* read back and check it */
1871 + ret = read_cr(nor);
1872 + if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
1873 +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
1874 +index 21d9497518fd..8b3c60b1f486 100644
1875 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
1876 ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
1877 +@@ -2779,8 +2779,10 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
1878 +
1879 + /* Flush Tx queues */
1880 + ret = xgbe_flush_tx_queues(pdata);
1881 +- if (ret)
1882 ++ if (ret) {
1883 ++ netdev_err(pdata->netdev, "error flushing TX queues\n");
1884 + return ret;
1885 ++ }
1886 +
1887 + /*
1888 + * Initialize DMA related features
1889 +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
1890 +index 9fd6c69a8bac..eea5b58496a3 100644
1891 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
1892 ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
1893 +@@ -951,7 +951,9 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
1894 +
1895 + DBGPR("-->xgbe_start\n");
1896 +
1897 +- hw_if->init(pdata);
1898 ++ ret = hw_if->init(pdata);
1899 ++ if (ret)
1900 ++ return ret;
1901 +
1902 + phy_start(pdata->phydev);
1903 +
1904 +diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
1905 +index 21e3c38c7c75..6f0aad85c524 100644
1906 +--- a/drivers/net/ethernet/broadcom/bgmac.c
1907 ++++ b/drivers/net/ethernet/broadcom/bgmac.c
1908 +@@ -255,15 +255,16 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
1909 + while (ring->start != ring->end) {
1910 + int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
1911 + struct bgmac_slot_info *slot = &ring->slots[slot_idx];
1912 +- u32 ctl1;
1913 ++ u32 ctl0, ctl1;
1914 + int len;
1915 +
1916 + if (slot_idx == empty_slot)
1917 + break;
1918 +
1919 ++ ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0);
1920 + ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
1921 + len = ctl1 & BGMAC_DESC_CTL1_LEN;
1922 +- if (ctl1 & BGMAC_DESC_CTL0_SOF)
1923 ++ if (ctl0 & BGMAC_DESC_CTL0_SOF)
1924 + /* Unmap no longer used buffer */
1925 + dma_unmap_single(dma_dev, slot->dma_addr, len,
1926 + DMA_TO_DEVICE);
1927 +@@ -466,6 +467,11 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
1928 + len -= ETH_FCS_LEN;
1929 +
1930 + skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
1931 ++ if (unlikely(!skb)) {
1932 ++ bgmac_err(bgmac, "build_skb failed\n");
1933 ++ put_page(virt_to_head_page(buf));
1934 ++ break;
1935 ++ }
1936 + skb_put(skb, BGMAC_RX_FRAME_OFFSET +
1937 + BGMAC_RX_BUF_OFFSET + len);
1938 + skb_pull(skb, BGMAC_RX_FRAME_OFFSET +
1939 +@@ -1299,7 +1305,8 @@ static int bgmac_open(struct net_device *net_dev)
1940 +
1941 + phy_start(bgmac->phy_dev);
1942 +
1943 +- netif_carrier_on(net_dev);
1944 ++ netif_start_queue(net_dev);
1945 ++
1946 + return 0;
1947 + }
1948 +
1949 +@@ -1564,6 +1571,11 @@ static int bgmac_probe(struct bcma_device *core)
1950 + dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
1951 + }
1952 +
1953 ++ /* This (reset &) enable is not present in specs or reference driver but
1954 ++ * Broadcom does it in arch PCI code when enabling fake PCI device.
1955 ++ */
1956 ++ bcma_core_enable(core, 0);
1957 ++
1958 + /* Allocation and references */
1959 + net_dev = alloc_etherdev(sizeof(*bgmac));
1960 + if (!net_dev)
1961 +diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
1962 +index c5e1d0ac75f9..8f3ef77902b9 100644
1963 +--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
1964 ++++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
1965 +@@ -1017,7 +1017,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
1966 + err:
1967 + spin_unlock_bh(&adapter->mcc_lock);
1968 +
1969 +- if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
1970 ++ if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
1971 + status = -EPERM;
1972 +
1973 + return status;
1974 +diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
1975 +index d74f5f4e5782..07eabf72c480 100644
1976 +--- a/drivers/net/ethernet/korina.c
1977 ++++ b/drivers/net/ethernet/korina.c
1978 +@@ -900,10 +900,10 @@ static void korina_restart_task(struct work_struct *work)
1979 + DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR,
1980 + &lp->rx_dma_regs->dmasm);
1981 +
1982 +- korina_free_ring(dev);
1983 +-
1984 + napi_disable(&lp->napi);
1985 +
1986 ++ korina_free_ring(dev);
1987 ++
1988 + if (korina_init(dev) < 0) {
1989 + printk(KERN_ERR "%s: cannot restart device\n", dev->name);
1990 + return;
1991 +@@ -1064,12 +1064,12 @@ static int korina_close(struct net_device *dev)
1992 + tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR;
1993 + writel(tmp, &lp->rx_dma_regs->dmasm);
1994 +
1995 +- korina_free_ring(dev);
1996 +-
1997 + napi_disable(&lp->napi);
1998 +
1999 + cancel_work_sync(&lp->restart_task);
2000 +
2001 ++ korina_free_ring(dev);
2002 ++
2003 + free_irq(lp->rx_irq, dev);
2004 + free_irq(lp->tx_irq, dev);
2005 + free_irq(lp->ovr_irq, dev);
2006 +diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
2007 +index 337811d208bd..fdc129151b18 100644
2008 +--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
2009 ++++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
2010 +@@ -514,8 +514,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
2011 + break;
2012 +
2013 + case MLX4_EVENT_TYPE_SRQ_LIMIT:
2014 +- mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
2015 +- __func__);
2016 ++ mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n",
2017 ++ __func__, be32_to_cpu(eqe->event.srq.srqn),
2018 ++ eq->eqn);
2019 + case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
2020 + if (mlx4_is_master(dev)) {
2021 + /* forward only to slave owning the SRQ */
2022 +@@ -530,15 +531,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
2023 + eq->eqn, eq->cons_index, ret);
2024 + break;
2025 + }
2026 +- mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
2027 +- __func__, slave,
2028 +- be32_to_cpu(eqe->event.srq.srqn),
2029 +- eqe->type, eqe->subtype);
2030 ++ if (eqe->type ==
2031 ++ MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
2032 ++ mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
2033 ++ __func__, slave,
2034 ++ be32_to_cpu(eqe->event.srq.srqn),
2035 ++ eqe->type, eqe->subtype);
2036 +
2037 + if (!ret && slave != dev->caps.function) {
2038 +- mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
2039 +- __func__, eqe->type,
2040 +- eqe->subtype, slave);
2041 ++ if (eqe->type ==
2042 ++ MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
2043 ++ mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
2044 ++ __func__, eqe->type,
2045 ++ eqe->subtype, slave);
2046 + mlx4_slave_event(dev, slave, eqe);
2047 + break;
2048 + }
2049 +diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
2050 +index c56cf0b86f2c..09e3e0d8412e 100644
2051 +--- a/drivers/net/phy/dp83640.c
2052 ++++ b/drivers/net/phy/dp83640.c
2053 +@@ -895,7 +895,7 @@ static void decode_txts(struct dp83640_private *dp83640,
2054 + if (overflow) {
2055 + pr_debug("tx timestamp queue overflow, count %d\n", overflow);
2056 + while (skb) {
2057 +- skb_complete_tx_timestamp(skb, NULL);
2058 ++ kfree_skb(skb);
2059 + skb = skb_dequeue(&dp83640->tx_queue);
2060 + }
2061 + return;
2062 +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
2063 +index ef36e8c70b4d..64ca961bca18 100644
2064 +--- a/drivers/net/virtio_net.c
2065 ++++ b/drivers/net/virtio_net.c
2066 +@@ -47,8 +47,16 @@ module_param(gso, bool, 0444);
2067 + */
2068 + #define RECEIVE_AVG_WEIGHT 64
2069 +
2070 ++/* With mergeable buffers we align buffer address and use the low bits to
2071 ++ * encode its true size. Buffer size is up to 1 page so we need to align to
2072 ++ * square root of page size to ensure we reserve enough bits to encode the true
2073 ++ * size.
2074 ++ */
2075 ++#define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2)
2076 ++
2077 + /* Minimum alignment for mergeable packet buffers. */
2078 +-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
2079 ++#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, \
2080 ++ 1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT)
2081 +
2082 + #define VIRTNET_DRIVER_VERSION "1.0.0"
2083 +
2084 +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
2085 +index d9e873c3a273..422a9379a644 100644
2086 +--- a/drivers/net/vxlan.c
2087 ++++ b/drivers/net/vxlan.c
2088 +@@ -2136,7 +2136,7 @@ static void vxlan_cleanup(unsigned long arg)
2089 + = container_of(p, struct vxlan_fdb, hlist);
2090 + unsigned long timeout;
2091 +
2092 +- if (f->state & NUD_PERMANENT)
2093 ++ if (f->state & (NUD_PERMANENT | NUD_NOARP))
2094 + continue;
2095 +
2096 + timeout = f->used + vxlan->age_interval * HZ;
2097 +diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
2098 +index 8a15ebbce4a3..c304b66af5c6 100644
2099 +--- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
2100 ++++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
2101 +@@ -4384,6 +4384,11 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
2102 + cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true,
2103 + GFP_KERNEL);
2104 + } else if (ieee80211_is_action(mgmt->frame_control)) {
2105 ++ if (len > BRCMF_FIL_ACTION_FRAME_SIZE + DOT11_MGMT_HDR_LEN) {
2106 ++ brcmf_err("invalid action frame length\n");
2107 ++ err = -EINVAL;
2108 ++ goto exit;
2109 ++ }
2110 + af_params = kzalloc(sizeof(*af_params), GFP_KERNEL);
2111 + if (af_params == NULL) {
2112 + brcmf_err("unable to allocate frame\n");
2113 +diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
2114 +index eafaeb01aa3e..cdbad7d72afa 100644
2115 +--- a/drivers/net/wireless/mac80211_hwsim.c
2116 ++++ b/drivers/net/wireless/mac80211_hwsim.c
2117 +@@ -2538,7 +2538,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
2118 +
2119 + tasklet_hrtimer_init(&data->beacon_timer,
2120 + mac80211_hwsim_beacon,
2121 +- CLOCK_MONOTONIC_RAW, HRTIMER_MODE_ABS);
2122 ++ CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2123 +
2124 + spin_lock_bh(&hwsim_radio_lock);
2125 + list_add_tail(&data->list, &hwsim_radios);
2126 +diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
2127 +index ea1be52f5515..8a38a5bd34b8 100644
2128 +--- a/drivers/net/xen-netfront.c
2129 ++++ b/drivers/net/xen-netfront.c
2130 +@@ -305,7 +305,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
2131 + queue->rx_skbs[id] = skb;
2132 +
2133 + ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
2134 +- BUG_ON((signed short)ref < 0);
2135 ++ WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
2136 + queue->grant_rx_ref[id] = ref;
2137 +
2138 + pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
2139 +@@ -323,7 +323,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
2140 + queue->rx.req_prod_pvt = req_prod;
2141 +
2142 + /* Not enough requests? Try again later. */
2143 +- if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) {
2144 ++ if (req_prod - queue->rx.sring->req_prod < NET_RX_SLOTS_MIN) {
2145 + mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
2146 + return;
2147 + }
2148 +@@ -429,7 +429,7 @@ static struct xen_netif_tx_request *xennet_make_one_txreq(
2149 + id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
2150 + tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
2151 + ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
2152 +- BUG_ON((signed short)ref < 0);
2153 ++ WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
2154 +
2155 + gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
2156 + page_to_mfn(page), GNTMAP_readonly);
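The interesting part of the netfront change is the test itself: grant references are unsigned, so a negative error from gnttab_claim_grant_reference() has to be recovered by casting and comparing against the kernel's errno window. IS_ERR_VALUE() treats the top MAX_ERRNO values of an unsigned long as encoded errors; a userspace rendition, assuming MAX_ERRNO = 4095:

#include <stdbool.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* assumed to mirror the kernel's IS_ERR_VALUE() */
static bool is_err_value(unsigned long x)
{
	return x >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	int good_ref = 42;
	int bad_ref = -12;	/* e.g. -ENOMEM from the allocator */

	printf("ref=%d -> err=%d\n", good_ref,
	       (int)is_err_value((unsigned long)(long)good_ref));
	printf("ref=%d -> err=%d\n", bad_ref,
	       (int)is_err_value((unsigned long)(long)bad_ref));
	return 0;
}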
2157 +diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
2158 +index 63ea1e5b1c95..bf89754fe973 100644
2159 +--- a/drivers/of/fdt.c
2160 ++++ b/drivers/of/fdt.c
2161 +@@ -618,9 +618,12 @@ int __init of_scan_flat_dt(int (*it)(unsigned long node,
2162 + const char *pathp;
2163 + int offset, rc = 0, depth = -1;
2164 +
2165 +- for (offset = fdt_next_node(blob, -1, &depth);
2166 +- offset >= 0 && depth >= 0 && !rc;
2167 +- offset = fdt_next_node(blob, offset, &depth)) {
2168 ++ if (!blob)
2169 ++ return 0;
2170 ++
2171 ++ for (offset = fdt_next_node(blob, -1, &depth);
2172 ++ offset >= 0 && depth >= 0 && !rc;
2173 ++ offset = fdt_next_node(blob, offset, &depth)) {
2174 +
2175 + pathp = fdt_get_name(blob, offset, NULL);
2176 + if (*pathp == '/')
2177 +diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
2178 +index 02ff84fcfa61..635e7c3a24ad 100644
2179 +--- a/drivers/parisc/ccio-dma.c
2180 ++++ b/drivers/parisc/ccio-dma.c
2181 +@@ -743,6 +743,8 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
2182 +
2183 + BUG_ON(!dev);
2184 + ioc = GET_IOC(dev);
2185 ++ if (!ioc)
2186 ++ return DMA_ERROR_CODE;
2187 +
2188 + BUG_ON(size <= 0);
2189 +
2190 +@@ -807,6 +809,10 @@ ccio_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
2191 +
2192 + BUG_ON(!dev);
2193 + ioc = GET_IOC(dev);
2194 ++ if (!ioc) {
2195 ++ WARN_ON(!ioc);
2196 ++ return;
2197 ++ }
2198 +
2199 + DBG_RUN("%s() iovp 0x%lx/%x\n",
2200 + __func__, (long)iova, size);
2201 +@@ -910,6 +916,8 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
2202 +
2203 + BUG_ON(!dev);
2204 + ioc = GET_IOC(dev);
2205 ++ if (!ioc)
2206 ++ return 0;
2207 +
2208 + DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
2209 +
2210 +@@ -982,6 +990,10 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
2211 +
2212 + BUG_ON(!dev);
2213 + ioc = GET_IOC(dev);
2214 ++ if (!ioc) {
2215 ++ WARN_ON(!ioc);
2216 ++ return;
2217 ++ }
2218 +
2219 + DBG_RUN_SG("%s() START %d entries, %p,%x\n",
2220 + __func__, nents, sg_virt(sglist), sglist->length);
2221 +diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
2222 +index a0580afe1713..7b0ca1551d7b 100644
2223 +--- a/drivers/parisc/dino.c
2224 ++++ b/drivers/parisc/dino.c
2225 +@@ -154,7 +154,10 @@ struct dino_device
2226 + };
2227 +
2228 + /* Looks nice and keeps the compiler happy */
2229 +-#define DINO_DEV(d) ((struct dino_device *) d)
2230 ++#define DINO_DEV(d) ({ \
2231 ++ void *__pdata = d; \
2232 ++ BUG_ON(!__pdata); \
2233 ++ (struct dino_device *)__pdata; })
2234 +
2235 +
2236 + /*
2237 +diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
2238 +index a32c1f6c252c..3901ff66d0ee 100644
2239 +--- a/drivers/parisc/lba_pci.c
2240 ++++ b/drivers/parisc/lba_pci.c
2241 +@@ -111,8 +111,10 @@ static u32 lba_t32;
2242 +
2243 +
2244 + /* Looks nice and keeps the compiler happy */
2245 +-#define LBA_DEV(d) ((struct lba_device *) (d))
2246 +-
2247 ++#define LBA_DEV(d) ({ \
2248 ++ void *__pdata = d; \
2249 ++ BUG_ON(!__pdata); \
2250 ++ (struct lba_device *)__pdata; })
2251 +
2252 + /*
2253 + ** Only allow 8 subsidiary busses per LBA
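DINO_DEV() and LBA_DEV() swap a bare cast for a GNU C statement expression so the NULL check runs once at each use site before the cast is handed on. The same pattern outside the kernel, with assert() standing in for BUG_ON() (GCC/Clang only, since statement expressions are an extension):

#include <assert.h>
#include <stdio.h>

struct dino_device { int hpa; };

/* statement expression: the block evaluates to its last expression */
#define DINO_DEV(d) ({ \
	void *__pdata = (d); \
	assert(__pdata); \
	(struct dino_device *)__pdata; })

int main(void)
{
	struct dino_device dev = { .hpa = 0x100 };
	void *pdata = &dev;	/* stands in for the driver's private data pointer */

	printf("hpa=0x%x\n", DINO_DEV(pdata)->hpa);
	return 0;
}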
2254 +diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
2255 +index f1441e466c06..d3243071509a 100644
2256 +--- a/drivers/parisc/sba_iommu.c
2257 ++++ b/drivers/parisc/sba_iommu.c
2258 +@@ -691,6 +691,8 @@ static int sba_dma_supported( struct device *dev, u64 mask)
2259 + return 0;
2260 +
2261 + ioc = GET_IOC(dev);
2262 ++ if (!ioc)
2263 ++ return 0;
2264 +
2265 + /*
2266 + * check if mask is >= than the current max IO Virt Address
2267 +@@ -722,6 +724,8 @@ sba_map_single(struct device *dev, void *addr, size_t size,
2268 + int pide;
2269 +
2270 + ioc = GET_IOC(dev);
2271 ++ if (!ioc)
2272 ++ return DMA_ERROR_CODE;
2273 +
2274 + /* save offset bits */
2275 + offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;
2276 +@@ -803,6 +807,10 @@ sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
2277 + DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);
2278 +
2279 + ioc = GET_IOC(dev);
2280 ++ if (!ioc) {
2281 ++ WARN_ON(!ioc);
2282 ++ return;
2283 ++ }
2284 + offset = iova & ~IOVP_MASK;
2285 + iova ^= offset; /* clear offset bits */
2286 + size += offset;
2287 +@@ -942,6 +950,8 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
2288 + DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
2289 +
2290 + ioc = GET_IOC(dev);
2291 ++ if (!ioc)
2292 ++ return 0;
2293 +
2294 + /* Fast path single entry scatterlists. */
2295 + if (nents == 1) {
2296 +@@ -1027,6 +1037,10 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
2297 + __func__, nents, sg_virt(sglist), sglist->length);
2298 +
2299 + ioc = GET_IOC(dev);
2300 ++ if (!ioc) {
2301 ++ WARN_ON(!ioc);
2302 ++ return;
2303 ++ }
2304 +
2305 + #ifdef SBA_COLLECT_STATS
2306 + ioc->usg_calls++;
2307 +diff --git a/drivers/pinctrl/freescale/pinctrl-mxs.c b/drivers/pinctrl/freescale/pinctrl-mxs.c
2308 +index 646d5c244af1..496075928af9 100644
2309 +--- a/drivers/pinctrl/freescale/pinctrl-mxs.c
2310 ++++ b/drivers/pinctrl/freescale/pinctrl-mxs.c
2311 +@@ -195,6 +195,16 @@ static int mxs_pinctrl_get_func_groups(struct pinctrl_dev *pctldev,
2312 + return 0;
2313 + }
2314 +
2315 ++static void mxs_pinctrl_rmwl(u32 value, u32 mask, u8 shift, void __iomem *reg)
2316 ++{
2317 ++ u32 tmp;
2318 ++
2319 ++ tmp = readl(reg);
2320 ++ tmp &= ~(mask << shift);
2321 ++ tmp |= value << shift;
2322 ++ writel(tmp, reg);
2323 ++}
2324 ++
2325 + static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
2326 + unsigned group)
2327 + {
2328 +@@ -212,8 +222,7 @@ static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
2329 + reg += bank * 0x20 + pin / 16 * 0x10;
2330 + shift = pin % 16 * 2;
2331 +
2332 +- writel(0x3 << shift, reg + CLR);
2333 +- writel(g->muxsel[i] << shift, reg + SET);
2334 ++ mxs_pinctrl_rmwl(g->muxsel[i], 0x3, shift, reg);
2335 + }
2336 +
2337 + return 0;
2338 +@@ -280,8 +289,7 @@ static int mxs_pinconf_group_set(struct pinctrl_dev *pctldev,
2339 + /* mA */
2340 + if (config & MA_PRESENT) {
2341 + shift = pin % 8 * 4;
2342 +- writel(0x3 << shift, reg + CLR);
2343 +- writel(ma << shift, reg + SET);
2344 ++ mxs_pinctrl_rmwl(ma, 0x3, shift, reg);
2345 + }
2346 +
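The helper exists because the old two-write sequence (a CLR of the whole field, then a SET of the new value) can leave the field at zero between the writes, briefly glitching the pin configuration; a single read-modify-write never exposes a transient value. The same logic over a plain variable:

#include <stdint.h>
#include <stdio.h>

static void rmwl(uint32_t value, uint32_t mask, uint8_t shift, uint32_t *reg)
{
	uint32_t tmp = *reg;

	tmp &= ~(mask << shift);	/* clear the field */
	tmp |= value << shift;		/* insert the new value */
	*reg = tmp;			/* one final write */
}

int main(void)
{
	uint32_t reg = 0xffffffff;

	rmwl(0x2, 0x3, 4, &reg);	/* 2-bit field at bits [5:4] */
	printf("reg=0x%08x\n", reg);	/* prints 0xffffffef */
	return 0;
}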
2347 + /* vol */
2348 +diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
2349 +index 9677807db364..b505b87661f8 100644
2350 +--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
2351 ++++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
2352 +@@ -732,8 +732,8 @@ static const char * const sdxc_c_groups[] = {
2353 + static const char * const nand_groups[] = {
2354 + "nand_io", "nand_io_ce0", "nand_io_ce1",
2355 + "nand_io_rb0", "nand_ale", "nand_cle",
2356 +- "nand_wen_clk", "nand_ren_clk", "nand_dqs0",
2357 +- "nand_dqs1"
2358 ++ "nand_wen_clk", "nand_ren_clk", "nand_dqs_0",
2359 ++ "nand_dqs_1"
2360 + };
2361 +
2362 + static const char * const nor_groups[] = {
2363 +diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
2364 +index 7b2c9495c383..a2b021958213 100644
2365 +--- a/drivers/pinctrl/sh-pfc/core.c
2366 ++++ b/drivers/pinctrl/sh-pfc/core.c
2367 +@@ -529,6 +529,9 @@ static int sh_pfc_probe(struct platform_device *pdev)
2368 + ret = info->ops->init(pfc);
2369 + if (ret < 0)
2370 + return ret;
2371 ++
2372 ++ /* .init() may have overridden pfc->info */
2373 ++ info = pfc->info;
2374 + }
2375 +
2376 + pinctrl_provide_dummies();
2377 +diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
2378 +index 8cad6c165680..a100d58dbfd7 100644
2379 +--- a/drivers/platform/x86/ideapad-laptop.c
2380 ++++ b/drivers/platform/x86/ideapad-laptop.c
2381 +@@ -800,6 +800,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
2382 + case 11:
2383 + case 7:
2384 + case 6:
2385 ++ case 1:
2386 + ideapad_input_report(priv, vpc_bit);
2387 + break;
2388 + case 5:
2389 +diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
2390 +index 851e8efe364e..0e5b3584e918 100644
2391 +--- a/drivers/scsi/lpfc/lpfc_els.c
2392 ++++ b/drivers/scsi/lpfc/lpfc_els.c
2393 +@@ -3600,12 +3600,14 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
2394 + } else {
2395 + buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
2396 + lpfc_els_free_data(phba, buf_ptr1);
2397 ++ elsiocb->context2 = NULL;
2398 + }
2399 + }
2400 +
2401 + if (elsiocb->context3) {
2402 + buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
2403 + lpfc_els_free_bpl(phba, buf_ptr);
2404 ++ elsiocb->context3 = NULL;
2405 + }
2406 + lpfc_sli_release_iocbq(phba, elsiocb);
2407 + return 0;
2408 +diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
2409 +index 56f73682d4bd..edb1a4d648dd 100644
2410 +--- a/drivers/scsi/lpfc/lpfc_sli.c
2411 ++++ b/drivers/scsi/lpfc/lpfc_sli.c
2412 +@@ -5887,18 +5887,25 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
2413 +
2414 + free_vfi_bmask:
2415 + kfree(phba->sli4_hba.vfi_bmask);
2416 ++ phba->sli4_hba.vfi_bmask = NULL;
2417 + free_xri_ids:
2418 + kfree(phba->sli4_hba.xri_ids);
2419 ++ phba->sli4_hba.xri_ids = NULL;
2420 + free_xri_bmask:
2421 + kfree(phba->sli4_hba.xri_bmask);
2422 ++ phba->sli4_hba.xri_bmask = NULL;
2423 + free_vpi_ids:
2424 + kfree(phba->vpi_ids);
2425 ++ phba->vpi_ids = NULL;
2426 + free_vpi_bmask:
2427 + kfree(phba->vpi_bmask);
2428 ++ phba->vpi_bmask = NULL;
2429 + free_rpi_ids:
2430 + kfree(phba->sli4_hba.rpi_ids);
2431 ++ phba->sli4_hba.rpi_ids = NULL;
2432 + free_rpi_bmask:
2433 + kfree(phba->sli4_hba.rpi_bmask);
2434 ++ phba->sli4_hba.rpi_bmask = NULL;
2435 + err_exit:
2436 + return rc;
2437 + }
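Each kfree() in this unwind path now also NULLs the pointer it freed, so a later retry of the allocation routine (or a second teardown) sees NULL rather than a dangling pointer and cannot double-free or reuse stale memory. The idiom in miniature:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *buf = malloc(64);

	if (!buf)
		return 1;

	free(buf);
	buf = NULL;	/* later cleanup paths now see NULL */

	free(buf);	/* free(NULL), like kfree(NULL), is a no-op */
	printf("no double free\n");
	return 0;
}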
2438 +diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
2439 +index 1f3991ba7580..b33762f1013f 100644
2440 +--- a/drivers/scsi/qla2xxx/qla_isr.c
2441 ++++ b/drivers/scsi/qla2xxx/qla_isr.c
2442 +@@ -2434,6 +2434,10 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
2443 + if (pkt->entry_status & RF_BUSY)
2444 + res = DID_BUS_BUSY << 16;
2445 +
2446 ++ if (pkt->entry_type == NOTIFY_ACK_TYPE &&
2447 ++ pkt->handle == QLA_TGT_SKIP_HANDLE)
2448 ++ return;
2449 ++
2450 + sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2451 + if (sp) {
2452 + sp->done(ha, sp, res);
2453 +diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
2454 +index df6193b48177..4de1394ebf22 100644
2455 +--- a/drivers/scsi/qla2xxx/qla_target.c
2456 ++++ b/drivers/scsi/qla2xxx/qla_target.c
2457 +@@ -2872,7 +2872,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
2458 +
2459 + pkt->entry_type = NOTIFY_ACK_TYPE;
2460 + pkt->entry_count = 1;
2461 +- pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
2462 ++ pkt->handle = QLA_TGT_SKIP_HANDLE;
2463 +
2464 + nack = (struct nack_to_isp *)pkt;
2465 + nack->ox_id = ntfy->ox_id;
2466 +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
2467 +index 8a2cba63b5ff..80cebe691fee 100644
2468 +--- a/drivers/scsi/sd.c
2469 ++++ b/drivers/scsi/sd.c
2470 +@@ -2454,7 +2454,8 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
2471 + if (sdp->broken_fua) {
2472 + sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n");
2473 + sdkp->DPOFUA = 0;
2474 +- } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
2475 ++ } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw &&
2476 ++ !sdkp->device->use_16_for_rw) {
2477 + sd_first_printk(KERN_NOTICE, sdkp,
2478 + "Uses READ/WRITE(6), disabling FUA\n");
2479 + sdkp->DPOFUA = 0;
2480 +diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
2481 +index f164f24a4a55..d836414c920d 100644
2482 +--- a/drivers/scsi/virtio_scsi.c
2483 ++++ b/drivers/scsi/virtio_scsi.c
2484 +@@ -531,7 +531,9 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
2485 + {
2486 + struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
2487 + struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
2488 ++ unsigned long flags;
2489 + int req_size;
2490 ++ int ret;
2491 +
2492 + BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
2493 +
2494 +@@ -556,8 +558,15 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
2495 + req_size = sizeof(cmd->req.cmd);
2496 + }
2497 +
2498 +- if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0)
2499 ++ ret = virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd));
2500 ++ if (ret == -EIO) {
2501 ++ cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
2502 ++ spin_lock_irqsave(&req_vq->vq_lock, flags);
2503 ++ virtscsi_complete_cmd(vscsi, cmd);
2504 ++ spin_unlock_irqrestore(&req_vq->vq_lock, flags);
2505 ++ } else if (ret != 0) {
2506 + return SCSI_MLQUEUE_HOST_BUSY;
2507 ++ }
2508 + return 0;
2509 + }
2510 +
2511 +diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
2512 +index 5e991065f5b0..4e7110351e8c 100644
2513 +--- a/drivers/spi/spi-davinci.c
2514 ++++ b/drivers/spi/spi-davinci.c
2515 +@@ -655,7 +655,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
2516 + buf = t->rx_buf;
2517 + t->rx_dma = dma_map_single(&spi->dev, buf,
2518 + t->len, DMA_FROM_DEVICE);
2519 +- if (!t->rx_dma) {
2520 ++ if (dma_mapping_error(&spi->dev, t->rx_dma)) {
2521 + ret = -EFAULT;
2522 + goto err_rx_map;
2523 + }
2524 +@@ -669,7 +669,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
2525 + buf = (void *)t->tx_buf;
2526 + t->tx_dma = dma_map_single(&spi->dev, buf,
2527 + t->len, DMA_TO_DEVICE);
2528 +- if (!t->tx_dma) {
2529 ++ if (dma_mapping_error(&spi->dev, t->tx_dma)) {
2530 + ret = -EFAULT;
2531 + goto err_tx_map;
2532 + }
2533 +diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
2534 +index e78ddbe5a954..a503132f91e8 100644
2535 +--- a/drivers/staging/comedi/comedi_fops.c
2536 ++++ b/drivers/staging/comedi/comedi_fops.c
2537 +@@ -2885,6 +2885,7 @@ static int __init comedi_init(void)
2538 + dev = comedi_alloc_board_minor(NULL);
2539 + if (IS_ERR(dev)) {
2540 + comedi_cleanup_board_minors();
2541 ++ class_destroy(comedi_class);
2542 + cdev_del(&comedi_cdev);
2543 + unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0),
2544 + COMEDI_NUM_MINORS);
2545 +diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
2546 +index f7bcefd46b5e..c50b304ce0b4 100644
2547 +--- a/drivers/staging/comedi/drivers/ni_mio_common.c
2548 ++++ b/drivers/staging/comedi/drivers/ni_mio_common.c
2549 +@@ -2120,7 +2120,7 @@ static int ni_ai_insn_read(struct comedi_device *dev,
2550 + return -ETIME;
2551 + }
2552 + d += signbits;
2553 +- data[n] = d;
2554 ++ data[n] = d & 0xffff;
2555 + }
2556 + } else if (devpriv->is_6143) {
2557 + for (n = 0; n < insn->n; n++) {
2558 +@@ -2163,8 +2163,8 @@ static int ni_ai_insn_read(struct comedi_device *dev,
2559 + data[n] = dl;
2560 + } else {
2561 + d = ni_readw(dev, ADC_FIFO_Data_Register);
2562 +- d += signbits; /* subtle: needs to be short addition */
2563 +- data[n] = d;
2564 ++ d += signbits;
2565 ++ data[n] = d & 0xffff;
2566 + }
2567 + }
2568 + }
2569 +diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
2570 +index 766fdcece074..4c6b479a34c2 100644
2571 +--- a/drivers/staging/vt6656/main_usb.c
2572 ++++ b/drivers/staging/vt6656/main_usb.c
2573 +@@ -534,6 +534,9 @@ static int vnt_start(struct ieee80211_hw *hw)
2574 + goto free_all;
2575 + }
2576 +
2577 ++ if (vnt_key_init_table(priv))
2578 ++ goto free_all;
2579 ++
2580 + priv->int_interval = 1; /* bInterval is set to 1 */
2581 +
2582 + vnt_int_start_interrupt(priv);
2583 +diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
2584 +index 68bd7f5d9f73..6d561e1170f4 100644
2585 +--- a/drivers/target/target_core_internal.h
2586 ++++ b/drivers/target/target_core_internal.h
2587 +@@ -67,7 +67,7 @@ int init_se_kmem_caches(void);
2588 + void release_se_kmem_caches(void);
2589 + u32 scsi_get_new_index(scsi_index_t);
2590 + void transport_subsystem_check_init(void);
2591 +-void transport_cmd_finish_abort(struct se_cmd *, int);
2592 ++int transport_cmd_finish_abort(struct se_cmd *, int);
2593 + unsigned char *transport_dump_cmd_direction(struct se_cmd *);
2594 + void transport_dump_dev_state(struct se_device *, char *, int *);
2595 + void transport_dump_dev_info(struct se_device *, struct se_lun *,
2596 +diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
2597 +index eed7c5a31b15..44510bd74963 100644
2598 +--- a/drivers/target/target_core_tmr.c
2599 ++++ b/drivers/target/target_core_tmr.c
2600 +@@ -78,7 +78,7 @@ void core_tmr_release_req(struct se_tmr_req *tmr)
2601 + kfree(tmr);
2602 + }
2603 +
2604 +-static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
2605 ++static int core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
2606 + {
2607 + unsigned long flags;
2608 + bool remove = true, send_tas;
2609 +@@ -94,7 +94,7 @@ static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
2610 + transport_send_task_abort(cmd);
2611 + }
2612 +
2613 +- transport_cmd_finish_abort(cmd, remove);
2614 ++ return transport_cmd_finish_abort(cmd, remove);
2615 + }
2616 +
2617 + static int target_check_cdb_and_preempt(struct list_head *list,
2618 +@@ -190,8 +190,8 @@ void core_tmr_abort_task(
2619 + cancel_work_sync(&se_cmd->work);
2620 + transport_wait_for_tasks(se_cmd);
2621 +
2622 +- transport_cmd_finish_abort(se_cmd, true);
2623 +- target_put_sess_cmd(se_cmd);
2624 ++ if (!transport_cmd_finish_abort(se_cmd, true))
2625 ++ target_put_sess_cmd(se_cmd);
2626 +
2627 + printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
2628 + " ref_tag: %d\n", ref_tag);
2629 +@@ -291,8 +291,8 @@ static void core_tmr_drain_tmr_list(
2630 + cancel_work_sync(&cmd->work);
2631 + transport_wait_for_tasks(cmd);
2632 +
2633 +- transport_cmd_finish_abort(cmd, 1);
2634 +- target_put_sess_cmd(cmd);
2635 ++ if (!transport_cmd_finish_abort(cmd, 1))
2636 ++ target_put_sess_cmd(cmd);
2637 + }
2638 + }
2639 +
2640 +@@ -390,8 +390,8 @@ static void core_tmr_drain_state_list(
2641 + cancel_work_sync(&cmd->work);
2642 + transport_wait_for_tasks(cmd);
2643 +
2644 +- core_tmr_handle_tas_abort(cmd, tas);
2645 +- target_put_sess_cmd(cmd);
2646 ++ if (!core_tmr_handle_tas_abort(cmd, tas))
2647 ++ target_put_sess_cmd(cmd);
2648 + }
2649 + }
2650 +
2651 +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
2652 +index 1cf3c0819b81..95c1c4ecf336 100644
2653 +--- a/drivers/target/target_core_transport.c
2654 ++++ b/drivers/target/target_core_transport.c
2655 +@@ -644,9 +644,10 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
2656 + percpu_ref_put(&lun->lun_ref);
2657 + }
2658 +
2659 +-void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
2660 ++int transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
2661 + {
2662 + bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
2663 ++ int ret = 0;
2664 +
2665 + if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
2666 + transport_lun_remove_cmd(cmd);
2667 +@@ -658,9 +659,11 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
2668 + cmd->se_tfo->aborted_task(cmd);
2669 +
2670 + if (transport_cmd_check_stop_to_fabric(cmd))
2671 +- return;
2672 ++ return 1;
2673 + if (remove && ack_kref)
2674 +- transport_put_cmd(cmd);
2675 ++ ret = transport_put_cmd(cmd);
2676 ++
2677 ++ return ret;
2678 + }
2679 +
2680 + static void target_complete_failure_work(struct work_struct *work)
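The signature change is about reference counting: transport_cmd_finish_abort() now returns nonzero when it has already dropped the command's final reference, and every caller in target_core_tmr.c was switched to `if (!transport_cmd_finish_abort(...)) target_put_sess_cmd(...)` so the put happens exactly once on either path. A toy refcount showing the convention:

#include <stdbool.h>
#include <stdio.h>

static int refs = 1;

static void put(void)
{
	if (--refs == 0)
		printf("freed\n");
}

/* returns nonzero if it consumed the reference itself */
static int finish_abort(bool consumed_internally)
{
	if (consumed_internally) {
		put();
		return 1;
	}
	return 0;
}

int main(void)
{
	if (!finish_abort(true))
		put();	/* skipped here, so no double put */
	return 0;
}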
2681 +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
2682 +index 2df90a54509a..50b67ff2b6ea 100644
2683 +--- a/drivers/tty/vt/vt.c
2684 ++++ b/drivers/tty/vt/vt.c
2685 +@@ -2693,13 +2693,13 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
2686 + * related to the kernel should not use this.
2687 + */
2688 + data = vt_get_shift_state();
2689 +- ret = __put_user(data, p);
2690 ++ ret = put_user(data, p);
2691 + break;
2692 + case TIOCL_GETMOUSEREPORTING:
2693 + console_lock(); /* May be overkill */
2694 + data = mouse_reporting();
2695 + console_unlock();
2696 +- ret = __put_user(data, p);
2697 ++ ret = put_user(data, p);
2698 + break;
2699 + case TIOCL_SETVESABLANK:
2700 + console_lock();
2701 +@@ -2708,7 +2708,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
2702 + break;
2703 + case TIOCL_GETKMSGREDIRECT:
2704 + data = vt_get_kmsg_redirect();
2705 +- ret = __put_user(data, p);
2706 ++ ret = put_user(data, p);
2707 + break;
2708 + case TIOCL_SETKMSGREDIRECT:
2709 + if (!capable(CAP_SYS_ADMIN)) {
2710 +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
2711 +index 96b21b0dac1e..3116edfcdc18 100644
2712 +--- a/drivers/usb/core/quirks.c
2713 ++++ b/drivers/usb/core/quirks.c
2714 +@@ -223,6 +223,10 @@ static const struct usb_device_id usb_quirk_list[] = {
2715 + /* Blackmagic Design UltraStudio SDI */
2716 + { USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
2717 +
2718 ++ /* Hauppauge HVR-950q */
2719 ++ { USB_DEVICE(0x2040, 0x7200), .driver_info =
2720 ++ USB_QUIRK_CONFIG_INTF_STRINGS },
2721 ++
2722 + /* INTEL VALUE SSD */
2723 + { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
2724 +
2725 +diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
2726 +index 4a1a543deeda..da885c3bc33f 100644
2727 +--- a/drivers/usb/dwc3/dwc3-st.c
2728 ++++ b/drivers/usb/dwc3/dwc3-st.c
2729 +@@ -227,7 +227,7 @@ static int st_dwc3_probe(struct platform_device *pdev)
2730 +
2731 + dwc3_data->syscfg_reg_off = res->start;
2732 +
2733 +- dev_vdbg(&pdev->dev, "glue-logic addr 0x%p, syscfg-reg offset 0x%x\n",
2734 ++ dev_vdbg(&pdev->dev, "glue-logic addr 0x%pK, syscfg-reg offset 0x%x\n",
2735 + dwc3_data->glue_base, dwc3_data->syscfg_reg_off);
2736 +
2737 + dwc3_data->rstc_pwrdn = devm_reset_control_get(dev, "powerdown");
2738 +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
2739 +index 48412e4afb1b..ff56aaa00bf7 100644
2740 +--- a/drivers/usb/dwc3/gadget.c
2741 ++++ b/drivers/usb/dwc3/gadget.c
2742 +@@ -1202,7 +1202,7 @@ static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
2743 + goto out;
2744 + }
2745 +
2746 +- if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
2747 ++ if (WARN(req->dep != dep, "request %pK belongs to '%s'\n",
2748 + request, req->dep->name)) {
2749 + ret = -EINVAL;
2750 + goto out;
2751 +@@ -1249,7 +1249,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
2752 + dwc3_stop_active_transfer(dwc, dep->number, true);
2753 + goto out1;
2754 + }
2755 +- dev_err(dwc->dev, "request %p was not queued to %s\n",
2756 ++ dev_err(dwc->dev, "request %pK was not queued to %s\n",
2757 + request, ep->name);
2758 + ret = -EINVAL;
2759 + goto out0;
2760 +@@ -1854,7 +1854,7 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
2761 + * would help. Lets hope that if this occurs, someone
2762 + * fixes the root cause instead of looking away :)
2763 + */
2764 +- dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
2765 ++ dev_err(dwc->dev, "%s's TRB (%pK) still owned by HW\n",
2766 + dep->name, trb);
2767 +
2768 + count = trb->size & DWC3_TRB_SIZE_MASK;
2769 +diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
2770 +index 921dd8b0733f..804b209f4c08 100644
2771 +--- a/drivers/usb/gadget/function/f_fs.c
2772 ++++ b/drivers/usb/gadget/function/f_fs.c
2773 +@@ -1668,12 +1668,12 @@ static int ffs_func_eps_enable(struct ffs_function *func)
2774 + ep->ep->driver_data = ep;
2775 + ep->ep->desc = ds;
2776 +
2777 +- comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds +
2778 +- USB_DT_ENDPOINT_SIZE);
2779 +- ep->ep->maxburst = comp_desc->bMaxBurst + 1;
2780 +-
2781 +- if (needs_comp_desc)
2782 ++ if (needs_comp_desc) {
2783 ++ comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds +
2784 ++ USB_DT_ENDPOINT_SIZE);
2785 ++ ep->ep->maxburst = comp_desc->bMaxBurst + 1;
2786 + ep->ep->comp_desc = comp_desc;
2787 ++ }
2788 +
2789 + ret = usb_ep_enable(ep->ep);
2790 + if (likely(!ret)) {
2791 +@@ -3459,6 +3459,7 @@ static void ffs_closed(struct ffs_data *ffs)
2792 + {
2793 + struct ffs_dev *ffs_obj;
2794 + struct f_fs_opts *opts;
2795 ++ struct config_item *ci;
2796 +
2797 + ENTER();
2798 + ffs_dev_lock();
2799 +@@ -3482,8 +3483,11 @@ static void ffs_closed(struct ffs_data *ffs)
2800 + || !atomic_read(&opts->func_inst.group.cg_item.ci_kref.refcount))
2801 + goto done;
2802 +
2803 +- unregister_gadget_item(ffs_obj->opts->
2804 +- func_inst.group.cg_item.ci_parent->ci_parent);
2805 ++ ci = opts->func_inst.group.cg_item.ci_parent->ci_parent;
2806 ++ ffs_dev_unlock();
2807 ++
2808 ++ unregister_gadget_item(ci);
2809 ++ return;
2810 + done:
2811 + ffs_dev_unlock();
2812 + }
2813 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
2814 +index f36f964a9a37..69040e9069e0 100644
2815 +--- a/drivers/usb/serial/cp210x.c
2816 ++++ b/drivers/usb/serial/cp210x.c
2817 +@@ -132,6 +132,7 @@ static const struct usb_device_id id_table[] = {
2818 + { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
2819 + { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
2820 + { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
2821 ++ { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
2822 + { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
2823 + { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
2824 + { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
2825 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
2826 +index 80d93bccc09b..5d841485bbe3 100644
2827 +--- a/drivers/usb/serial/option.c
2828 ++++ b/drivers/usb/serial/option.c
2829 +@@ -1874,6 +1874,10 @@ static const struct usb_device_id option_ids[] = {
2830 + .driver_info = (kernel_ulong_t)&four_g_w100_blacklist
2831 + },
2832 + { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
2833 ++ { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff),
2834 ++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
2835 ++ { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff),
2836 ++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
2837 + { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
2838 + { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
2839 + { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
2840 +diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
2841 +index 4f91868736a5..23c303b2a3a2 100644
2842 +--- a/drivers/usb/serial/qcserial.c
2843 ++++ b/drivers/usb/serial/qcserial.c
2844 +@@ -156,6 +156,7 @@ static const struct usb_device_id id_table[] = {
2845 + {DEVICE_SWI(0x1199, 0x9056)}, /* Sierra Wireless Modem */
2846 + {DEVICE_SWI(0x1199, 0x9060)}, /* Sierra Wireless Modem */
2847 + {DEVICE_SWI(0x1199, 0x9061)}, /* Sierra Wireless Modem */
2848 ++ {DEVICE_SWI(0x1199, 0x9063)}, /* Sierra Wireless EM7305 */
2849 + {DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx */
2850 + {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */
2851 + {DEVICE_SWI(0x1199, 0x9078)}, /* Sierra Wireless EM74xx */
2852 +diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
2853 +index 44ab43fc4fcc..af10f7b131a4 100644
2854 +--- a/drivers/usb/usbip/stub_main.c
2855 ++++ b/drivers/usb/usbip/stub_main.c
2856 +@@ -262,7 +262,11 @@ void stub_device_cleanup_urbs(struct stub_device *sdev)
2857 + kmem_cache_free(stub_priv_cache, priv);
2858 +
2859 + kfree(urb->transfer_buffer);
2860 ++ urb->transfer_buffer = NULL;
2861 ++
2862 + kfree(urb->setup_packet);
2863 ++ urb->setup_packet = NULL;
2864 ++
2865 + usb_free_urb(urb);
2866 + }
2867 + }
2868 +diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c
2869 +index dbcabc9dbe0d..021003c4de53 100644
2870 +--- a/drivers/usb/usbip/stub_tx.c
2871 ++++ b/drivers/usb/usbip/stub_tx.c
2872 +@@ -28,7 +28,11 @@ static void stub_free_priv_and_urb(struct stub_priv *priv)
2873 + struct urb *urb = priv->urb;
2874 +
2875 + kfree(urb->setup_packet);
2876 ++ urb->setup_packet = NULL;
2877 ++
2878 + kfree(urb->transfer_buffer);
2879 ++ urb->transfer_buffer = NULL;
2880 ++
2881 + list_del(&priv->list);
2882 + kmem_cache_free(stub_priv_cache, priv);
2883 + usb_free_urb(urb);
2884 +diff --git a/drivers/watchdog/bcm_kona_wdt.c b/drivers/watchdog/bcm_kona_wdt.c
2885 +index 22d8ae65772a..35af1d15c7ef 100644
2886 +--- a/drivers/watchdog/bcm_kona_wdt.c
2887 ++++ b/drivers/watchdog/bcm_kona_wdt.c
2888 +@@ -304,6 +304,8 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev)
2889 + if (!wdt)
2890 + return -ENOMEM;
2891 +
2892 ++ spin_lock_init(&wdt->lock);
2893 ++
2894 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2895 + wdt->base = devm_ioremap_resource(dev, res);
2896 + if (IS_ERR(wdt->base))
2897 +@@ -316,7 +318,6 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev)
2898 + return ret;
2899 + }
2900 +
2901 +- spin_lock_init(&wdt->lock);
2902 + platform_set_drvdata(pdev, wdt);
2903 + watchdog_set_drvdata(&bcm_kona_wdt_wdd, wdt);
2904 +
2905 +diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
2906 +index 4c549323c605..3a0e6a031174 100644
2907 +--- a/drivers/xen/swiotlb-xen.c
2908 ++++ b/drivers/xen/swiotlb-xen.c
2909 +@@ -416,9 +416,9 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
2910 + if (map == SWIOTLB_MAP_ERROR)
2911 + return DMA_ERROR_CODE;
2912 +
2913 ++ dev_addr = xen_phys_to_bus(map);
2914 + xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
2915 + dev_addr, map & ~PAGE_MASK, size, dir, attrs);
2916 +- dev_addr = xen_phys_to_bus(map);
2917 +
2918 + /*
2919 + * Ensure that the address returned is DMA'ble
2920 +@@ -574,13 +574,14 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
2921 + sg_dma_len(sgl) = 0;
2922 + return 0;
2923 + }
2924 ++ dev_addr = xen_phys_to_bus(map);
2925 + xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
2926 + dev_addr,
2927 + map & ~PAGE_MASK,
2928 + sg->length,
2929 + dir,
2930 + attrs);
2931 +- sg->dma_address = xen_phys_to_bus(map);
2932 ++ sg->dma_address = dev_addr;
2933 + } else {
2934 + /* we are not interested in the dma_addr returned by
2935 + * xen_dma_map_page, only in the potential cache flushes executed
2936 +diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
2937 +index ac7d921ed984..257425511d10 100644
2938 +--- a/fs/autofs4/dev-ioctl.c
2939 ++++ b/fs/autofs4/dev-ioctl.c
2940 +@@ -331,7 +331,7 @@ static int autofs_dev_ioctl_fail(struct file *fp,
2941 + int status;
2942 +
2943 + token = (autofs_wqt_t) param->fail.token;
2944 +- status = param->fail.status ? param->fail.status : -ENOENT;
2945 ++ status = param->fail.status < 0 ? param->fail.status : -ENOENT;
2946 + return autofs4_wait_release(sbi, token, status);
2947 + }
2948 +
2949 +diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
2950 +index cd46e4158830..90f20f8ce87e 100644
2951 +--- a/fs/binfmt_elf.c
2952 ++++ b/fs/binfmt_elf.c
2953 +@@ -904,17 +904,60 @@ static int load_elf_binary(struct linux_binprm *bprm)
2954 + elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;
2955 +
2956 + vaddr = elf_ppnt->p_vaddr;
2957 ++ /*
2958 ++ * If we are loading ET_EXEC or we have already performed
2959 ++ * the ET_DYN load_addr calculations, proceed normally.
2960 ++ */
2961 + if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
2962 + elf_flags |= MAP_FIXED;
2963 + } else if (loc->elf_ex.e_type == ET_DYN) {
2964 +- /* Try and get dynamic programs out of the way of the
2965 +- * default mmap base, as well as whatever program they
2966 +- * might try to exec. This is because the brk will
2967 +- * follow the loader, and is not movable. */
2968 +- load_bias = ELF_ET_DYN_BASE - vaddr;
2969 +- if (current->flags & PF_RANDOMIZE)
2970 +- load_bias += arch_mmap_rnd();
2971 +- load_bias = ELF_PAGESTART(load_bias);
2972 ++ /*
2973 ++ * This logic is run once for the first LOAD Program
2974 ++ * Header for ET_DYN binaries to calculate the
2975 ++ * randomization (load_bias) for all the LOAD
2976 ++ * Program Headers, and to calculate the entire
2977 ++ * size of the ELF mapping (total_size). (Note that
2978 ++ * load_addr_set is set to true later once the
2979 ++ * initial mapping is performed.)
2980 ++ *
2981 ++ * There are effectively two types of ET_DYN
2982 ++ * binaries: programs (i.e. PIE: ET_DYN with INTERP)
2983 ++ * and loaders (ET_DYN without INTERP, since they
2984 ++ * _are_ the ELF interpreter). The loaders must
2985 ++ * be loaded away from programs since the program
2986 ++ * may otherwise collide with the loader (especially
2987 ++ * for ET_EXEC which does not have a randomized
2988 ++		 * position). For example, to handle invocations of
2989 ++ * "./ld.so someprog" to test out a new version of
2990 ++ * the loader, the subsequent program that the
2991 ++ * loader loads must avoid the loader itself, so
2992 ++ * they cannot share the same load range. Sufficient
2993 ++ * room for the brk must be allocated with the
2994 ++ * loader as well, since brk must be available with
2995 ++ * the loader.
2996 ++ *
2997 ++ * Therefore, programs are loaded offset from
2998 ++ * ELF_ET_DYN_BASE and loaders are loaded into the
2999 ++ * independently randomized mmap region (0 load_bias
3000 ++ * without MAP_FIXED).
3001 ++ */
3002 ++ if (elf_interpreter) {
3003 ++ load_bias = ELF_ET_DYN_BASE;
3004 ++ if (current->flags & PF_RANDOMIZE)
3005 ++ load_bias += arch_mmap_rnd();
3006 ++ elf_flags |= MAP_FIXED;
3007 ++ } else
3008 ++ load_bias = 0;
3009 ++
3010 ++ /*
3011 ++ * Since load_bias is used for all subsequent loading
3012 ++ * calculations, we must lower it by the first vaddr
3013 ++ * so that the remaining calculations based on the
3014 ++ * ELF vaddrs will be correctly offset. The result
3015 ++ * is then page aligned.
3016 ++ */
3017 ++ load_bias = ELF_PAGESTART(load_bias - vaddr);
3018 ++
3019 + total_size = total_mapping_size(elf_phdata,
3020 + loc->elf_ex.e_phnum);
3021 + if (!total_size) {
3022 +@@ -2285,6 +2328,7 @@ static int elf_core_dump(struct coredump_params *cprm)
3023 + goto end_coredump;
3024 + }
3025 + }
3026 ++ dump_truncate(cprm);
3027 +
3028 + if (!elf_core_write_extra_data(cprm))
3029 + goto end_coredump;
3030 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
3031 +index 04997ac958c4..db6115486166 100644
3032 +--- a/fs/btrfs/inode.c
3033 ++++ b/fs/btrfs/inode.c
3034 +@@ -4358,8 +4358,19 @@ search_again:
3035 + if (found_type > min_type) {
3036 + del_item = 1;
3037 + } else {
3038 +- if (item_end < new_size)
3039 ++ if (item_end < new_size) {
3040 ++ /*
3041 ++ * With NO_HOLES mode, for the following mapping
3042 ++ *
3043 ++ * [0-4k][hole][8k-12k]
3044 ++ *
3045 ++				 * if truncating isize down to 6k, it ends up
3046 ++				 * with isize being 8k.
3047 ++ */
3048 ++ if (btrfs_fs_incompat(root->fs_info, NO_HOLES))
3049 ++ last_size = new_size;
3050 + break;
3051 ++ }
3052 + if (found_key.offset >= new_size)
3053 + del_item = 1;
3054 + else
3055 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
3056 +index 7d7bd466520b..cb3406815330 100644
3057 +--- a/fs/cifs/connect.c
3058 ++++ b/fs/cifs/connect.c
3059 +@@ -401,6 +401,9 @@ cifs_reconnect(struct TCP_Server_Info *server)
3060 + }
3061 + } while (server->tcpStatus == CifsNeedReconnect);
3062 +
3063 ++ if (server->tcpStatus == CifsNeedNegotiate)
3064 ++ mod_delayed_work(cifsiod_wq, &server->echo, 0);
3065 ++
3066 + return rc;
3067 + }
3068 +
3069 +@@ -410,18 +413,27 @@ cifs_echo_request(struct work_struct *work)
3070 + int rc;
3071 + struct TCP_Server_Info *server = container_of(work,
3072 + struct TCP_Server_Info, echo.work);
3073 ++ unsigned long echo_interval;
3074 ++
3075 ++ /*
3076 ++ * If we need to renegotiate, set echo interval to zero to
3077 ++ * immediately call echo service where we can renegotiate.
3078 ++ */
3079 ++ if (server->tcpStatus == CifsNeedNegotiate)
3080 ++ echo_interval = 0;
3081 ++ else
3082 ++ echo_interval = SMB_ECHO_INTERVAL;
3083 +
3084 + /*
3085 +- * We cannot send an echo if it is disabled or until the
3086 +- * NEGOTIATE_PROTOCOL request is done, which is indicated by
3087 +- * server->ops->need_neg() == true. Also, no need to ping if
3088 +- * we got a response recently.
3089 ++ * We cannot send an echo if it is disabled.
3090 ++ * Also, no need to ping if we got a response recently.
3091 + */
3092 +
3093 + if (server->tcpStatus == CifsNeedReconnect ||
3094 +- server->tcpStatus == CifsExiting || server->tcpStatus == CifsNew ||
3095 ++ server->tcpStatus == CifsExiting ||
3096 ++ server->tcpStatus == CifsNew ||
3097 + (server->ops->can_echo && !server->ops->can_echo(server)) ||
3098 +- time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ))
3099 ++ time_before(jiffies, server->lstrp + echo_interval - HZ))
3100 + goto requeue_echo;
3101 +
3102 + rc = server->ops->echo ? server->ops->echo(server) : -ENOSYS;
3103 +diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
3104 +index 87b87e091e8e..efd72e1fae74 100644
3105 +--- a/fs/cifs/smb1ops.c
3106 ++++ b/fs/cifs/smb1ops.c
3107 +@@ -849,8 +849,13 @@ cifs_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
3108 + struct cifs_fid *fid, __u16 search_flags,
3109 + struct cifs_search_info *srch_inf)
3110 + {
3111 +- return CIFSFindFirst(xid, tcon, path, cifs_sb,
3112 +- &fid->netfid, search_flags, srch_inf, true);
3113 ++ int rc;
3114 ++
3115 ++ rc = CIFSFindFirst(xid, tcon, path, cifs_sb,
3116 ++ &fid->netfid, search_flags, srch_inf, true);
3117 ++ if (rc)
3118 ++ cifs_dbg(FYI, "find first failed=%d\n", rc);
3119 ++ return rc;
3120 + }
3121 +
3122 + static int
3123 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
3124 +index 57aeae6116d6..16212dab81d5 100644
3125 +--- a/fs/cifs/smb2ops.c
3126 ++++ b/fs/cifs/smb2ops.c
3127 +@@ -843,7 +843,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
3128 + rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL);
3129 + kfree(utf16_path);
3130 + if (rc) {
3131 +- cifs_dbg(VFS, "open dir failed\n");
3132 ++ cifs_dbg(FYI, "open dir failed rc=%d\n", rc);
3133 + return rc;
3134 + }
3135 +
3136 +@@ -853,7 +853,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
3137 + rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
3138 + fid->volatile_fid, 0, srch_inf);
3139 + if (rc) {
3140 +- cifs_dbg(VFS, "query directory failed\n");
3141 ++ cifs_dbg(FYI, "query directory failed rc=%d\n", rc);
3142 + SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
3143 + }
3144 + return rc;
3145 +diff --git a/fs/coredump.c b/fs/coredump.c
3146 +index 26d05e3bc6db..e07cbb629f1c 100644
3147 +--- a/fs/coredump.c
3148 ++++ b/fs/coredump.c
3149 +@@ -803,3 +803,21 @@ int dump_align(struct coredump_params *cprm, int align)
3150 + return mod ? dump_skip(cprm, align - mod) : 1;
3151 + }
3152 + EXPORT_SYMBOL(dump_align);
3153 ++
3154 ++/*
3155 ++ * Ensures that the file size is big enough to contain the current file
3156 ++ * position. This prevents gdb from complaining about a truncated file
3157 ++ * if the last "write" to the file was dump_skip.
3158 ++ */
3159 ++void dump_truncate(struct coredump_params *cprm)
3160 ++{
3161 ++ struct file *file = cprm->file;
3162 ++ loff_t offset;
3163 ++
3164 ++ if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
3165 ++ offset = file->f_op->llseek(file, 0, SEEK_CUR);
3166 ++ if (i_size_read(file->f_mapping->host) < offset)
3167 ++ do_truncate(file->f_path.dentry, offset, 0, file);
3168 ++ }
3169 ++}
3170 ++EXPORT_SYMBOL(dump_truncate);
3171 +diff --git a/fs/dcache.c b/fs/dcache.c
3172 +index 11d466bbfb0b..5ca8f0b2b897 100644
3173 +--- a/fs/dcache.c
3174 ++++ b/fs/dcache.c
3175 +@@ -1128,11 +1128,12 @@ void shrink_dcache_sb(struct super_block *sb)
3176 + LIST_HEAD(dispose);
3177 +
3178 + freed = list_lru_walk(&sb->s_dentry_lru,
3179 +- dentry_lru_isolate_shrink, &dispose, UINT_MAX);
3180 ++ dentry_lru_isolate_shrink, &dispose, 1024);
3181 +
3182 + this_cpu_sub(nr_dentry_unused, freed);
3183 + shrink_dentry_list(&dispose);
3184 +- } while (freed > 0);
3185 ++ cond_resched();
3186 ++ } while (list_lru_count(&sb->s_dentry_lru) > 0);
3187 + }
3188 + EXPORT_SYMBOL(shrink_dcache_sb);
3189 +
3190 +diff --git a/fs/exec.c b/fs/exec.c
3191 +index 04c9cab4d4d3..3ba35c21726e 100644
3192 +--- a/fs/exec.c
3193 ++++ b/fs/exec.c
3194 +@@ -199,7 +199,24 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
3195 +
3196 + if (write) {
3197 + unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
3198 +- struct rlimit *rlim;
3199 ++ unsigned long ptr_size, limit;
3200 ++
3201 ++ /*
3202 ++ * Since the stack will hold pointers to the strings, we
3203 ++ * must account for them as well.
3204 ++ *
3205 ++ * The size calculation is the entire vma while each arg page is
3206 ++ * built, so each time we get here it's calculating how far it
3207 ++ * is currently (rather than each call being just the newly
3208 ++ * added size from the arg page). As a result, we need to
3209 ++ * always add the entire size of the pointers, so that on the
3210 ++ * last call to get_arg_page() we'll actually have the entire
3211 ++ * correct size.
3212 ++ */
3213 ++ ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
3214 ++ if (ptr_size > ULONG_MAX - size)
3215 ++ goto fail;
3216 ++ size += ptr_size;
3217 +
3218 + acct_arg_size(bprm, size / PAGE_SIZE);
3219 +
3220 +@@ -211,20 +228,24 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
3221 + return page;
3222 +
3223 + /*
3224 +- * Limit to 1/4-th the stack size for the argv+env strings.
3225 ++ * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM
3226 ++ * (whichever is smaller) for the argv+env strings.
3227 + * This ensures that:
3228 + * - the remaining binfmt code will not run out of stack space,
3229 + * - the program will have a reasonable amount of stack left
3230 + * to work from.
3231 + */
3232 +- rlim = current->signal->rlim;
3233 +- if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
3234 +- put_page(page);
3235 +- return NULL;
3236 +- }
3237 ++ limit = _STK_LIM / 4 * 3;
3238 ++ limit = min(limit, rlimit(RLIMIT_STACK) / 4);
3239 ++ if (size > limit)
3240 ++ goto fail;
3241 + }
3242 +
3243 + return page;
3244 ++
3245 ++fail:
3246 ++ put_page(page);
3247 ++ return NULL;
3248 + }
3249 +
3250 + static void put_arg_page(struct page *page)
3251 +diff --git a/fs/fcntl.c b/fs/fcntl.c
3252 +index ee85cd4e136a..62376451bbce 100644
3253 +--- a/fs/fcntl.c
3254 ++++ b/fs/fcntl.c
3255 +@@ -740,16 +740,10 @@ static int __init fcntl_init(void)
3256 + * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
3257 + * is defined as O_NONBLOCK on some platforms and not on others.
3258 + */
3259 +- BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
3260 +- O_RDONLY | O_WRONLY | O_RDWR |
3261 +- O_CREAT | O_EXCL | O_NOCTTY |
3262 +- O_TRUNC | O_APPEND | /* O_NONBLOCK | */
3263 +- __O_SYNC | O_DSYNC | FASYNC |
3264 +- O_DIRECT | O_LARGEFILE | O_DIRECTORY |
3265 +- O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
3266 +- __FMODE_EXEC | O_PATH | __O_TMPFILE |
3267 +- __FMODE_NONOTIFY
3268 +- ));
3269 ++ BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ !=
3270 ++ HWEIGHT32(
3271 ++ (VALID_OPEN_FLAGS & ~(O_NONBLOCK | O_NDELAY)) |
3272 ++ __FMODE_EXEC | __FMODE_NONOTIFY));
3273 +
3274 + fasync_cache = kmem_cache_create("fasync_cache",
3275 + sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL);
3276 +diff --git a/fs/mount.h b/fs/mount.h
3277 +index 32cabd55a787..bae2b0943019 100644
3278 +--- a/fs/mount.h
3279 ++++ b/fs/mount.h
3280 +@@ -57,6 +57,7 @@ struct mount {
3281 + struct mnt_namespace *mnt_ns; /* containing namespace */
3282 + struct mountpoint *mnt_mp; /* where is it mounted */
3283 + struct hlist_node mnt_mp_list; /* list mounts with the same mountpoint */
3284 ++ struct list_head mnt_umounting; /* list entry for umount propagation */
3285 + #ifdef CONFIG_FSNOTIFY
3286 + struct hlist_head mnt_fsnotify_marks;
3287 + __u32 mnt_fsnotify_mask;
3288 +diff --git a/fs/namespace.c b/fs/namespace.c
3289 +index df20ee946f7c..58b281ad30d5 100644
3290 +--- a/fs/namespace.c
3291 ++++ b/fs/namespace.c
3292 +@@ -237,6 +237,7 @@ static struct mount *alloc_vfsmnt(const char *name)
3293 + INIT_LIST_HEAD(&mnt->mnt_slave_list);
3294 + INIT_LIST_HEAD(&mnt->mnt_slave);
3295 + INIT_HLIST_NODE(&mnt->mnt_mp_list);
3296 ++ INIT_LIST_HEAD(&mnt->mnt_umounting);
3297 + #ifdef CONFIG_FSNOTIFY
3298 + INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
3299 + #endif
3300 +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
3301 +index aadb4af4a0fe..b6d97dfa9cb6 100644
3302 +--- a/fs/nfs/dir.c
3303 ++++ b/fs/nfs/dir.c
3304 +@@ -2446,6 +2446,20 @@ int nfs_may_open(struct inode *inode, struct rpc_cred *cred, int openflags)
3305 + }
3306 + EXPORT_SYMBOL_GPL(nfs_may_open);
3307 +
3308 ++static int nfs_execute_ok(struct inode *inode, int mask)
3309 ++{
3310 ++ struct nfs_server *server = NFS_SERVER(inode);
3311 ++ int ret;
3312 ++
3313 ++ if (mask & MAY_NOT_BLOCK)
3314 ++ ret = nfs_revalidate_inode_rcu(server, inode);
3315 ++ else
3316 ++ ret = nfs_revalidate_inode(server, inode);
3317 ++ if (ret == 0 && !execute_ok(inode))
3318 ++ ret = -EACCES;
3319 ++ return ret;
3320 ++}
3321 ++
3322 + int nfs_permission(struct inode *inode, int mask)
3323 + {
3324 + struct rpc_cred *cred;
3325 +@@ -2463,6 +2477,9 @@ int nfs_permission(struct inode *inode, int mask)
3326 + case S_IFLNK:
3327 + goto out;
3328 + case S_IFREG:
3329 ++ if ((mask & MAY_OPEN) &&
3330 ++ nfs_server_capable(inode, NFS_CAP_ATOMIC_OPEN))
3331 ++ return 0;
3332 + break;
3333 + case S_IFDIR:
3334 + /*
3335 +@@ -2495,8 +2512,8 @@ force_lookup:
3336 + res = PTR_ERR(cred);
3337 + }
3338 + out:
3339 +- if (!res && (mask & MAY_EXEC) && !execute_ok(inode))
3340 +- res = -EACCES;
3341 ++ if (!res && (mask & MAY_EXEC))
3342 ++ res = nfs_execute_ok(inode, mask);
3343 +
3344 + dfprintk(VFS, "NFS: permission(%s/%lu), mask=0x%x, res=%d\n",
3345 + inode->i_sb->s_id, inode->i_ino, mask, res);
3346 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
3347 +index 5d8c7e978c33..f06af7248be7 100644
3348 +--- a/fs/nfs/nfs4proc.c
3349 ++++ b/fs/nfs/nfs4proc.c
3350 +@@ -2068,8 +2068,6 @@ static int nfs4_opendata_access(struct rpc_cred *cred,
3351 + if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0)
3352 + return 0;
3353 +
3354 +- /* even though OPEN succeeded, access is denied. Close the file */
3355 +- nfs4_close_state(state, fmode);
3356 + return -EACCES;
3357 + }
3358 +
3359 +diff --git a/fs/open.c b/fs/open.c
3360 +index ff80b2542989..d0169e52d7fe 100644
3361 +--- a/fs/open.c
3362 ++++ b/fs/open.c
3363 +@@ -881,6 +881,12 @@ static inline int build_open_flags(int flags, umode_t mode, struct open_flags *o
3364 + int lookup_flags = 0;
3365 + int acc_mode;
3366 +
3367 ++ /*
3368 ++ * Clear out all open flags we don't know about so that we don't report
3369 ++ * them in fcntl(F_GETFD) or similar interfaces.
3370 ++ */
3371 ++ flags &= VALID_OPEN_FLAGS;
3372 ++
3373 + if (flags & (O_CREAT | __O_TMPFILE))
3374 + op->mode = (mode & S_IALLUGO) | S_IFREG;
3375 + else
3376 +diff --git a/fs/pnode.c b/fs/pnode.c
3377 +index b394ca5307ec..d15c63e97ef1 100644
3378 +--- a/fs/pnode.c
3379 ++++ b/fs/pnode.c
3380 +@@ -24,6 +24,11 @@ static inline struct mount *first_slave(struct mount *p)
3381 + return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave);
3382 + }
3383 +
3384 ++static inline struct mount *last_slave(struct mount *p)
3385 ++{
3386 ++ return list_entry(p->mnt_slave_list.prev, struct mount, mnt_slave);
3387 ++}
3388 ++
3389 + static inline struct mount *next_slave(struct mount *p)
3390 + {
3391 + return list_entry(p->mnt_slave.next, struct mount, mnt_slave);
3392 +@@ -164,6 +169,19 @@ static struct mount *propagation_next(struct mount *m,
3393 + }
3394 + }
3395 +
3396 ++static struct mount *skip_propagation_subtree(struct mount *m,
3397 ++ struct mount *origin)
3398 ++{
3399 ++ /*
3400 ++ * Advance m such that propagation_next will not return
3401 ++ * the slaves of m.
3402 ++ */
3403 ++ if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
3404 ++ m = last_slave(m);
3405 ++
3406 ++ return m;
3407 ++}
3408 ++
3409 + static struct mount *next_group(struct mount *m, struct mount *origin)
3410 + {
3411 + while (1) {
3412 +@@ -415,65 +433,104 @@ void propagate_mount_unlock(struct mount *mnt)
3413 + }
3414 + }
3415 +
3416 +-/*
3417 +- * Mark all mounts that the MNT_LOCKED logic will allow to be unmounted.
3418 +- */
3419 +-static void mark_umount_candidates(struct mount *mnt)
3420 ++static void umount_one(struct mount *mnt, struct list_head *to_umount)
3421 + {
3422 +- struct mount *parent = mnt->mnt_parent;
3423 +- struct mount *m;
3424 +-
3425 +- BUG_ON(parent == mnt);
3426 +-
3427 +- for (m = propagation_next(parent, parent); m;
3428 +- m = propagation_next(m, parent)) {
3429 +- struct mount *child = __lookup_mnt(&m->mnt,
3430 +- mnt->mnt_mountpoint);
3431 +- if (!child || (child->mnt.mnt_flags & MNT_UMOUNT))
3432 +- continue;
3433 +- if (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m)) {
3434 +- SET_MNT_MARK(child);
3435 +- }
3436 +- }
3437 ++ CLEAR_MNT_MARK(mnt);
3438 ++ mnt->mnt.mnt_flags |= MNT_UMOUNT;
3439 ++ list_del_init(&mnt->mnt_child);
3440 ++ list_del_init(&mnt->mnt_umounting);
3441 ++ list_move_tail(&mnt->mnt_list, to_umount);
3442 + }
3443 +
3444 + /*
3445 + * NOTE: unmounting 'mnt' naturally propagates to all other mounts its
3446 + * parent propagates to.
3447 + */
3448 +-static void __propagate_umount(struct mount *mnt)
3449 ++static bool __propagate_umount(struct mount *mnt,
3450 ++ struct list_head *to_umount,
3451 ++ struct list_head *to_restore)
3452 + {
3453 +- struct mount *parent = mnt->mnt_parent;
3454 +- struct mount *m;
3455 ++ bool progress = false;
3456 ++ struct mount *child;
3457 +
3458 +- BUG_ON(parent == mnt);
3459 ++ /*
3460 ++ * The state of the parent won't change if this mount is
3461 ++ * already unmounted or marked as without children.
3462 ++ */
3463 ++ if (mnt->mnt.mnt_flags & (MNT_UMOUNT | MNT_MARKED))
3464 ++ goto out;
3465 +
3466 +- for (m = propagation_next(parent, parent); m;
3467 +- m = propagation_next(m, parent)) {
3468 +- struct mount *topper;
3469 +- struct mount *child = __lookup_mnt(&m->mnt,
3470 +- mnt->mnt_mountpoint);
3471 +- /*
3472 +- * umount the child only if the child has no children
3473 +- * and the child is marked safe to unmount.
3474 +- */
3475 +- if (!child || !IS_MNT_MARKED(child))
3476 ++ /* Verify topper is the only grandchild that has not been
3477 ++ * speculatively unmounted.
3478 ++ */
3479 ++ list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
3480 ++ if (child->mnt_mountpoint == mnt->mnt.mnt_root)
3481 + continue;
3482 +- CLEAR_MNT_MARK(child);
3483 ++ if (!list_empty(&child->mnt_umounting) && IS_MNT_MARKED(child))
3484 ++ continue;
3485 ++ /* Found a mounted child */
3486 ++ goto children;
3487 ++ }
3488 +
3489 +- /* If there is exactly one mount covering all of child
3490 +- * replace child with that mount.
3491 +- */
3492 +- topper = find_topper(child);
3493 +- if (topper)
3494 +- mnt_change_mountpoint(child->mnt_parent, child->mnt_mp,
3495 +- topper);
3496 ++ /* Mark mounts that can be unmounted if not locked */
3497 ++ SET_MNT_MARK(mnt);
3498 ++ progress = true;
3499 ++
3500 ++ /* If a mount is without children and not locked umount it. */
3501 ++ if (!IS_MNT_LOCKED(mnt)) {
3502 ++ umount_one(mnt, to_umount);
3503 ++ } else {
3504 ++children:
3505 ++ list_move_tail(&mnt->mnt_umounting, to_restore);
3506 ++ }
3507 ++out:
3508 ++ return progress;
3509 ++}
3510 ++
3511 ++static void umount_list(struct list_head *to_umount,
3512 ++ struct list_head *to_restore)
3513 ++{
3514 ++ struct mount *mnt, *child, *tmp;
3515 ++ list_for_each_entry(mnt, to_umount, mnt_list) {
3516 ++ list_for_each_entry_safe(child, tmp, &mnt->mnt_mounts, mnt_child) {
3517 ++ /* topper? */
3518 ++ if (child->mnt_mountpoint == mnt->mnt.mnt_root)
3519 ++ list_move_tail(&child->mnt_umounting, to_restore);
3520 ++ else
3521 ++ umount_one(child, to_umount);
3522 ++ }
3523 ++ }
3524 ++}
3525 +
3526 +- if (list_empty(&child->mnt_mounts)) {
3527 +- list_del_init(&child->mnt_child);
3528 +- child->mnt.mnt_flags |= MNT_UMOUNT;
3529 +- list_move_tail(&child->mnt_list, &mnt->mnt_list);
3530 ++static void restore_mounts(struct list_head *to_restore)
3531 ++{
3532 ++ /* Restore mounts to a clean working state */
3533 ++ while (!list_empty(to_restore)) {
3534 ++ struct mount *mnt, *parent;
3535 ++ struct mountpoint *mp;
3536 ++
3537 ++ mnt = list_first_entry(to_restore, struct mount, mnt_umounting);
3538 ++ CLEAR_MNT_MARK(mnt);
3539 ++ list_del_init(&mnt->mnt_umounting);
3540 ++
3541 ++ /* Should this mount be reparented? */
3542 ++ mp = mnt->mnt_mp;
3543 ++ parent = mnt->mnt_parent;
3544 ++ while (parent->mnt.mnt_flags & MNT_UMOUNT) {
3545 ++ mp = parent->mnt_mp;
3546 ++ parent = parent->mnt_parent;
3547 + }
3548 ++ if (parent != mnt->mnt_parent)
3549 ++ mnt_change_mountpoint(parent, mp, mnt);
3550 ++ }
3551 ++}
3552 ++
3553 ++static void cleanup_umount_visitations(struct list_head *visited)
3554 ++{
3555 ++ while (!list_empty(visited)) {
3556 ++ struct mount *mnt =
3557 ++ list_first_entry(visited, struct mount, mnt_umounting);
3558 ++ list_del_init(&mnt->mnt_umounting);
3559 + }
3560 + }
3561 +
3562 +@@ -487,11 +544,68 @@ static void __propagate_umount(struct mount *mnt)
3563 + int propagate_umount(struct list_head *list)
3564 + {
3565 + struct mount *mnt;
3566 ++ LIST_HEAD(to_restore);
3567 ++ LIST_HEAD(to_umount);
3568 ++ LIST_HEAD(visited);
3569 ++
3570 ++ /* Find candidates for unmounting */
3571 ++ list_for_each_entry_reverse(mnt, list, mnt_list) {
3572 ++ struct mount *parent = mnt->mnt_parent;
3573 ++ struct mount *m;
3574 ++
3575 ++ /*
3576 ++		 * If this mount has already been visited it is known that its
3577 ++		 * entire peer group and all of their slaves in the propagation
3578 ++		 * tree for the mountpoint have already been visited and there is
3579 ++ * no need to visit them again.
3580 ++ */
3581 ++ if (!list_empty(&mnt->mnt_umounting))
3582 ++ continue;
3583 ++
3584 ++ list_add_tail(&mnt->mnt_umounting, &visited);
3585 ++ for (m = propagation_next(parent, parent); m;
3586 ++ m = propagation_next(m, parent)) {
3587 ++ struct mount *child = __lookup_mnt(&m->mnt,
3588 ++ mnt->mnt_mountpoint);
3589 ++ if (!child)
3590 ++ continue;
3591 ++
3592 ++ if (!list_empty(&child->mnt_umounting)) {
3593 ++ /*
3594 ++ * If the child has already been visited it is
3595 ++				 * known that its entire peer group and all of
3596 ++				 * their slaves in the propagation tree for the
3597 ++				 * mountpoint have already been visited and there
3598 ++ * is no need to visit this subtree again.
3599 ++ */
3600 ++ m = skip_propagation_subtree(m, parent);
3601 ++ continue;
3602 ++ } else if (child->mnt.mnt_flags & MNT_UMOUNT) {
3603 ++ /*
3604 ++				 * We have come across a partially unmounted
3605 ++				 * mount in the list that has not been visited yet.
3606 ++ * Remember it has been visited and continue
3607 ++ * about our merry way.
3608 ++ */
3609 ++ list_add_tail(&child->mnt_umounting, &visited);
3610 ++ continue;
3611 ++ }
3612 ++
3613 ++ /* Check the child and parents while progress is made */
3614 ++ while (__propagate_umount(child,
3615 ++ &to_umount, &to_restore)) {
3616 ++ /* Is the parent a umount candidate? */
3617 ++ child = child->mnt_parent;
3618 ++ if (list_empty(&child->mnt_umounting))
3619 ++ break;
3620 ++ }
3621 ++ }
3622 ++ }
3623 +
3624 +- list_for_each_entry_reverse(mnt, list, mnt_list)
3625 +- mark_umount_candidates(mnt);
3626 ++ umount_list(&to_umount, &to_restore);
3627 ++ restore_mounts(&to_restore);
3628 ++ cleanup_umount_visitations(&visited);
3629 ++ list_splice_tail(&to_umount, list);
3630 +
3631 +- list_for_each_entry(mnt, list, mnt_list)
3632 +- __propagate_umount(mnt);
3633 + return 0;
3634 + }
3635 +diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/xfs_attr.h
3636 +index dd4824589470..234331227c0c 100644
3637 +--- a/fs/xfs/xfs_attr.h
3638 ++++ b/fs/xfs/xfs_attr.h
3639 +@@ -112,6 +112,7 @@ typedef struct attrlist_cursor_kern {
3640 + *========================================================================*/
3641 +
3642 +
3643 ++/* Return 0 on success, or -errno; other state communicated via *context */
3644 + typedef int (*put_listent_func_t)(struct xfs_attr_list_context *, int,
3645 + unsigned char *, int, int, unsigned char *);
3646 +
3647 +diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
3648 +index 8f7e09d0d0f0..36db8b21969f 100644
3649 +--- a/fs/xfs/xfs_attr_list.c
3650 ++++ b/fs/xfs/xfs_attr_list.c
3651 +@@ -108,16 +108,14 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
3652 + (int)sfe->namelen,
3653 + (int)sfe->valuelen,
3654 + &sfe->nameval[sfe->namelen]);
3655 +-
3656 ++ if (error)
3657 ++ return error;
3658 + /*
3659 + * Either search callback finished early or
3660 + * didn't fit it all in the buffer after all.
3661 + */
3662 + if (context->seen_enough)
3663 + break;
3664 +-
3665 +- if (error)
3666 +- return error;
3667 + sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
3668 + }
3669 + trace_xfs_attr_list_sf_all(context);
3670 +@@ -581,7 +579,7 @@ xfs_attr_put_listent(
3671 + trace_xfs_attr_list_full(context);
3672 + alist->al_more = 1;
3673 + context->seen_enough = 1;
3674 +- return 1;
3675 ++ return 0;
3676 + }
3677 +
3678 + aep = (attrlist_ent_t *)&context->alist[context->firstu];
3679 +diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
3680 +index e69a0899bc05..3b7985991823 100644
3681 +--- a/fs/xfs/xfs_ioctl.c
3682 ++++ b/fs/xfs/xfs_ioctl.c
3683 +@@ -402,6 +402,7 @@ xfs_attrlist_by_handle(
3684 + {
3685 + int error = -ENOMEM;
3686 + attrlist_cursor_kern_t *cursor;
3687 ++ struct xfs_fsop_attrlist_handlereq __user *p = arg;
3688 + xfs_fsop_attrlist_handlereq_t al_hreq;
3689 + struct dentry *dentry;
3690 + char *kbuf;
3691 +@@ -434,6 +435,11 @@ xfs_attrlist_by_handle(
3692 + if (error)
3693 + goto out_kfree;
3694 +
3695 ++ if (copy_to_user(&p->pos, cursor, sizeof(attrlist_cursor_kern_t))) {
3696 ++ error = -EFAULT;
3697 ++ goto out_kfree;
3698 ++ }
3699 ++
3700 + if (copy_to_user(al_hreq.buffer, kbuf, al_hreq.buflen))
3701 + error = -EFAULT;
3702 +
3703 +diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c
3704 +index c036815183cb..ead53c933de6 100644
3705 +--- a/fs/xfs/xfs_xattr.c
3706 ++++ b/fs/xfs/xfs_xattr.c
3707 +@@ -151,7 +151,8 @@ xfs_xattr_put_listent(
3708 + arraytop = context->count + prefix_len + namelen + 1;
3709 + if (arraytop > context->firstu) {
3710 + context->count = -1; /* insufficient space */
3711 +- return 1;
3712 ++ context->seen_enough = 1;
3713 ++ return 0;
3714 + }
3715 + offset = (char *)context->alist + context->count;
3716 + strncpy(offset, xfs_xattr_prefix(flags), prefix_len);
3717 +@@ -193,12 +194,15 @@ list_one_attr(const char *name, const size_t len, void *data,
3718 + }
3719 +
3720 + ssize_t
3721 +-xfs_vn_listxattr(struct dentry *dentry, char *data, size_t size)
3722 ++xfs_vn_listxattr(
3723 ++ struct dentry *dentry,
3724 ++ char *data,
3725 ++ size_t size)
3726 + {
3727 + struct xfs_attr_list_context context;
3728 + struct attrlist_cursor_kern cursor = { 0 };
3729 +- struct inode *inode = d_inode(dentry);
3730 +- int error;
3731 ++ struct inode *inode = d_inode(dentry);
3732 ++ int error;
3733 +
3734 + /*
3735 + * First read the regular on-disk attributes.
3736 +@@ -216,7 +220,9 @@ xfs_vn_listxattr(struct dentry *dentry, char *data, size_t size)
3737 + else
3738 + context.put_listent = xfs_xattr_put_listent_sizes;
3739 +
3740 +- xfs_attr_list_int(&context);
3741 ++ error = xfs_attr_list_int(&context);
3742 ++ if (error)
3743 ++ return error;
3744 + if (context.count < 0)
3745 + return -ERANGE;
3746 +
3747 +diff --git a/include/linux/coredump.h b/include/linux/coredump.h
3748 +index d016a121a8c4..28ffa94aed6b 100644
3749 +--- a/include/linux/coredump.h
3750 ++++ b/include/linux/coredump.h
3751 +@@ -14,6 +14,7 @@ struct coredump_params;
3752 + extern int dump_skip(struct coredump_params *cprm, size_t nr);
3753 + extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr);
3754 + extern int dump_align(struct coredump_params *cprm, int align);
3755 ++extern void dump_truncate(struct coredump_params *cprm);
3756 + #ifdef CONFIG_COREDUMP
3757 + extern void do_coredump(const siginfo_t *siginfo);
3758 + #else
3759 +diff --git a/include/linux/device.h b/include/linux/device.h
3760 +index 6558af90c8fe..98a1d9748eec 100644
3761 +--- a/include/linux/device.h
3762 ++++ b/include/linux/device.h
3763 +@@ -338,6 +338,7 @@ int subsys_virtual_register(struct bus_type *subsys,
3764 + * @suspend: Used to put the device to sleep mode, usually to a low power
3765 + * state.
3766 + * @resume: Used to bring the device from the sleep mode.
3767 ++ * @shutdown: Called at shut-down time to quiesce the device.
3768 + * @ns_type: Callbacks so sysfs can detemine namespaces.
3769 + * @namespace: Namespace of the device belongs to this class.
3770 + * @pm: The default device power management operations of this class.
3771 +@@ -366,6 +367,7 @@ struct class {
3772 +
3773 + int (*suspend)(struct device *dev, pm_message_t state);
3774 + int (*resume)(struct device *dev);
3775 ++ int (*shutdown)(struct device *dev);
3776 +
3777 + const struct kobj_ns_type_operations *ns_type;
3778 + const void *(*namespace)(struct device *dev);
3779 +diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h
3780 +index 76ce329e656d..1b48d9c9a561 100644
3781 +--- a/include/linux/fcntl.h
3782 ++++ b/include/linux/fcntl.h
3783 +@@ -3,6 +3,12 @@
3784 +
3785 + #include <uapi/linux/fcntl.h>
3786 +
3787 ++/* list of all valid flags for the open/openat flags argument: */
3788 ++#define VALID_OPEN_FLAGS \
3789 ++ (O_RDONLY | O_WRONLY | O_RDWR | O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC | \
3790 ++ O_APPEND | O_NDELAY | O_NONBLOCK | O_NDELAY | __O_SYNC | O_DSYNC | \
3791 ++ FASYNC | O_DIRECT | O_LARGEFILE | O_DIRECTORY | O_NOFOLLOW | \
3792 ++ O_NOATIME | O_CLOEXEC | O_PATH | __O_TMPFILE)
3793 +
3794 + #ifndef force_o_largefile
3795 + #define force_o_largefile() (BITS_PER_LONG != 32)
3796 +diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
3797 +index 2a6b9947aaa3..743b34f56f2b 100644
3798 +--- a/include/linux/list_lru.h
3799 ++++ b/include/linux/list_lru.h
3800 +@@ -44,6 +44,7 @@ struct list_lru_node {
3801 + /* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
3802 + struct list_lru_memcg *memcg_lrus;
3803 + #endif
3804 ++ long nr_items;
3805 + } ____cacheline_aligned_in_smp;
3806 +
3807 + struct list_lru {
3808 +diff --git a/include/linux/random.h b/include/linux/random.h
3809 +index b05856e16b75..0fe49a14daa5 100644
3810 +--- a/include/linux/random.h
3811 ++++ b/include/linux/random.h
3812 +@@ -23,6 +23,7 @@ extern const struct file_operations random_fops, urandom_fops;
3813 + #endif
3814 +
3815 + unsigned int get_random_int(void);
3816 ++unsigned long get_random_long(void);
3817 + unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
3818 +
3819 + u32 prandom_u32(void);
3820 +diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
3821 +index fb86963859c7..866cb3c596f9 100644
3822 +--- a/include/linux/timekeeper_internal.h
3823 ++++ b/include/linux/timekeeper_internal.h
3824 +@@ -29,7 +29,6 @@
3825 + */
3826 + struct tk_read_base {
3827 + struct clocksource *clock;
3828 +- cycle_t (*read)(struct clocksource *cs);
3829 + cycle_t mask;
3830 + cycle_t cycle_last;
3831 + u32 mult;
3832 +diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
3833 +index c00c7393ce8c..e70cea22f093 100644
3834 +--- a/include/linux/usb/hcd.h
3835 ++++ b/include/linux/usb/hcd.h
3836 +@@ -548,9 +548,9 @@ extern void usb_ep0_reinit(struct usb_device *);
3837 + ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
3838 +
3839 + #define EndpointRequest \
3840 +- ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
3841 ++ ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8)
3842 + #define EndpointOutRequest \
3843 +- ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
3844 ++ ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8)
3845 +
3846 + /* class requests from the USB 2.0 hub spec, table 11-15 */
3847 + /* GetBusState and SetHubDescriptor are optional, omitted */
3848 +diff --git a/include/net/xfrm.h b/include/net/xfrm.h
3849 +index 36ac102c97c7..3dac7ac61f48 100644
3850 +--- a/include/net/xfrm.h
3851 ++++ b/include/net/xfrm.h
3852 +@@ -945,10 +945,6 @@ struct xfrm_dst {
3853 + struct flow_cache_object flo;
3854 + struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3855 + int num_pols, num_xfrms;
3856 +-#ifdef CONFIG_XFRM_SUB_POLICY
3857 +- struct flowi *origin;
3858 +- struct xfrm_selector *partner;
3859 +-#endif
3860 + u32 xfrm_genid;
3861 + u32 policy_genid;
3862 + u32 route_mtu_cached;
3863 +@@ -964,12 +960,6 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
3864 + dst_release(xdst->route);
3865 + if (likely(xdst->u.dst.xfrm))
3866 + xfrm_state_put(xdst->u.dst.xfrm);
3867 +-#ifdef CONFIG_XFRM_SUB_POLICY
3868 +- kfree(xdst->origin);
3869 +- xdst->origin = NULL;
3870 +- kfree(xdst->partner);
3871 +- xdst->partner = NULL;
3872 +-#endif
3873 + }
3874 + #endif
3875 +
3876 +diff --git a/ipc/mqueue.c b/ipc/mqueue.c
3877 +index c3fc5c2b63f3..e4e8b6080b33 100644
3878 +--- a/ipc/mqueue.c
3879 ++++ b/ipc/mqueue.c
3880 +@@ -1239,8 +1239,10 @@ retry:
3881 +
3882 + timeo = MAX_SCHEDULE_TIMEOUT;
3883 + ret = netlink_attachskb(sock, nc, &timeo, NULL);
3884 +- if (ret == 1)
3885 ++ if (ret == 1) {
3886 ++ sock = NULL;
3887 + goto retry;
3888 ++ }
3889 + if (ret) {
3890 + sock = NULL;
3891 + nc = NULL;
3892 +diff --git a/kernel/extable.c b/kernel/extable.c
3893 +index c98f926277a8..818019777503 100644
3894 +--- a/kernel/extable.c
3895 ++++ b/kernel/extable.c
3896 +@@ -67,7 +67,7 @@ static inline int init_kernel_text(unsigned long addr)
3897 + return 0;
3898 + }
3899 +
3900 +-int core_kernel_text(unsigned long addr)
3901 ++int notrace core_kernel_text(unsigned long addr)
3902 + {
3903 + if (addr >= (unsigned long)_stext &&
3904 + addr < (unsigned long)_etext)
3905 +diff --git a/kernel/fork.c b/kernel/fork.c
3906 +index 8209fa2d36ef..edc1916e89ee 100644
3907 +--- a/kernel/fork.c
3908 ++++ b/kernel/fork.c
3909 +@@ -361,7 +361,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
3910 + set_task_stack_end_magic(tsk);
3911 +
3912 + #ifdef CONFIG_CC_STACKPROTECTOR
3913 +- tsk->stack_canary = get_random_int();
3914 ++ tsk->stack_canary = get_random_long();
3915 + #endif
3916 +
3917 + /*
3918 +diff --git a/kernel/panic.c b/kernel/panic.c
3919 +index a4f7820f5930..10e28b8d1ac9 100644
3920 +--- a/kernel/panic.c
3921 ++++ b/kernel/panic.c
3922 +@@ -166,7 +166,7 @@ void panic(const char *fmt, ...)
3923 + * Delay timeout seconds before rebooting the machine.
3924 + * We can't use the "normal" timers since we just panicked.
3925 + */
3926 +- pr_emerg("Rebooting in %d seconds..", panic_timeout);
3927 ++ pr_emerg("Rebooting in %d seconds..\n", panic_timeout);
3928 +
3929 + for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
3930 + touch_nmi_watchdog();
3931 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
3932 +index 6cb5f00696f5..976d5fbcd60d 100644
3933 +--- a/kernel/sched/core.c
3934 ++++ b/kernel/sched/core.c
3935 +@@ -5863,6 +5863,9 @@ enum s_alloc {
3936 + * Build an iteration mask that can exclude certain CPUs from the upwards
3937 + * domain traversal.
3938 + *
3939 ++ * Only CPUs that can arrive at this group should be considered to continue
3940 ++ * balancing.
3941 ++ *
3942 + * Asymmetric node setups can result in situations where the domain tree is of
3943 + * unequal depth, make sure to skip domains that already cover the entire
3944 + * range.
3945 +@@ -5874,18 +5877,31 @@ enum s_alloc {
3946 + */
3947 + static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
3948 + {
3949 +- const struct cpumask *span = sched_domain_span(sd);
3950 ++ const struct cpumask *sg_span = sched_group_cpus(sg);
3951 + struct sd_data *sdd = sd->private;
3952 + struct sched_domain *sibling;
3953 + int i;
3954 +
3955 +- for_each_cpu(i, span) {
3956 ++ for_each_cpu(i, sg_span) {
3957 + sibling = *per_cpu_ptr(sdd->sd, i);
3958 +- if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
3959 ++
3960 ++ /*
3961 ++ * Can happen in the asymmetric case, where these siblings are
3962 ++ * unused. The mask will not be empty because those CPUs that
3963 ++ * do have the top domain _should_ span the domain.
3964 ++ */
3965 ++ if (!sibling->child)
3966 ++ continue;
3967 ++
3968 ++ /* If we would not end up here, we can't continue from here */
3969 ++ if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
3970 + continue;
3971 +
3972 + cpumask_set_cpu(i, sched_group_mask(sg));
3973 + }
3974 ++
3975 ++ /* We must not have empty masks here */
3976 ++ WARN_ON_ONCE(cpumask_empty(sched_group_mask(sg)));
3977 + }
3978 +
3979 + /*
3980 +diff --git a/kernel/sysctl.c b/kernel/sysctl.c
3981 +index 1431089b8a67..d59551865035 100644
3982 +--- a/kernel/sysctl.c
3983 ++++ b/kernel/sysctl.c
3984 +@@ -173,7 +173,7 @@ extern int no_unaligned_warning;
3985 + #define SYSCTL_WRITES_WARN 0
3986 + #define SYSCTL_WRITES_STRICT 1
3987 +
3988 +-static int sysctl_writes_strict = SYSCTL_WRITES_WARN;
3989 ++static int sysctl_writes_strict = SYSCTL_WRITES_STRICT;
3990 +
3991 + static int proc_do_cad_pid(struct ctl_table *table, int write,
3992 + void __user *buffer, size_t *lenp, loff_t *ppos);
3993 +diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
3994 +index d296b904685b..308f8f019594 100644
3995 +--- a/kernel/time/timekeeping.c
3996 ++++ b/kernel/time/timekeeping.c
3997 +@@ -116,6 +116,26 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
3998 + tk->offs_boot = ktime_add(tk->offs_boot, delta);
3999 + }
4000 +
4001 ++/*
4002 ++ * tk_clock_read - atomic clocksource read() helper
4003 ++ *
4004 ++ * This helper is necessary to use in the read paths because, while the
4005 ++ * seqlock ensures we don't return a bad value while structures are updated,
4006 ++ * it doesn't protect from potential crashes. There is the possibility that
4007 ++ * the tkr's clocksource may change between the read reference and the
4008 ++ * clock reference passed to the read function. This can cause crashes if
4009 ++ * the wrong clocksource is passed to the wrong read function.
4010 ++ * This isn't necessary to use when holding the timekeeper_lock or doing
4011 ++ * a read of the fast-timekeeper tkrs (which is protected by its own locking
4012 ++ * and update logic).
4013 ++ */
4014 ++static inline u64 tk_clock_read(struct tk_read_base *tkr)
4015 ++{
4016 ++ struct clocksource *clock = READ_ONCE(tkr->clock);
4017 ++
4018 ++ return clock->read(clock);
4019 ++}
4020 ++
4021 + #ifdef CONFIG_DEBUG_TIMEKEEPING
4022 + #define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */
4023 + /*
4024 +@@ -184,7 +204,7 @@ static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
4025 + */
4026 + do {
4027 + seq = read_seqcount_begin(&tk_core.seq);
4028 +- now = tkr->read(tkr->clock);
4029 ++ now = tk_clock_read(tkr);
4030 + last = tkr->cycle_last;
4031 + mask = tkr->mask;
4032 + max = tkr->clock->max_cycles;
4033 +@@ -218,7 +238,7 @@ static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
4034 + cycle_t cycle_now, delta;
4035 +
4036 + /* read clocksource */
4037 +- cycle_now = tkr->read(tkr->clock);
4038 ++ cycle_now = tk_clock_read(tkr);
4039 +
4040 + /* calculate the delta since the last update_wall_time */
4041 + delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
4042 +@@ -246,12 +266,10 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
4043 +
4044 + old_clock = tk->tkr_mono.clock;
4045 + tk->tkr_mono.clock = clock;
4046 +- tk->tkr_mono.read = clock->read;
4047 + tk->tkr_mono.mask = clock->mask;
4048 +- tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock);
4049 ++ tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);
4050 +
4051 + tk->tkr_raw.clock = clock;
4052 +- tk->tkr_raw.read = clock->read;
4053 + tk->tkr_raw.mask = clock->mask;
4054 + tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;
4055 +
4056 +@@ -440,7 +458,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
4057 +
4058 + now += timekeeping_delta_to_ns(tkr,
4059 + clocksource_delta(
4060 +- tkr->read(tkr->clock),
4061 ++ tk_clock_read(tkr),
4062 + tkr->cycle_last,
4063 + tkr->mask));
4064 + } while (read_seqcount_retry(&tkf->seq, seq));
4065 +@@ -468,6 +486,10 @@ static cycle_t dummy_clock_read(struct clocksource *cs)
4066 + return cycles_at_suspend;
4067 + }
4068 +
4069 ++static struct clocksource dummy_clock = {
4070 ++ .read = dummy_clock_read,
4071 ++};
4072 ++
4073 + /**
4074 + * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
4075 + * @tk: Timekeeper to snapshot.
4076 +@@ -484,13 +506,13 @@ static void halt_fast_timekeeper(struct timekeeper *tk)
4077 + struct tk_read_base *tkr = &tk->tkr_mono;
4078 +
4079 + memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
4080 +- cycles_at_suspend = tkr->read(tkr->clock);
4081 +- tkr_dummy.read = dummy_clock_read;
4082 ++ cycles_at_suspend = tk_clock_read(tkr);
4083 ++ tkr_dummy.clock = &dummy_clock;
4084 + update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);
4085 +
4086 + tkr = &tk->tkr_raw;
4087 + memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
4088 +- tkr_dummy.read = dummy_clock_read;
4089 ++ tkr_dummy.clock = &dummy_clock;
4090 + update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
4091 + }
4092 +
4093 +@@ -635,11 +657,10 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
4094 + */
4095 + static void timekeeping_forward_now(struct timekeeper *tk)
4096 + {
4097 +- struct clocksource *clock = tk->tkr_mono.clock;
4098 + cycle_t cycle_now, delta;
4099 + s64 nsec;
4100 +
4101 +- cycle_now = tk->tkr_mono.read(clock);
4102 ++ cycle_now = tk_clock_read(&tk->tkr_mono);
4103 + delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
4104 + tk->tkr_mono.cycle_last = cycle_now;
4105 + tk->tkr_raw.cycle_last = cycle_now;
4106 +@@ -1406,7 +1427,7 @@ void timekeeping_resume(void)
4107 + * The less preferred source will only be tried if there is no better
4108 + * usable source. The rtc part is handled separately in rtc core code.
4109 + */
4110 +- cycle_now = tk->tkr_mono.read(clock);
4111 ++ cycle_now = tk_clock_read(&tk->tkr_mono);
4112 + if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
4113 + cycle_now > tk->tkr_mono.cycle_last) {
4114 + u64 num, max = ULLONG_MAX;
4115 +@@ -1801,7 +1822,7 @@ void update_wall_time(void)
4116 + #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
4117 + offset = real_tk->cycle_interval;
4118 + #else
4119 +- offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock),
4120 ++ offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
4121 + tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
4122 + #endif
4123 +
4124 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
4125 +index a4c0ae70c6dd..591b3b4f5337 100644
4126 +--- a/kernel/trace/trace.c
4127 ++++ b/kernel/trace/trace.c
4128 +@@ -1638,7 +1638,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
4129 + TRACE_FLAG_IRQS_NOSUPPORT |
4130 + #endif
4131 + ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
4132 +- ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
4133 ++ ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
4134 + (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
4135 + (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
4136 + }
4137 +diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
4138 +index 28b291f83a4c..9a4aee1d3345 100644
4139 +--- a/kernel/trace/trace_kprobe.c
4140 ++++ b/kernel/trace/trace_kprobe.c
4141 +@@ -671,30 +671,25 @@ static int create_trace_kprobe(int argc, char **argv)
4142 + pr_info("Probe point is not specified.\n");
4143 + return -EINVAL;
4144 + }
4145 +- if (isdigit(argv[1][0])) {
4146 +- if (is_return) {
4147 +- pr_info("Return probe point must be a symbol.\n");
4148 +- return -EINVAL;
4149 +- }
4150 +- /* an address specified */
4151 +- ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
4152 +- if (ret) {
4153 +- pr_info("Failed to parse address.\n");
4154 +- return ret;
4155 +- }
4156 +- } else {
4157 ++
4158 ++ /* try to parse an address. if that fails, try to read the
4159 ++ * input as a symbol. */
4160 ++ if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
4161 + /* a symbol specified */
4162 + symbol = argv[1];
4163 + /* TODO: support .init module functions */
4164 + ret = traceprobe_split_symbol_offset(symbol, &offset);
4165 + if (ret) {
4166 +- pr_info("Failed to parse symbol.\n");
4167 ++ pr_info("Failed to parse either an address or a symbol.\n");
4168 + return ret;
4169 + }
4170 + if (offset && is_return) {
4171 + pr_info("Return probe must be used without offset.\n");
4172 + return -EINVAL;
4173 + }
4174 ++ } else if (is_return) {
4175 ++ pr_info("Return probe point must be a symbol.\n");
4176 ++ return -EINVAL;
4177 + }
4178 + argc -= 2; argv += 2;
4179 +
4180 +diff --git a/lib/cmdline.c b/lib/cmdline.c
4181 +index 8f13cf73c2ec..79069d7938ea 100644
4182 +--- a/lib/cmdline.c
4183 ++++ b/lib/cmdline.c
4184 +@@ -22,14 +22,14 @@
4185 + * the values[M, M+1, ..., N] into the ints array in get_options.
4186 + */
4187 +
4188 +-static int get_range(char **str, int *pint)
4189 ++static int get_range(char **str, int *pint, int n)
4190 + {
4191 + int x, inc_counter, upper_range;
4192 +
4193 + (*str)++;
4194 + upper_range = simple_strtol((*str), NULL, 0);
4195 + inc_counter = upper_range - *pint;
4196 +- for (x = *pint; x < upper_range; x++)
4197 ++ for (x = *pint; n && x < upper_range; x++, n--)
4198 + *pint++ = x;
4199 + return inc_counter;
4200 + }
4201 +@@ -96,7 +96,7 @@ char *get_options(const char *str, int nints, int *ints)
4202 + break;
4203 + if (res == 3) {
4204 + int range_nums;
4205 +- range_nums = get_range((char **)&str, ints + i);
4206 ++ range_nums = get_range((char **)&str, ints + i, nints - i);
4207 + if (range_nums < 0)
4208 + break;
4209 + /*
4210 +diff --git a/lib/swiotlb.c b/lib/swiotlb.c
4211 +index 3c365ab6cf5f..87a203e439f8 100644
4212 +--- a/lib/swiotlb.c
4213 ++++ b/lib/swiotlb.c
4214 +@@ -452,11 +452,11 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
4215 + : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
4216 +
4217 + /*
4218 +- * For mappings greater than a page, we limit the stride (and
4219 +- * hence alignment) to a page size.
4220 ++ * For mappings greater than or equal to a page, we limit the stride
4221 ++ * (and hence alignment) to a page size.
4222 + */
4223 + nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
4224 +- if (size > PAGE_SIZE)
4225 ++ if (size >= PAGE_SIZE)
4226 + stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
4227 + else
4228 + stride = 1;
4229 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
4230 +index a58270f60602..bdd6a8dd5797 100644
4231 +--- a/mm/huge_memory.c
4232 ++++ b/mm/huge_memory.c
4233 +@@ -1294,8 +1294,11 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
4234 + */
4235 + if (unlikely(pmd_trans_migrating(*pmdp))) {
4236 + page = pmd_page(*pmdp);
4237 ++ if (!get_page_unless_zero(page))
4238 ++ goto out_unlock;
4239 + spin_unlock(ptl);
4240 + wait_on_page_locked(page);
4241 ++ put_page(page);
4242 + goto out;
4243 + }
4244 +
4245 +@@ -1327,8 +1330,11 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
4246 +
4247 + /* Migration could have started since the pmd_trans_migrating check */
4248 + if (!page_locked) {
4249 ++ if (!get_page_unless_zero(page))
4250 ++ goto out_unlock;
4251 + spin_unlock(ptl);
4252 + wait_on_page_locked(page);
4253 ++ put_page(page);
4254 + page_nid = -1;
4255 + goto out;
4256 + }
4257 +diff --git a/mm/list_lru.c b/mm/list_lru.c
4258 +index 84b4c21d78d7..2a6a2e4b64ba 100644
4259 +--- a/mm/list_lru.c
4260 ++++ b/mm/list_lru.c
4261 +@@ -103,6 +103,7 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item)
4262 + if (list_empty(item)) {
4263 + list_add_tail(item, &l->list);
4264 + l->nr_items++;
4265 ++ nlru->nr_items++;
4266 + spin_unlock(&nlru->lock);
4267 + return true;
4268 + }
4269 +@@ -122,6 +123,7 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item)
4270 + if (!list_empty(item)) {
4271 + list_del_init(item);
4272 + l->nr_items--;
4273 ++ nlru->nr_items--;
4274 + spin_unlock(&nlru->lock);
4275 + return true;
4276 + }
4277 +@@ -169,15 +171,10 @@ EXPORT_SYMBOL_GPL(list_lru_count_one);
4278 +
4279 + unsigned long list_lru_count_node(struct list_lru *lru, int nid)
4280 + {
4281 +- long count = 0;
4282 +- int memcg_idx;
4283 ++ struct list_lru_node *nlru;
4284 +
4285 +- count += __list_lru_count_one(lru, nid, -1);
4286 +- if (list_lru_memcg_aware(lru)) {
4287 +- for_each_memcg_cache_index(memcg_idx)
4288 +- count += __list_lru_count_one(lru, nid, memcg_idx);
4289 +- }
4290 +- return count;
4291 ++ nlru = &lru->node[nid];
4292 ++ return nlru->nr_items;
4293 + }
4294 + EXPORT_SYMBOL_GPL(list_lru_count_node);
4295 +
4296 +@@ -212,6 +209,7 @@ restart:
4297 + assert_spin_locked(&nlru->lock);
4298 + case LRU_REMOVED:
4299 + isolated++;
4300 ++ nlru->nr_items--;
4301 + /*
4302 + * If the lru lock has been dropped, our list
4303 + * traversal is now invalid and so we have to
4304 +diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c
4305 +index 40dd0f9b00d6..09f733b0424a 100644
4306 +--- a/mm/swap_cgroup.c
4307 ++++ b/mm/swap_cgroup.c
4308 +@@ -205,6 +205,8 @@ void swap_cgroup_swapoff(int type)
4309 + struct page *page = map[i];
4310 + if (page)
4311 + __free_page(page);
4312 ++ if (!(i % SWAP_CLUSTER_MAX))
4313 ++ cond_resched();
4314 + }
4315 + vfree(map);
4316 + }
4317 +diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
4318 +index 59555f0f8fc8..d45e590e8f10 100644
4319 +--- a/net/8021q/vlan.c
4320 ++++ b/net/8021q/vlan.c
4321 +@@ -278,7 +278,8 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
4322 + return 0;
4323 +
4324 + out_free_newdev:
4325 +- free_netdev(new_dev);
4326 ++ if (new_dev->reg_state == NETREG_UNINITIALIZED)
4327 ++ free_netdev(new_dev);
4328 + return err;
4329 + }
4330 +
4331 +diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
4332 +index f6c3b2137eea..bcb62e10a99c 100644
4333 +--- a/net/caif/cfpkt_skbuff.c
4334 ++++ b/net/caif/cfpkt_skbuff.c
4335 +@@ -81,11 +81,7 @@ static struct cfpkt *cfpkt_create_pfx(u16 len, u16 pfx)
4336 + {
4337 + struct sk_buff *skb;
4338 +
4339 +- if (likely(in_interrupt()))
4340 +- skb = alloc_skb(len + pfx, GFP_ATOMIC);
4341 +- else
4342 +- skb = alloc_skb(len + pfx, GFP_KERNEL);
4343 +-
4344 ++ skb = alloc_skb(len + pfx, GFP_ATOMIC);
4345 + if (unlikely(skb == NULL))
4346 + return NULL;
4347 +
4348 +diff --git a/net/core/dev.c b/net/core/dev.c
4349 +index 0f9289ff0f2a..bd47736b689e 100644
4350 +--- a/net/core/dev.c
4351 ++++ b/net/core/dev.c
4352 +@@ -1214,8 +1214,9 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
4353 + if (!new_ifalias)
4354 + return -ENOMEM;
4355 + dev->ifalias = new_ifalias;
4356 ++ memcpy(dev->ifalias, alias, len);
4357 ++ dev->ifalias[len] = 0;
4358 +
4359 +- strlcpy(dev->ifalias, alias, len+1);
4360 + return len;
4361 + }
4362 +
4363 +@@ -6726,8 +6727,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
4364 + } else {
4365 + netdev_stats_to_stats64(storage, &dev->stats);
4366 + }
4367 +- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
4368 +- storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
4369 ++ storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
4370 ++ storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
4371 + return storage;
4372 + }
4373 + EXPORT_SYMBOL(dev_get_stats);
4374 +diff --git a/net/core/dst.c b/net/core/dst.c
4375 +index 540066cb33ef..4d385b292f5d 100644
4376 +--- a/net/core/dst.c
4377 ++++ b/net/core/dst.c
4378 +@@ -373,6 +373,20 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event,
4379 + spin_lock_bh(&dst_garbage.lock);
4380 + dst = dst_garbage.list;
4381 + dst_garbage.list = NULL;
4382 ++ /* The code in dst_ifdown places a hold on the loopback device.
4383 ++ * If the gc entry processing is set to expire after a lengthy
4384 ++ * interval, this hold can cause netdev_wait_allrefs() to hang
4385 ++	 * out and wait for a long time -- until the loopback
4386 ++ * interface is released. If we're really unlucky, it'll emit
4387 ++ * pr_emerg messages to console too. Reset the interval here,
4388 ++ * so dst cleanups occur in a more timely fashion.
4389 ++ */
4390 ++ if (dst_garbage.timer_inc > DST_GC_INC) {
4391 ++ dst_garbage.timer_inc = DST_GC_INC;
4392 ++ dst_garbage.timer_expires = DST_GC_MIN;
4393 ++ mod_delayed_work(system_wq, &dst_gc_work,
4394 ++ dst_garbage.timer_expires);
4395 ++ }
4396 + spin_unlock_bh(&dst_garbage.lock);
4397 +
4398 + if (last)
4399 +diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
4400 +index 76d3bf70c31a..53b9099c331f 100644
4401 +--- a/net/decnet/dn_route.c
4402 ++++ b/net/decnet/dn_route.c
4403 +@@ -188,12 +188,6 @@ static inline void dnrt_free(struct dn_route *rt)
4404 + call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
4405 + }
4406 +
4407 +-static inline void dnrt_drop(struct dn_route *rt)
4408 +-{
4409 +- dst_release(&rt->dst);
4410 +- call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
4411 +-}
4412 +-
4413 + static void dn_dst_check_expire(unsigned long dummy)
4414 + {
4415 + int i;
4416 +@@ -248,7 +242,7 @@ static int dn_dst_gc(struct dst_ops *ops)
4417 + }
4418 + *rtp = rt->dst.dn_next;
4419 + rt->dst.dn_next = NULL;
4420 +- dnrt_drop(rt);
4421 ++ dnrt_free(rt);
4422 + break;
4423 + }
4424 + spin_unlock_bh(&dn_rt_hash_table[i].lock);
4425 +@@ -350,7 +344,7 @@ static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_rou
4426 + dst_use(&rth->dst, now);
4427 + spin_unlock_bh(&dn_rt_hash_table[hash].lock);
4428 +
4429 +- dnrt_drop(rt);
4430 ++ dst_free(&rt->dst);
4431 + *rp = rth;
4432 + return 0;
4433 + }
4434 +@@ -380,7 +374,7 @@ static void dn_run_flush(unsigned long dummy)
4435 + for(; rt; rt = next) {
4436 + next = rcu_dereference_raw(rt->dst.dn_next);
4437 + RCU_INIT_POINTER(rt->dst.dn_next, NULL);
4438 +- dst_free((struct dst_entry *)rt);
4439 ++ dnrt_free(rt);
4440 + }
4441 +
4442 + nothing_to_declare:
4443 +@@ -1189,7 +1183,7 @@ make_route:
4444 + if (dev_out->flags & IFF_LOOPBACK)
4445 + flags |= RTCF_LOCAL;
4446 +
4447 +- rt = dst_alloc(&dn_dst_ops, dev_out, 1, DST_OBSOLETE_NONE, DST_HOST);
4448 ++ rt = dst_alloc(&dn_dst_ops, dev_out, 0, DST_OBSOLETE_NONE, DST_HOST);
4449 + if (rt == NULL)
4450 + goto e_nobufs;
4451 +
4452 +diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
4453 +index af34fc9bdf69..2fe45762ca70 100644
4454 +--- a/net/decnet/netfilter/dn_rtmsg.c
4455 ++++ b/net/decnet/netfilter/dn_rtmsg.c
4456 +@@ -102,7 +102,9 @@ static inline void dnrmg_receive_user_skb(struct sk_buff *skb)
4457 + {
4458 + struct nlmsghdr *nlh = nlmsg_hdr(skb);
4459 +
4460 +- if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
4461 ++ if (skb->len < sizeof(*nlh) ||
4462 ++ nlh->nlmsg_len < sizeof(*nlh) ||
4463 ++ skb->len < nlh->nlmsg_len)
4464 + return;
4465 +
4466 + if (!netlink_capable(skb, CAP_NET_ADMIN))
4467 +diff --git a/net/dsa/slave.c b/net/dsa/slave.c
4468 +index 57978c5b2c91..fe2758c72dbf 100644
4469 +--- a/net/dsa/slave.c
4470 ++++ b/net/dsa/slave.c
4471 +@@ -734,10 +734,8 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p,
4472 + /* Use already configured phy mode */
4473 + if (p->phy_interface == PHY_INTERFACE_MODE_NA)
4474 + p->phy_interface = p->phy->interface;
4475 +- phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
4476 +- p->phy_interface);
4477 +-
4478 +- return 0;
4479 ++ return phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
4480 ++ p->phy_interface);
4481 + }
4482 +
4483 + static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
4484 +diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
4485 +index 218abf9fb1ed..e2d3d62297ec 100644
4486 +--- a/net/ipv4/igmp.c
4487 ++++ b/net/ipv4/igmp.c
4488 +@@ -1080,6 +1080,7 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
4489 + pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
4490 + if (!pmc)
4491 + return;
4492 ++ spin_lock_init(&pmc->lock);
4493 + spin_lock_bh(&im->lock);
4494 + pmc->interface = im->interface;
4495 + in_dev_hold(in_dev);
4496 +@@ -1832,21 +1833,26 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
4497 +
4498 + static void ip_mc_clear_src(struct ip_mc_list *pmc)
4499 + {
4500 +- struct ip_sf_list *psf, *nextpsf;
4501 ++ struct ip_sf_list *psf, *nextpsf, *tomb, *sources;
4502 +
4503 +- for (psf = pmc->tomb; psf; psf = nextpsf) {
4504 ++ spin_lock_bh(&pmc->lock);
4505 ++ tomb = pmc->tomb;
4506 ++ pmc->tomb = NULL;
4507 ++ sources = pmc->sources;
4508 ++ pmc->sources = NULL;
4509 ++ pmc->sfmode = MCAST_EXCLUDE;
4510 ++ pmc->sfcount[MCAST_INCLUDE] = 0;
4511 ++ pmc->sfcount[MCAST_EXCLUDE] = 1;
4512 ++ spin_unlock_bh(&pmc->lock);
4513 ++
4514 ++ for (psf = tomb; psf; psf = nextpsf) {
4515 + nextpsf = psf->sf_next;
4516 + kfree(psf);
4517 + }
4518 +- pmc->tomb = NULL;
4519 +- for (psf = pmc->sources; psf; psf = nextpsf) {
4520 ++ for (psf = sources; psf; psf = nextpsf) {
4521 + nextpsf = psf->sf_next;
4522 + kfree(psf);
4523 + }
4524 +- pmc->sources = NULL;
4525 +- pmc->sfmode = MCAST_EXCLUDE;
4526 +- pmc->sfcount[MCAST_INCLUDE] = 0;
4527 +- pmc->sfcount[MCAST_EXCLUDE] = 1;
4528 + }
4529 +
4530 + /* Join a multicast group
4531 +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
4532 +index e31af0c23e56..df4edab0ba3a 100644
4533 +--- a/net/ipv6/addrconf.c
4534 ++++ b/net/ipv6/addrconf.c
4535 +@@ -286,9 +286,9 @@ static void addrconf_mod_rs_timer(struct inet6_dev *idev,
4536 + static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
4537 + unsigned long delay)
4538 + {
4539 +- if (!delayed_work_pending(&ifp->dad_work))
4540 +- in6_ifa_hold(ifp);
4541 +- mod_delayed_work(addrconf_wq, &ifp->dad_work, delay);
4542 ++ in6_ifa_hold(ifp);
4543 ++ if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay))
4544 ++ in6_ifa_put(ifp);
4545 + }
4546 +
4547 + static int snmp6_alloc_dev(struct inet6_dev *idev)
4548 +@@ -1675,17 +1675,7 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *add
4549 +
4550 + static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
4551 + {
4552 +- if (ifp->flags&IFA_F_PERMANENT) {
4553 +- spin_lock_bh(&ifp->lock);
4554 +- addrconf_del_dad_work(ifp);
4555 +- ifp->flags |= IFA_F_TENTATIVE;
4556 +- if (dad_failed)
4557 +- ifp->flags |= IFA_F_DADFAILED;
4558 +- spin_unlock_bh(&ifp->lock);
4559 +- if (dad_failed)
4560 +- ipv6_ifa_notify(0, ifp);
4561 +- in6_ifa_put(ifp);
4562 +- } else if (ifp->flags&IFA_F_TEMPORARY) {
4563 ++ if (ifp->flags&IFA_F_TEMPORARY) {
4564 + struct inet6_ifaddr *ifpub;
4565 + spin_lock_bh(&ifp->lock);
4566 + ifpub = ifp->ifpub;
4567 +@@ -1698,6 +1688,16 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
4568 + spin_unlock_bh(&ifp->lock);
4569 + }
4570 + ipv6_del_addr(ifp);
4571 ++ } else if (ifp->flags&IFA_F_PERMANENT || !dad_failed) {
4572 ++ spin_lock_bh(&ifp->lock);
4573 ++ addrconf_del_dad_work(ifp);
4574 ++ ifp->flags |= IFA_F_TENTATIVE;
4575 ++ if (dad_failed)
4576 ++ ifp->flags |= IFA_F_DADFAILED;
4577 ++ spin_unlock_bh(&ifp->lock);
4578 ++ if (dad_failed)
4579 ++ ipv6_ifa_notify(0, ifp);
4580 ++ in6_ifa_put(ifp);
4581 + } else {
4582 + ipv6_del_addr(ifp);
4583 + }
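[Editor's note] The addrconf hunk above reverses the hold/queue order: taking the reference only when no work was pending raced with the work item dropping its own reference. The safe idiom is to hold unconditionally and put back the extra reference when mod_delayed_work() reports the work was already queued. A user-space sketch of that idiom, with hold()/put() and a boolean re-arm flag standing in for in6_ifa_hold()/in6_ifa_put() and the workqueue:

    #include <stdbool.h>
    #include <stdio.h>

    struct obj { int refs; bool timer_pending; };

    static void hold(struct obj *o) { o->refs++; }
    static void put(struct obj *o)  { o->refs--; }

    /* Mirrors mod_delayed_work(): returns true if the work was already
     * queued (and is merely re-armed), false if this call queued it. */
    static bool mod_timer_sketch(struct obj *o)
    {
        bool was_pending = o->timer_pending;
        o->timer_pending = true;
        return was_pending;
    }

    static void schedule_dad(struct obj *o)
    {
        hold(o);                  /* one reference per queued timer */
        if (mod_timer_sketch(o))  /* already queued: drop the extra ref */
            put(o);
    }

    int main(void)
    {
        struct obj o = { .refs = 1, .timer_pending = false };
        schedule_dad(&o);
        schedule_dad(&o);  /* re-arming must keep the count balanced */
        printf("refs = %d (expect 2: owner + one for the timer)\n", o.refs);
        return 0;
    }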
4584 +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
4585 +index f91ee783a5fd..eefb8759cfa4 100644
4586 +--- a/net/ipv6/ip6_output.c
4587 ++++ b/net/ipv6/ip6_output.c
4588 +@@ -978,8 +978,10 @@ static int ip6_dst_lookup_tail(struct sock *sk,
4589 + }
4590 + #endif
4591 + if (ipv6_addr_v4mapped(&fl6->saddr) &&
4592 +- !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr)))
4593 +- return -EAFNOSUPPORT;
4594 ++ !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr))) {
4595 ++ err = -EAFNOSUPPORT;
4596 ++ goto out_err_release;
4597 ++ }
4598 +
4599 + return 0;
4600 +
4601 +diff --git a/net/key/af_key.c b/net/key/af_key.c
4602 +index f0d52d721b3a..9a556e434f59 100644
4603 +--- a/net/key/af_key.c
4604 ++++ b/net/key/af_key.c
4605 +@@ -1135,6 +1135,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
4606 + goto out;
4607 + }
4608 +
4609 ++ err = -ENOBUFS;
4610 + key = ext_hdrs[SADB_EXT_KEY_AUTH - 1];
4611 + if (sa->sadb_sa_auth) {
4612 + int keysize = 0;
4613 +@@ -1146,8 +1147,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
4614 + if (key)
4615 + keysize = (key->sadb_key_bits + 7) / 8;
4616 + x->aalg = kmalloc(sizeof(*x->aalg) + keysize, GFP_KERNEL);
4617 +- if (!x->aalg)
4618 ++ if (!x->aalg) {
4619 ++ err = -ENOMEM;
4620 + goto out;
4621 ++ }
4622 + strcpy(x->aalg->alg_name, a->name);
4623 + x->aalg->alg_key_len = 0;
4624 + if (key) {
4625 +@@ -1166,8 +1169,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
4626 + goto out;
4627 + }
4628 + x->calg = kmalloc(sizeof(*x->calg), GFP_KERNEL);
4629 +- if (!x->calg)
4630 ++ if (!x->calg) {
4631 ++ err = -ENOMEM;
4632 + goto out;
4633 ++ }
4634 + strcpy(x->calg->alg_name, a->name);
4635 + x->props.calgo = sa->sadb_sa_encrypt;
4636 + } else {
4637 +@@ -1181,8 +1186,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
4638 + if (key)
4639 + keysize = (key->sadb_key_bits + 7) / 8;
4640 + x->ealg = kmalloc(sizeof(*x->ealg) + keysize, GFP_KERNEL);
4641 +- if (!x->ealg)
4642 ++ if (!x->ealg) {
4643 ++ err = -ENOMEM;
4644 + goto out;
4645 ++ }
4646 + strcpy(x->ealg->alg_name, a->name);
4647 + x->ealg->alg_key_len = 0;
4648 + if (key) {
4649 +@@ -1226,8 +1233,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
4650 + struct xfrm_encap_tmpl *natt;
4651 +
4652 + x->encap = kmalloc(sizeof(*x->encap), GFP_KERNEL);
4653 +- if (!x->encap)
4654 ++ if (!x->encap) {
4655 ++ err = -ENOMEM;
4656 + goto out;
4657 ++ }
4658 +
4659 + natt = x->encap;
4660 + n_type = ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1];
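[Editor's note] The af_key hunks all repair the same error-path slip: err was left holding a stale value when an allocation failed, so failures could be reported with the wrong code. A compact sketch of the goto-out idiom they enforce, with build_state() as a hypothetical stand-in, not the pfkey API:

    #include <errno.h>
    #include <stdlib.h>

    struct alg { int key_len; };

    static int build_state(struct alg **out)
    {
        int err = -EINVAL;   /* covers the early validation exits */
        struct alg *a;

        *out = NULL;
        a = malloc(sizeof(*a));
        if (!a) {
            err = -ENOMEM;   /* set explicitly on every failing branch */
            goto out;
        }
        a->key_len = 0;
        *out = a;
        return 0;
    out:
        return err;
    }

    int main(void)
    {
        struct alg *a;
        int err = build_state(&a);

        if (!err)
            free(a);
        return err ? 1 : 0;
    }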
4661 +diff --git a/net/mac80211/main.c b/net/mac80211/main.c
4662 +index e86daed83c6f..0ddf23971b50 100644
4663 +--- a/net/mac80211/main.c
4664 ++++ b/net/mac80211/main.c
4665 +@@ -887,12 +887,17 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
4666 + supp_ht = supp_ht || sband->ht_cap.ht_supported;
4667 + supp_vht = supp_vht || sband->vht_cap.vht_supported;
4668 +
4669 +- if (sband->ht_cap.ht_supported)
4670 +- local->rx_chains =
4671 +- max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs),
4672 +- local->rx_chains);
4673 ++ if (!sband->ht_cap.ht_supported)
4674 ++ continue;
4675 +
4676 + /* TODO: consider VHT for RX chains, hopefully it's the same */
4677 ++ local->rx_chains =
4678 ++ max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs),
4679 ++ local->rx_chains);
4680 ++
4681 ++ /* no need to mask, SM_PS_DISABLED has all bits set */
4682 ++ sband->ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED <<
4683 ++ IEEE80211_HT_CAP_SM_PS_SHIFT;
4684 + }
4685 +
4686 + /* if low-level driver supports AP, we also support VLAN */
4687 +diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
4688 +index a26bd6532829..a837e405a8ab 100644
4689 +--- a/net/netfilter/ipvs/ip_vs_core.c
4690 ++++ b/net/netfilter/ipvs/ip_vs_core.c
4691 +@@ -822,10 +822,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
4692 + {
4693 + unsigned int verdict = NF_DROP;
4694 +
4695 +- if (IP_VS_FWD_METHOD(cp) != 0) {
4696 +- pr_err("shouldn't reach here, because the box is on the "
4697 +- "half connection in the tun/dr module.\n");
4698 +- }
4699 ++ if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
4700 ++ goto ignore_cp;
4701 +
4702 + /* Ensure the checksum is correct */
4703 + if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
4704 +@@ -859,6 +857,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
4705 + ip_vs_notrack(skb);
4706 + else
4707 + ip_vs_update_conntrack(skb, cp, 0);
4708 ++
4709 ++ignore_cp:
4710 + verdict = NF_ACCEPT;
4711 +
4712 + out:
4713 +@@ -1229,8 +1229,11 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
4714 + */
4715 + cp = pp->conn_out_get(af, skb, &iph, 0);
4716 +
4717 +- if (likely(cp))
4718 ++ if (likely(cp)) {
4719 ++ if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
4720 ++ goto ignore_cp;
4721 + return handle_response(af, skb, pd, cp, &iph, hooknum);
4722 ++ }
4723 + if (sysctl_nat_icmp_send(net) &&
4724 + (pp->protocol == IPPROTO_TCP ||
4725 + pp->protocol == IPPROTO_UDP ||
4726 +@@ -1272,9 +1275,15 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
4727 + }
4728 + }
4729 + }
4730 ++
4731 ++out:
4732 + IP_VS_DBG_PKT(12, af, pp, skb, 0,
4733 + "ip_vs_out: packet continues traversal as normal");
4734 + return NF_ACCEPT;
4735 ++
4736 ++ignore_cp:
4737 ++ __ip_vs_conn_put(cp);
4738 ++ goto out;
4739 + }
4740 +
4741 + /*
4742 +diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
4743 +index 6b8b0abbfab4..b6e939a8b099 100644
4744 +--- a/net/netfilter/nf_conntrack_netlink.c
4745 ++++ b/net/netfilter/nf_conntrack_netlink.c
4746 +@@ -45,6 +45,8 @@
4747 + #include <net/netfilter/nf_conntrack_zones.h>
4748 + #include <net/netfilter/nf_conntrack_timestamp.h>
4749 + #include <net/netfilter/nf_conntrack_labels.h>
4750 ++#include <net/netfilter/nf_conntrack_seqadj.h>
4751 ++#include <net/netfilter/nf_conntrack_synproxy.h>
4752 + #ifdef CONFIG_NF_NAT_NEEDED
4753 + #include <net/netfilter/nf_nat_core.h>
4754 + #include <net/netfilter/nf_nat_l4proto.h>
4755 +@@ -1727,6 +1729,8 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
4756 + nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
4757 + nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
4758 + nf_ct_labels_ext_add(ct);
4759 ++ nfct_seqadj_ext_add(ct);
4760 ++ nfct_synproxy_ext_add(ct);
4761 +
4762 + /* we must add conntrack extensions before confirmation. */
4763 + ct->status |= IPS_CONFIRMED;
4764 +diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
4765 +index e762de5ee89b..6531d7039b11 100644
4766 +--- a/net/netfilter/xt_TCPMSS.c
4767 ++++ b/net/netfilter/xt_TCPMSS.c
4768 +@@ -104,7 +104,7 @@ tcpmss_mangle_packet(struct sk_buff *skb,
4769 + tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
4770 + tcp_hdrlen = tcph->doff * 4;
4771 +
4772 +- if (len < tcp_hdrlen)
4773 ++ if (len < tcp_hdrlen || tcp_hdrlen < sizeof(struct tcphdr))
4774 + return -1;
4775 +
4776 + if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
4777 +@@ -156,6 +156,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
4778 + if (len > tcp_hdrlen)
4779 + return 0;
4780 +
4781 ++ /* tcph->doff has 4 bits, do not wrap it to 0 */
4782 ++ if (tcp_hdrlen >= 15 * 4)
4783 ++ return 0;
4784 ++
4785 + /*
4786 + * MSS Option not found ?! add it..
4787 + */
4788 +diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
4789 +index db0f39f5ef96..6851a6d98fce 100644
4790 +--- a/net/rxrpc/ar-key.c
4791 ++++ b/net/rxrpc/ar-key.c
4792 +@@ -215,7 +215,7 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
4793 + unsigned int *_toklen)
4794 + {
4795 + const __be32 *xdr = *_xdr;
4796 +- unsigned int toklen = *_toklen, n_parts, loop, tmp;
4797 ++ unsigned int toklen = *_toklen, n_parts, loop, tmp, paddedlen;
4798 +
4799 + /* there must be at least one name, and at least #names+1 length
4800 + * words */
4801 +@@ -245,16 +245,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
4802 + toklen -= 4;
4803 + if (tmp <= 0 || tmp > AFSTOKEN_STRING_MAX)
4804 + return -EINVAL;
4805 +- if (tmp > toklen)
4806 ++ paddedlen = (tmp + 3) & ~3;
4807 ++ if (paddedlen > toklen)
4808 + return -EINVAL;
4809 + princ->name_parts[loop] = kmalloc(tmp + 1, GFP_KERNEL);
4810 + if (!princ->name_parts[loop])
4811 + return -ENOMEM;
4812 + memcpy(princ->name_parts[loop], xdr, tmp);
4813 + princ->name_parts[loop][tmp] = 0;
4814 +- tmp = (tmp + 3) & ~3;
4815 +- toklen -= tmp;
4816 +- xdr += tmp >> 2;
4817 ++ toklen -= paddedlen;
4818 ++ xdr += paddedlen >> 2;
4819 + }
4820 +
4821 + if (toklen < 4)
4822 +@@ -263,16 +263,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
4823 + toklen -= 4;
4824 + if (tmp <= 0 || tmp > AFSTOKEN_K5_REALM_MAX)
4825 + return -EINVAL;
4826 +- if (tmp > toklen)
4827 ++ paddedlen = (tmp + 3) & ~3;
4828 ++ if (paddedlen > toklen)
4829 + return -EINVAL;
4830 + princ->realm = kmalloc(tmp + 1, GFP_KERNEL);
4831 + if (!princ->realm)
4832 + return -ENOMEM;
4833 + memcpy(princ->realm, xdr, tmp);
4834 + princ->realm[tmp] = 0;
4835 +- tmp = (tmp + 3) & ~3;
4836 +- toklen -= tmp;
4837 +- xdr += tmp >> 2;
4838 ++ toklen -= paddedlen;
4839 ++ xdr += paddedlen >> 2;
4840 +
4841 + _debug("%s/...@%s", princ->name_parts[0], princ->realm);
4842 +
4843 +@@ -291,7 +291,7 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
4844 + unsigned int *_toklen)
4845 + {
4846 + const __be32 *xdr = *_xdr;
4847 +- unsigned int toklen = *_toklen, len;
4848 ++ unsigned int toklen = *_toklen, len, paddedlen;
4849 +
4850 + /* there must be at least one tag and one length word */
4851 + if (toklen <= 8)
4852 +@@ -305,15 +305,17 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
4853 + toklen -= 8;
4854 + if (len > max_data_size)
4855 + return -EINVAL;
4856 ++ paddedlen = (len + 3) & ~3;
4857 ++ if (paddedlen > toklen)
4858 ++ return -EINVAL;
4859 + td->data_len = len;
4860 +
4861 + if (len > 0) {
4862 + td->data = kmemdup(xdr, len, GFP_KERNEL);
4863 + if (!td->data)
4864 + return -ENOMEM;
4865 +- len = (len + 3) & ~3;
4866 +- toklen -= len;
4867 +- xdr += len >> 2;
4868 ++ toklen -= paddedlen;
4869 ++ xdr += paddedlen >> 2;
4870 + }
4871 +
4872 + _debug("tag %x len %x", td->tag, td->data_len);
4873 +@@ -385,7 +387,7 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
4874 + const __be32 **_xdr, unsigned int *_toklen)
4875 + {
4876 + const __be32 *xdr = *_xdr;
4877 +- unsigned int toklen = *_toklen, len;
4878 ++ unsigned int toklen = *_toklen, len, paddedlen;
4879 +
4880 + /* there must be at least one length word */
4881 + if (toklen <= 4)
4882 +@@ -397,6 +399,9 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
4883 + toklen -= 4;
4884 + if (len > AFSTOKEN_K5_TIX_MAX)
4885 + return -EINVAL;
4886 ++ paddedlen = (len + 3) & ~3;
4887 ++ if (paddedlen > toklen)
4888 ++ return -EINVAL;
4889 + *_tktlen = len;
4890 +
4891 + _debug("ticket len %u", len);
4892 +@@ -405,9 +410,8 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
4893 + *_ticket = kmemdup(xdr, len, GFP_KERNEL);
4894 + if (!*_ticket)
4895 + return -ENOMEM;
4896 +- len = (len + 3) & ~3;
4897 +- toklen -= len;
4898 +- xdr += len >> 2;
4899 ++ toklen -= paddedlen;
4900 ++ xdr += paddedlen >> 2;
4901 + }
4902 +
4903 + *_xdr = xdr;
4904 +@@ -550,7 +554,7 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
4905 + {
4906 + const __be32 *xdr = prep->data, *token;
4907 + const char *cp;
4908 +- unsigned int len, tmp, loop, ntoken, toklen, sec_ix;
4909 ++ unsigned int len, paddedlen, loop, ntoken, toklen, sec_ix;
4910 + size_t datalen = prep->datalen;
4911 + int ret;
4912 +
4913 +@@ -576,22 +580,21 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
4914 + if (len < 1 || len > AFSTOKEN_CELL_MAX)
4915 + goto not_xdr;
4916 + datalen -= 4;
4917 +- tmp = (len + 3) & ~3;
4918 +- if (tmp > datalen)
4919 ++ paddedlen = (len + 3) & ~3;
4920 ++ if (paddedlen > datalen)
4921 + goto not_xdr;
4922 +
4923 + cp = (const char *) xdr;
4924 + for (loop = 0; loop < len; loop++)
4925 + if (!isprint(cp[loop]))
4926 + goto not_xdr;
4927 +- if (len < tmp)
4928 +- for (; loop < tmp; loop++)
4929 +- if (cp[loop])
4930 +- goto not_xdr;
4931 ++ for (; loop < paddedlen; loop++)
4932 ++ if (cp[loop])
4933 ++ goto not_xdr;
4934 + _debug("cellname: [%u/%u] '%*.*s'",
4935 +- len, tmp, len, len, (const char *) xdr);
4936 +- datalen -= tmp;
4937 +- xdr += tmp >> 2;
4938 ++ len, paddedlen, len, len, (const char *) xdr);
4939 ++ datalen -= paddedlen;
4940 ++ xdr += paddedlen >> 2;
4941 +
4942 + /* get the token count */
4943 + if (datalen < 12)
4944 +@@ -612,10 +615,11 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
4945 + sec_ix = ntohl(*xdr);
4946 + datalen -= 4;
4947 + _debug("token: [%x/%zx] %x", toklen, datalen, sec_ix);
4948 +- if (toklen < 20 || toklen > datalen)
4949 ++ paddedlen = (toklen + 3) & ~3;
4950 ++ if (toklen < 20 || toklen > datalen || paddedlen > datalen)
4951 + goto not_xdr;
4952 +- datalen -= (toklen + 3) & ~3;
4953 +- xdr += (toklen + 3) >> 2;
4954 ++ datalen -= paddedlen;
4955 ++ xdr += paddedlen >> 2;
4956 +
4957 + } while (--loop > 0);
4958 +
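[Editor's note] Every ar-key.c hunk above closes the same hole: the code bounds-checked the raw XDR length, then rounded it up to a 4-byte boundary and subtracted the rounded value, so a length one to three bytes short of the remaining buffer made the unsigned remaining count wrap. Checking the padded length up front removes the wrap. A stand-alone sketch of the corrected check (names illustrative, not the rxrpc API):

    #include <stdint.h>
    #include <stdio.h>

    /* Consume one XDR opaque of 'len' bytes from 'avail' remaining
     * bytes; returns bytes consumed, or 0 on malformed input. */
    static unsigned int xdr_consume(const uint8_t *p, unsigned int avail,
                                    unsigned int len)
    {
        unsigned int paddedlen = (len + 3) & ~3u;  /* round up to 32 bits */

        /* Checking 'len > avail' alone is insufficient: len == avail - 1
         * passes, yet paddedlen exceeds avail and an unsigned
         * 'avail -= paddedlen' would wrap around. */
        if (paddedlen > avail)
            return 0;
        (void)p;   /* the real parser would kmemdup/memcpy here */
        return paddedlen;
    }

    int main(void)
    {
        uint8_t buf[8] = { 0 };
        printf("consume 7 of 8 -> %u\n", xdr_consume(buf, 8, 7));  /* 8 */
        printf("consume 7 of 7 -> %u\n", xdr_consume(buf, 7, 7));  /* 0 */
        return 0;
    }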
4959 +diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
4960 +index c244a49ae4ac..25353056439d 100644
4961 +--- a/net/sched/sch_api.c
4962 ++++ b/net/sched/sch_api.c
4963 +@@ -1004,6 +1004,9 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
4964 +
4965 + return sch;
4966 + }
4967 ++ /* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
4968 ++ if (ops->destroy)
4969 ++ ops->destroy(sch);
4970 + err_out3:
4971 + dev_put(dev);
4972 + kfree((char *) sch - sch->padded);
4973 +diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
4974 +index 792c6f330f77..c072305068e3 100644
4975 +--- a/net/sched/sch_hhf.c
4976 ++++ b/net/sched/sch_hhf.c
4977 +@@ -644,7 +644,9 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
4978 + q->hhf_arrays[i] = hhf_zalloc(HHF_ARRAYS_LEN *
4979 + sizeof(u32));
4980 + if (!q->hhf_arrays[i]) {
4981 +- hhf_destroy(sch);
4982 ++ /* Note: hhf_destroy() will be called
4983 ++ * by our caller.
4984 ++ */
4985 + return -ENOMEM;
4986 + }
4987 + }
4988 +@@ -655,7 +657,9 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
4989 + q->hhf_valid_bits[i] = hhf_zalloc(HHF_ARRAYS_LEN /
4990 + BITS_PER_BYTE);
4991 + if (!q->hhf_valid_bits[i]) {
4992 +- hhf_destroy(sch);
4993 ++ /* Note: hhf_destroy() will be called
4994 ++ * by our caller.
4995 ++ */
4996 + return -ENOMEM;
4997 + }
4998 + }
4999 +diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
5000 +index 3e82f047caaf..d9c84328e7eb 100644
5001 +--- a/net/sched/sch_mq.c
5002 ++++ b/net/sched/sch_mq.c
5003 +@@ -52,7 +52,7 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
5004 + /* pre-allocate qdiscs, attachment can't fail */
5005 + priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
5006 + GFP_KERNEL);
5007 +- if (priv->qdiscs == NULL)
5008 ++ if (!priv->qdiscs)
5009 + return -ENOMEM;
5010 +
5011 + for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
5012 +@@ -60,18 +60,14 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
5013 + qdisc = qdisc_create_dflt(dev_queue, default_qdisc_ops,
5014 + TC_H_MAKE(TC_H_MAJ(sch->handle),
5015 + TC_H_MIN(ntx + 1)));
5016 +- if (qdisc == NULL)
5017 +- goto err;
5018 ++ if (!qdisc)
5019 ++ return -ENOMEM;
5020 + priv->qdiscs[ntx] = qdisc;
5021 + qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
5022 + }
5023 +
5024 + sch->flags |= TCQ_F_MQROOT;
5025 + return 0;
5026 +-
5027 +-err:
5028 +- mq_destroy(sch);
5029 +- return -ENOMEM;
5030 + }
5031 +
5032 + static void mq_attach(struct Qdisc *sch)
5033 +diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
5034 +index ad70ecf57ce7..66bccc5ff4ea 100644
5035 +--- a/net/sched/sch_mqprio.c
5036 ++++ b/net/sched/sch_mqprio.c
5037 +@@ -117,20 +117,17 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
5038 + /* pre-allocate qdisc, attachment can't fail */
5039 + priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
5040 + GFP_KERNEL);
5041 +- if (priv->qdiscs == NULL) {
5042 +- err = -ENOMEM;
5043 +- goto err;
5044 +- }
5045 ++ if (!priv->qdiscs)
5046 ++ return -ENOMEM;
5047 +
5048 + for (i = 0; i < dev->num_tx_queues; i++) {
5049 + dev_queue = netdev_get_tx_queue(dev, i);
5050 + qdisc = qdisc_create_dflt(dev_queue, default_qdisc_ops,
5051 + TC_H_MAKE(TC_H_MAJ(sch->handle),
5052 + TC_H_MIN(i + 1)));
5053 +- if (qdisc == NULL) {
5054 +- err = -ENOMEM;
5055 +- goto err;
5056 +- }
5057 ++ if (!qdisc)
5058 ++ return -ENOMEM;
5059 ++
5060 + priv->qdiscs[i] = qdisc;
5061 + qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
5062 + }
5063 +@@ -143,7 +140,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
5064 + priv->hw_owned = 1;
5065 + err = dev->netdev_ops->ndo_setup_tc(dev, qopt->num_tc);
5066 + if (err)
5067 +- goto err;
5068 ++ return err;
5069 + } else {
5070 + netdev_set_num_tc(dev, qopt->num_tc);
5071 + for (i = 0; i < qopt->num_tc; i++)
5072 +@@ -157,10 +154,6 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
5073 +
5074 + sch->flags |= TCQ_F_MQROOT;
5075 + return 0;
5076 +-
5077 +-err:
5078 +- mqprio_destroy(sch);
5079 +- return err;
5080 + }
5081 +
5082 + static void mqprio_attach(struct Qdisc *sch)
5083 +diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
5084 +index 4417fb25166f..fdcced6aa71d 100644
5085 +--- a/net/sched/sch_sfq.c
5086 ++++ b/net/sched/sch_sfq.c
5087 +@@ -765,9 +765,10 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
5088 + q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor);
5089 + q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows);
5090 + if (!q->ht || !q->slots) {
5091 +- sfq_destroy(sch);
5092 ++ /* Note: sfq_destroy() will be called by our caller */
5093 + return -ENOMEM;
5094 + }
5095 ++
5096 + for (i = 0; i < q->divisor; i++)
5097 + q->ht[i] = SFQ_EMPTY_SLOT;
5098 +
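[Editor's note] The sch_api, hhf, mq, mqprio and sfq hunks together move cleanup to a single owner: qdisc_create() now calls ops->destroy() itself when ->init() fails, so the individual init routines must return -ENOMEM without destroying, or the qdisc would be torn down twice. A small sketch of that ownership contract with hypothetical sch_* helpers:

    #include <stdlib.h>

    struct sch { int *table; };

    static void sch_destroy(struct sch *s)
    {
        free(s->table);
        s->table = NULL;
    }

    static int sch_init(struct sch *s)
    {
        s->table = calloc(16, sizeof(*s->table));
        if (!s->table)
            return -1;   /* no sch_destroy() here; the caller cleans up */
        return 0;
    }

    static struct sch *sch_create(void)
    {
        struct sch *s = calloc(1, sizeof(*s));

        if (!s)
            return NULL;
        if (sch_init(s)) {
            sch_destroy(s);   /* single cleanup point, as in qdisc_create() */
            free(s);
            return NULL;
        }
        return s;
    }

    int main(void)
    {
        struct sch *s = sch_create();
        if (s) { sch_destroy(s); free(s); }
        return 0;
    }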
5099 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
5100 +index 6c880961554f..34d3d4056a11 100644
5101 +--- a/net/sctp/socket.c
5102 ++++ b/net/sctp/socket.c
5103 +@@ -239,7 +239,7 @@ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
5104 + union sctp_addr *laddr = (union sctp_addr *)addr;
5105 + struct sctp_transport *transport;
5106 +
5107 +- if (sctp_verify_addr(sk, laddr, af->sockaddr_len))
5108 ++ if (!af || sctp_verify_addr(sk, laddr, af->sockaddr_len))
5109 + return NULL;
5110 +
5111 + addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
5112 +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
5113 +index 03da879008d7..ce6f2bff5208 100644
5114 +--- a/net/unix/af_unix.c
5115 ++++ b/net/unix/af_unix.c
5116 +@@ -967,7 +967,8 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
5117 + struct dentry *dentry;
5118 +
5119 + err = -EINVAL;
5120 +- if (sunaddr->sun_family != AF_UNIX)
5121 ++ if (addr_len < offsetofend(struct sockaddr_un, sun_family) ||
5122 ++ sunaddr->sun_family != AF_UNIX)
5123 + goto out;
5124 +
5125 + if (addr_len == sizeof(short)) {
5126 +@@ -1098,6 +1099,10 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
5127 + unsigned int hash;
5128 + int err;
5129 +
5130 ++ err = -EINVAL;
5131 ++ if (alen < offsetofend(struct sockaddr, sa_family))
5132 ++ goto out;
5133 ++
5134 + if (addr->sa_family != AF_UNSPEC) {
5135 + err = unix_mkname(sunaddr, alen, &hash);
5136 + if (err < 0)
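[Editor's note] Both af_unix hunks add the same precondition: verify the caller-supplied address length covers the family field before reading it, since userspace controls addr_len. A sketch using the kernel's offsetofend() pattern, re-defined here so it builds in user space:

    #include <stddef.h>
    #include <sys/socket.h>
    #include <sys/un.h>

    #define offsetofend(type, member) \
        (offsetof(type, member) + sizeof(((type *)0)->member))

    static int check_addr(const struct sockaddr *sa, socklen_t len)
    {
        /* Reject short buffers before touching sa->sa_family. */
        if (len < offsetofend(struct sockaddr, sa_family))
            return -1;
        if (sa->sa_family != AF_UNIX)
            return -1;
        return 0;
    }

    int main(void)
    {
        struct sockaddr_un sun = { .sun_family = AF_UNIX };

        /* A 1-byte "address" must be rejected without being read. */
        return check_addr((struct sockaddr *)&sun, 1) == -1 ? 0 : 1;
    }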
5137 +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
5138 +index 81203bbb2eef..e81e20cbe6dd 100644
5139 +--- a/net/wireless/nl80211.c
5140 ++++ b/net/wireless/nl80211.c
5141 +@@ -301,8 +301,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
5142 + [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
5143 + [NL80211_ATTR_PID] = { .type = NLA_U32 },
5144 + [NL80211_ATTR_4ADDR] = { .type = NLA_U8 },
5145 +- [NL80211_ATTR_PMKID] = { .type = NLA_BINARY,
5146 +- .len = WLAN_PMKID_LEN },
5147 ++ [NL80211_ATTR_PMKID] = { .len = WLAN_PMKID_LEN },
5148 + [NL80211_ATTR_DURATION] = { .type = NLA_U32 },
5149 + [NL80211_ATTR_COOKIE] = { .type = NLA_U64 },
5150 + [NL80211_ATTR_TX_RATES] = { .type = NLA_NESTED },
5151 +@@ -358,6 +357,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
5152 + [NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 },
5153 + [NL80211_ATTR_P2P_CTWINDOW] = { .type = NLA_U8 },
5154 + [NL80211_ATTR_P2P_OPPPS] = { .type = NLA_U8 },
5155 ++ [NL80211_ATTR_LOCAL_MESH_POWER_MODE] = {. type = NLA_U32 },
5156 + [NL80211_ATTR_ACL_POLICY] = {. type = NLA_U32 },
5157 + [NL80211_ATTR_MAC_ADDRS] = { .type = NLA_NESTED },
5158 + [NL80211_ATTR_STA_CAPABILITY] = { .type = NLA_U16 },
5159 +@@ -5678,6 +5678,10 @@ static int validate_scan_freqs(struct nlattr *freqs)
5160 + struct nlattr *attr1, *attr2;
5161 + int n_channels = 0, tmp1, tmp2;
5162 +
5163 ++ nla_for_each_nested(attr1, freqs, tmp1)
5164 ++ if (nla_len(attr1) != sizeof(u32))
5165 ++ return 0;
5166 ++
5167 + nla_for_each_nested(attr1, freqs, tmp1) {
5168 + n_channels++;
5169 + /*
5170 +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
5171 +index 4cd2076ff84b..155070f500aa 100644
5172 +--- a/net/xfrm/xfrm_policy.c
5173 ++++ b/net/xfrm/xfrm_policy.c
5174 +@@ -1757,43 +1757,6 @@ free_dst:
5175 + goto out;
5176 + }
5177 +
5178 +-#ifdef CONFIG_XFRM_SUB_POLICY
5179 +-static int xfrm_dst_alloc_copy(void **target, const void *src, int size)
5180 +-{
5181 +- if (!*target) {
5182 +- *target = kmalloc(size, GFP_ATOMIC);
5183 +- if (!*target)
5184 +- return -ENOMEM;
5185 +- }
5186 +-
5187 +- memcpy(*target, src, size);
5188 +- return 0;
5189 +-}
5190 +-#endif
5191 +-
5192 +-static int xfrm_dst_update_parent(struct dst_entry *dst,
5193 +- const struct xfrm_selector *sel)
5194 +-{
5195 +-#ifdef CONFIG_XFRM_SUB_POLICY
5196 +- struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
5197 +- return xfrm_dst_alloc_copy((void **)&(xdst->partner),
5198 +- sel, sizeof(*sel));
5199 +-#else
5200 +- return 0;
5201 +-#endif
5202 +-}
5203 +-
5204 +-static int xfrm_dst_update_origin(struct dst_entry *dst,
5205 +- const struct flowi *fl)
5206 +-{
5207 +-#ifdef CONFIG_XFRM_SUB_POLICY
5208 +- struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
5209 +- return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
5210 +-#else
5211 +- return 0;
5212 +-#endif
5213 +-}
5214 +-
5215 + static int xfrm_expand_policies(const struct flowi *fl, u16 family,
5216 + struct xfrm_policy **pols,
5217 + int *num_pols, int *num_xfrms)
5218 +@@ -1865,16 +1828,6 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
5219 +
5220 + xdst = (struct xfrm_dst *)dst;
5221 + xdst->num_xfrms = err;
5222 +- if (num_pols > 1)
5223 +- err = xfrm_dst_update_parent(dst, &pols[1]->selector);
5224 +- else
5225 +- err = xfrm_dst_update_origin(dst, fl);
5226 +- if (unlikely(err)) {
5227 +- dst_free(dst);
5228 +- XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
5229 +- return ERR_PTR(err);
5230 +- }
5231 +-
5232 + xdst->num_pols = num_pols;
5233 + memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
5234 + xdst->policy_genid = atomic_read(&pols[0]->genid);
5235 +diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
5236 +index c5ec977b9c37..928c1cf8606b 100755
5237 +--- a/scripts/checkpatch.pl
5238 ++++ b/scripts/checkpatch.pl
5239 +@@ -3085,7 +3085,7 @@ sub process {
5240 + $fixedline =~ s/\s*=\s*$/ = {/;
5241 + fix_insert_line($fixlinenr, $fixedline);
5242 + $fixedline = $line;
5243 +- $fixedline =~ s/^(.\s*){\s*/$1/;
5244 ++ $fixedline =~ s/^(.\s*)\{\s*/$1/;
5245 + fix_insert_line($fixlinenr, $fixedline);
5246 + }
5247 + }
5248 +@@ -3435,7 +3435,7 @@ sub process {
5249 + my $fixedline = rtrim($prevrawline) . " {";
5250 + fix_insert_line($fixlinenr, $fixedline);
5251 + $fixedline = $rawline;
5252 +- $fixedline =~ s/^(.\s*){\s*/$1\t/;
5253 ++ $fixedline =~ s/^(.\s*)\{\s*/$1\t/;
5254 + if ($fixedline !~ /^\+\s*$/) {
5255 + fix_insert_line($fixlinenr, $fixedline);
5256 + }
5257 +@@ -3924,7 +3924,7 @@ sub process {
5258 + if (ERROR("SPACING",
5259 + "space required before the open brace '{'\n" . $herecurr) &&
5260 + $fix) {
5261 +- $fixed[$fixlinenr] =~ s/^(\+.*(?:do|\))){/$1 {/;
5262 ++ $fixed[$fixlinenr] =~ s/^(\+.*(?:do|\)))\{/$1 {/;
5263 + }
5264 + }
5265 +
5266 +diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
5267 +index 0a374a2ce030..8e1c0099bb66 100644
5268 +--- a/security/keys/encrypted-keys/encrypted.c
5269 ++++ b/security/keys/encrypted-keys/encrypted.c
5270 +@@ -428,7 +428,7 @@ static int init_blkcipher_desc(struct blkcipher_desc *desc, const u8 *key,
5271 + static struct key *request_master_key(struct encrypted_key_payload *epayload,
5272 + u8 **master_key, size_t *master_keylen)
5273 + {
5274 +- struct key *mkey = NULL;
5275 ++ struct key *mkey = ERR_PTR(-EINVAL);
5276 +
5277 + if (!strncmp(epayload->master_desc, KEY_TRUSTED_PREFIX,
5278 + KEY_TRUSTED_PREFIX_LEN)) {
5279 +diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
5280 +index 0fda7b4901dd..e998aaf14338 100644
5281 +--- a/sound/pci/hda/hda_generic.c
5282 ++++ b/sound/pci/hda/hda_generic.c
5283 +@@ -3188,6 +3188,7 @@ static int check_dyn_adc_switch(struct hda_codec *codec)
5284 + spec->input_paths[i][nums]);
5285 + spec->input_paths[i][nums] =
5286 + spec->input_paths[i][n];
5287 ++ spec->input_paths[i][n] = 0;
5288 + }
5289 + }
5290 + nums++;
5291 +diff --git a/tools/lib/lockdep/uinclude/linux/lockdep.h b/tools/lib/lockdep/uinclude/linux/lockdep.h
5292 +index c1552c28507e..908a9c6fecf0 100644
5293 +--- a/tools/lib/lockdep/uinclude/linux/lockdep.h
5294 ++++ b/tools/lib/lockdep/uinclude/linux/lockdep.h
5295 +@@ -8,7 +8,7 @@
5296 + #include <linux/utsname.h>
5297 +
5298 +
5299 +-#define MAX_LOCK_DEPTH 2000UL
5300 ++#define MAX_LOCK_DEPTH 255UL
5301 +
5302 + #define asmlinkage
5303 + #define __visible
5304 +diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
5305 +index 58f10b8e6ff2..7ee9c19e8466 100644
5306 +--- a/tools/perf/builtin-script.c
5307 ++++ b/tools/perf/builtin-script.c
5308 +@@ -1061,21 +1061,19 @@ static int is_directory(const char *base_path, const struct dirent *dent)
5309 + return S_ISDIR(st.st_mode);
5310 + }
5311 +
5312 +-#define for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next)\
5313 +- while (!readdir_r(scripts_dir, &lang_dirent, &lang_next) && \
5314 +- lang_next) \
5315 +- if ((lang_dirent.d_type == DT_DIR || \
5316 +- (lang_dirent.d_type == DT_UNKNOWN && \
5317 +- is_directory(scripts_path, &lang_dirent))) && \
5318 +- (strcmp(lang_dirent.d_name, ".")) && \
5319 +- (strcmp(lang_dirent.d_name, "..")))
5320 +-
5321 +-#define for_each_script(lang_path, lang_dir, script_dirent, script_next)\
5322 +- while (!readdir_r(lang_dir, &script_dirent, &script_next) && \
5323 +- script_next) \
5324 +- if (script_dirent.d_type != DT_DIR && \
5325 +- (script_dirent.d_type != DT_UNKNOWN || \
5326 +- !is_directory(lang_path, &script_dirent)))
5327 ++#define for_each_lang(scripts_path, scripts_dir, lang_dirent) \
5328 ++ while ((lang_dirent = readdir(scripts_dir)) != NULL) \
5329 ++ if ((lang_dirent->d_type == DT_DIR || \
5330 ++ (lang_dirent->d_type == DT_UNKNOWN && \
5331 ++ is_directory(scripts_path, lang_dirent))) && \
5332 ++ (strcmp(lang_dirent->d_name, ".")) && \
5333 ++ (strcmp(lang_dirent->d_name, "..")))
5334 ++
5335 ++#define for_each_script(lang_path, lang_dir, script_dirent) \
5336 ++ while ((script_dirent = readdir(lang_dir)) != NULL) \
5337 ++ if (script_dirent->d_type != DT_DIR && \
5338 ++ (script_dirent->d_type != DT_UNKNOWN || \
5339 ++ !is_directory(lang_path, script_dirent)))
5340 +
5341 +
5342 + #define RECORD_SUFFIX "-record"
5343 +@@ -1221,7 +1219,7 @@ static int list_available_scripts(const struct option *opt __maybe_unused,
5344 + const char *s __maybe_unused,
5345 + int unset __maybe_unused)
5346 + {
5347 +- struct dirent *script_next, *lang_next, script_dirent, lang_dirent;
5348 ++ struct dirent *script_dirent, *lang_dirent;
5349 + char scripts_path[MAXPATHLEN];
5350 + DIR *scripts_dir, *lang_dir;
5351 + char script_path[MAXPATHLEN];
5352 +@@ -1236,19 +1234,19 @@ static int list_available_scripts(const struct option *opt __maybe_unused,
5353 + if (!scripts_dir)
5354 + return -1;
5355 +
5356 +- for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next) {
5357 ++ for_each_lang(scripts_path, scripts_dir, lang_dirent) {
5358 + snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
5359 +- lang_dirent.d_name);
5360 ++ lang_dirent->d_name);
5361 + lang_dir = opendir(lang_path);
5362 + if (!lang_dir)
5363 + continue;
5364 +
5365 +- for_each_script(lang_path, lang_dir, script_dirent, script_next) {
5366 +- script_root = get_script_root(&script_dirent, REPORT_SUFFIX);
5367 ++ for_each_script(lang_path, lang_dir, script_dirent) {
5368 ++ script_root = get_script_root(script_dirent, REPORT_SUFFIX);
5369 + if (script_root) {
5370 + desc = script_desc__findnew(script_root);
5371 + snprintf(script_path, MAXPATHLEN, "%s/%s",
5372 +- lang_path, script_dirent.d_name);
5373 ++ lang_path, script_dirent->d_name);
5374 + read_script_info(desc, script_path);
5375 + free(script_root);
5376 + }
5377 +@@ -1336,7 +1334,7 @@ static int check_ev_match(char *dir_name, char *scriptname,
5378 + */
5379 + int find_scripts(char **scripts_array, char **scripts_path_array)
5380 + {
5381 +- struct dirent *script_next, *lang_next, script_dirent, lang_dirent;
5382 ++ struct dirent *script_dirent, *lang_dirent;
5383 + char scripts_path[MAXPATHLEN], lang_path[MAXPATHLEN];
5384 + DIR *scripts_dir, *lang_dir;
5385 + struct perf_session *session;
5386 +@@ -1359,9 +1357,9 @@ int find_scripts(char **scripts_array, char **scripts_path_array)
5387 + return -1;
5388 + }
5389 +
5390 +- for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next) {
5391 ++ for_each_lang(scripts_path, scripts_dir, lang_dirent) {
5392 + snprintf(lang_path, MAXPATHLEN, "%s/%s", scripts_path,
5393 +- lang_dirent.d_name);
5394 ++ lang_dirent->d_name);
5395 + #ifdef NO_LIBPERL
5396 + if (strstr(lang_path, "perl"))
5397 + continue;
5398 +@@ -1375,16 +1373,16 @@ int find_scripts(char **scripts_array, char **scripts_path_array)
5399 + if (!lang_dir)
5400 + continue;
5401 +
5402 +- for_each_script(lang_path, lang_dir, script_dirent, script_next) {
5403 ++ for_each_script(lang_path, lang_dir, script_dirent) {
5404 + /* Skip those real time scripts: xxxtop.p[yl] */
5405 +- if (strstr(script_dirent.d_name, "top."))
5406 ++ if (strstr(script_dirent->d_name, "top."))
5407 + continue;
5408 + sprintf(scripts_path_array[i], "%s/%s", lang_path,
5409 +- script_dirent.d_name);
5410 +- temp = strchr(script_dirent.d_name, '.');
5411 ++ script_dirent->d_name);
5412 ++ temp = strchr(script_dirent->d_name, '.');
5413 + snprintf(scripts_array[i],
5414 +- (temp - script_dirent.d_name) + 1,
5415 +- "%s", script_dirent.d_name);
5416 ++ (temp - script_dirent->d_name) + 1,
5417 ++ "%s", script_dirent->d_name);
5418 +
5419 + if (check_ev_match(lang_path,
5420 + scripts_array[i], session))
5421 +@@ -1402,7 +1400,7 @@ int find_scripts(char **scripts_array, char **scripts_path_array)
5422 +
5423 + static char *get_script_path(const char *script_root, const char *suffix)
5424 + {
5425 +- struct dirent *script_next, *lang_next, script_dirent, lang_dirent;
5426 ++ struct dirent *script_dirent, *lang_dirent;
5427 + char scripts_path[MAXPATHLEN];
5428 + char script_path[MAXPATHLEN];
5429 + DIR *scripts_dir, *lang_dir;
5430 +@@ -1415,21 +1413,21 @@ static char *get_script_path(const char *script_root, const char *suffix)
5431 + if (!scripts_dir)
5432 + return NULL;
5433 +
5434 +- for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next) {
5435 ++ for_each_lang(scripts_path, scripts_dir, lang_dirent) {
5436 + snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
5437 +- lang_dirent.d_name);
5438 ++ lang_dirent->d_name);
5439 + lang_dir = opendir(lang_path);
5440 + if (!lang_dir)
5441 + continue;
5442 +
5443 +- for_each_script(lang_path, lang_dir, script_dirent, script_next) {
5444 +- __script_root = get_script_root(&script_dirent, suffix);
5445 ++ for_each_script(lang_path, lang_dir, script_dirent) {
5446 ++ __script_root = get_script_root(script_dirent, suffix);
5447 + if (__script_root && !strcmp(script_root, __script_root)) {
5448 + free(__script_root);
5449 + closedir(lang_dir);
5450 + closedir(scripts_dir);
5451 + snprintf(script_path, MAXPATHLEN, "%s/%s",
5452 +- lang_path, script_dirent.d_name);
5453 ++ lang_path, script_dirent->d_name);
5454 + return strdup(script_path);
5455 + }
5456 + free(__script_root);
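[Editor's note] The perf hunks in this and the following files migrate from readdir_r(), which glibc deprecated because the caller-supplied dirent buffer cannot be sized portably, to plain readdir(), which is safe as long as each DIR stream is used by one thread. A minimal sketch of the replacement loop:

    #include <dirent.h>
    #include <stdio.h>
    #include <string.h>

    static int list_dir(const char *path)
    {
        DIR *dir = opendir(path);
        struct dirent *ent;

        if (!dir)
            return -1;
        /* One pointer per iteration, owned by the DIR stream; no
         * caller-supplied struct dirent or 'next' out-parameter. */
        while ((ent = readdir(dir)) != NULL) {
            if (!strcmp(ent->d_name, ".") || !strcmp(ent->d_name, ".."))
                continue;
            printf("%s\n", ent->d_name);
        }
        closedir(dir);
        return 0;
    }

    int main(void)
    {
        return list_dir(".") ? 1 : 0;
    }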
5457 +diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
5458 +index 6a4d5d41c671..65e138019b99 100644
5459 +--- a/tools/perf/builtin-top.c
5460 ++++ b/tools/perf/builtin-top.c
5461 +@@ -627,7 +627,7 @@ repeat:
5462 + case -1:
5463 + if (errno == EINTR)
5464 + continue;
5465 +- /* Fall trhu */
5466 ++ __fallthrough;
5467 + default:
5468 + c = getc(stdin);
5469 + tcsetattr(0, TCSAFLUSH, &save);
5470 +diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
5471 +index e122970361f2..09b9b74e4c1b 100644
5472 +--- a/tools/perf/builtin-trace.c
5473 ++++ b/tools/perf/builtin-trace.c
5474 +@@ -1404,6 +1404,7 @@ static int trace__process_event(struct trace *trace, struct machine *machine,
5475 + color_fprintf(trace->output, PERF_COLOR_RED,
5476 + "LOST %" PRIu64 " events!\n", event->lost.lost);
5477 + ret = machine__process_lost_event(machine, event, sample);
5478 ++ break;
5479 + default:
5480 + ret = machine__process_event(machine, event, sample);
5481 + break;
5482 +diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
5483 +index 3de744961739..024583871237 100644
5484 +--- a/tools/perf/tests/parse-events.c
5485 ++++ b/tools/perf/tests/parse-events.c
5486 +@@ -1677,15 +1677,14 @@ static int test_pmu_events(void)
5487 + }
5488 +
5489 + while (!ret && (ent = readdir(dir))) {
5490 +-#define MAX_NAME 100
5491 + struct evlist_test e;
5492 +- char name[MAX_NAME];
5493 ++ char name[2 * NAME_MAX + 1 + 12 + 3];
5494 +
5495 + if (!strcmp(ent->d_name, ".") ||
5496 + !strcmp(ent->d_name, ".."))
5497 + continue;
5498 +
5499 +- snprintf(name, MAX_NAME, "cpu/event=%s/u", ent->d_name);
5500 ++ snprintf(name, sizeof(name), "cpu/event=%s/u", ent->d_name);
5501 +
5502 + e.name = name;
5503 + e.check = test__checkevent_pmu_events;
5504 +@@ -1693,11 +1692,10 @@ static int test_pmu_events(void)
5505 + ret = test_event(&e);
5506 + if (ret)
5507 + break;
5508 +- snprintf(name, MAX_NAME, "%s:u,cpu/event=%s/u", ent->d_name, ent->d_name);
5509 ++ snprintf(name, sizeof(name), "%s:u,cpu/event=%s/u", ent->d_name, ent->d_name);
5510 + e.name = name;
5511 + e.check = test__checkevent_pmu_events_mix;
5512 + ret = test_event(&e);
5513 +-#undef MAX_NAME
5514 + }
5515 +
5516 + closedir(dir);
5517 +diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
5518 +index e5250eb2dd57..96b5638c3342 100644
5519 +--- a/tools/perf/ui/browsers/annotate.c
5520 ++++ b/tools/perf/ui/browsers/annotate.c
5521 +@@ -716,11 +716,11 @@ static int annotate_browser__run(struct annotate_browser *browser,
5522 + nd = browser->curr_hot;
5523 + break;
5524 + case K_UNTAB:
5525 +- if (nd != NULL)
5526 ++ if (nd != NULL) {
5527 + nd = rb_next(nd);
5528 + if (nd == NULL)
5529 + nd = rb_first(&browser->entries);
5530 +- else
5531 ++ } else
5532 + nd = browser->curr_hot;
5533 + break;
5534 + case K_F1:
5535 +diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
5536 +index d18a59ab4ed5..12ad79717d94 100644
5537 +--- a/tools/perf/util/event.c
5538 ++++ b/tools/perf/util/event.c
5539 +@@ -385,7 +385,7 @@ static int __event__synthesize_thread(union perf_event *comm_event,
5540 + {
5541 + char filename[PATH_MAX];
5542 + DIR *tasks;
5543 +- struct dirent dirent, *next;
5544 ++ struct dirent *dirent;
5545 + pid_t tgid, ppid;
5546 + int rc = 0;
5547 +
5548 +@@ -413,11 +413,11 @@ static int __event__synthesize_thread(union perf_event *comm_event,
5549 + return 0;
5550 + }
5551 +
5552 +- while (!readdir_r(tasks, &dirent, &next) && next) {
5553 ++ while ((dirent = readdir(tasks)) != NULL) {
5554 + char *end;
5555 + pid_t _pid;
5556 +
5557 +- _pid = strtol(dirent.d_name, &end, 10);
5558 ++ _pid = strtol(dirent->d_name, &end, 10);
5559 + if (*end)
5560 + continue;
5561 +
5562 +@@ -523,7 +523,7 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
5563 + {
5564 + DIR *proc;
5565 + char proc_path[PATH_MAX];
5566 +- struct dirent dirent, *next;
5567 ++ struct dirent *dirent;
5568 + union perf_event *comm_event, *mmap_event, *fork_event;
5569 + int err = -1;
5570 +
5571 +@@ -548,9 +548,9 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
5572 + if (proc == NULL)
5573 + goto out_free_fork;
5574 +
5575 +- while (!readdir_r(proc, &dirent, &next) && next) {
5576 ++ while ((dirent = readdir(proc)) != NULL) {
5577 + char *end;
5578 +- pid_t pid = strtol(dirent.d_name, &end, 10);
5579 ++ pid_t pid = strtol(dirent->d_name, &end, 10);
5580 +
5581 + if (*end) /* only interested in proper numerical dirents */
5582 + continue;
5583 +diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
5584 +index 8b02a4355659..3297d7e85dd7 100644
5585 +--- a/tools/perf/util/pmu.c
5586 ++++ b/tools/perf/util/pmu.c
5587 +@@ -148,7 +148,7 @@ static int perf_pmu__parse_unit(struct perf_pmu_alias *alias, char *dir, char *n
5588 + if (fd == -1)
5589 + return -1;
5590 +
5591 +- sret = read(fd, alias->unit, UNIT_MAX_LEN);
5592 ++ sret = read(fd, alias->unit, UNIT_MAX_LEN);
5593 + if (sret < 0)
5594 + goto error;
5595 +
5596 +diff --git a/tools/perf/util/scripting-engines/Build b/tools/perf/util/scripting-engines/Build
5597 +index 6516e220c247..82d28c67e0f3 100644
5598 +--- a/tools/perf/util/scripting-engines/Build
5599 ++++ b/tools/perf/util/scripting-engines/Build
5600 +@@ -1,6 +1,6 @@
5601 + libperf-$(CONFIG_LIBPERL) += trace-event-perl.o
5602 + libperf-$(CONFIG_LIBPYTHON) += trace-event-python.o
5603 +
5604 +-CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-undef -Wno-switch-default
5605 ++CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-nested-externs -Wno-undef -Wno-switch-default
5606 +
5607 + CFLAGS_trace-event-python.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow
5608 +diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
5609 +index 6afd6106ceb5..3b2d7fdde6a6 100644
5610 +--- a/tools/perf/util/string.c
5611 ++++ b/tools/perf/util/string.c
5612 +@@ -21,6 +21,8 @@ s64 perf_atoll(const char *str)
5613 + case 'b': case 'B':
5614 + if (*p)
5615 + goto out_err;
5616 ++
5617 ++ __fallthrough;
5618 + case '\0':
5619 + return length;
5620 + default:
5621 +diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
5622 +index 1c8fbc9588c5..c0b7e17e3167 100644
5623 +--- a/tools/perf/util/thread.c
5624 ++++ b/tools/perf/util/thread.c
5625 +@@ -217,7 +217,7 @@ void thread__find_cpumode_addr_location(struct thread *thread,
5626 + struct addr_location *al)
5627 + {
5628 + size_t i;
5629 +- const u8 const cpumodes[] = {
5630 ++ const u8 cpumodes[] = {
5631 + PERF_RECORD_MISC_USER,
5632 + PERF_RECORD_MISC_KERNEL,
5633 + PERF_RECORD_MISC_GUEST_USER,
5634 +diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c
5635 +index f93b9734735b..905a55401842 100644
5636 +--- a/tools/perf/util/thread_map.c
5637 ++++ b/tools/perf/util/thread_map.c
5638 +@@ -63,7 +63,7 @@ struct thread_map *thread_map__new_by_uid(uid_t uid)
5639 + {
5640 + DIR *proc;
5641 + int max_threads = 32, items, i;
5642 +- char path[256];
5643 ++ char path[NAME_MAX + 1 + 6];
5644 + struct dirent dirent, *next, **namelist = NULL;
5645 + struct thread_map *threads = malloc(sizeof(*threads) +
5646 + max_threads * sizeof(pid_t));