From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Fri, 30 Sep 2016 19:07:28
Message-Id: 1475262431.e3a35f50a5f087b5d20a534a6df48f097ab67201.mpagano@gentoo
commit: e3a35f50a5f087b5d20a534a6df48f097ab67201
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Sep 30 19:07:11 2016 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Sep 30 19:07:11 2016 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e3a35f50

Linux patch 4.4.23

0000_README | 4 +
1022_linux-4.4.23.patch | 2907 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 2911 insertions(+)

diff --git a/0000_README b/0000_README
index d60af0e..7ee3b9f 100644
--- a/0000_README
+++ b/0000_README
@@ -131,6 +131,10 @@ Patch: 1021_linux-4.4.22.patch
From: http://www.kernel.org
Desc: Linux 4.4.22

+Patch: 1022_linux-4.4.23.patch
+From: http://www.kernel.org
+Desc: Linux 4.4.23
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1022_linux-4.4.23.patch b/1022_linux-4.4.23.patch
new file mode 100644
index 0000000..5ea7450
--- /dev/null
+++ b/1022_linux-4.4.23.patch
@@ -0,0 +1,2907 @@
+diff --git a/Makefile b/Makefile
+index a6512f4eec9f..95421b688f23 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 22
++SUBLEVEL = 23
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+
+@@ -128,6 +128,10 @@ _all:
+ # Cancel implicit rules on top Makefile
+ $(CURDIR)/Makefile Makefile: ;
+
++ifneq ($(words $(subst :, ,$(CURDIR))), 1)
++ $(error main directory cannot contain spaces nor colons)
++endif
++
+ ifneq ($(KBUILD_OUTPUT),)
+ # Invoke a second make in the output directory, passing relevant variables
+ # check that the output directory actually exists
+@@ -495,6 +499,12 @@ ifeq ($(KBUILD_EXTMOD),)
+ endif
+ endif
+ endif
++# install and module_install need also be processed one by one
++ifneq ($(filter install,$(MAKECMDGOALS)),)
++ ifneq ($(filter modules_install,$(MAKECMDGOALS)),)
++ mixed-targets := 1
++ endif
++endif
+
+ ifeq ($(mixed-targets),1)
+ # ===========================================================================
+@@ -606,11 +616,16 @@ ARCH_CFLAGS :=
+ include arch/$(SRCARCH)/Makefile
+
+ KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
++KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
+
+ ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
+-KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,)
++KBUILD_CFLAGS += -Os
+ else
++ifdef CONFIG_PROFILE_ALL_BRANCHES
+ KBUILD_CFLAGS += -O2
++else
++KBUILD_CFLAGS += -O2
++endif
+ endif
+
+ # Tell gcc to never replace conditional load with a non-conditional one
+@@ -1260,7 +1275,7 @@ help:
+ @echo ' firmware_install- Install all firmware to INSTALL_FW_PATH'
+ @echo ' (default: $$(INSTALL_MOD_PATH)/lib/firmware)'
+ @echo ' dir/ - Build all files in dir and below'
+- @echo ' dir/file.[oisS] - Build specified target only'
++ @echo ' dir/file.[ois] - Build specified target only'
+ @echo ' dir/file.lst - Build specified mixed source/assembly target only'
+ @echo ' (requires a recent binutils and recent build (System.map))'
+ @echo ' dir/file.ko - Build module including final link'
+@@ -1500,11 +1515,11 @@ image_name:
+ # Clear a bunch of variables before executing the submake
+ tools/: FORCE
+ $(Q)mkdir -p $(objtree)/tools
+- $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(O) subdir=tools -C $(src)/tools/
++ $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(shell cd $(objtree) && /bin/pwd) subdir=tools -C $(src)/tools/
+
+ tools/%: FORCE
+ $(Q)mkdir -p $(objtree)/tools
+- $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(O) subdir=tools -C $(src)/tools/ $*
++ $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(shell cd $(objtree) && /bin/pwd) subdir=tools -C $(src)/tools/ $*
+
+ # Single targets
+ # ---------------------------------------------------------------------------
+diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c
+index b445a5d56f43..593da7ffb449 100644
+--- a/arch/arm/crypto/aes-ce-glue.c
++++ b/arch/arm/crypto/aes-ce-glue.c
+@@ -279,7 +279,7 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ err = blkcipher_walk_done(desc, &walk,
+ walk.nbytes % AES_BLOCK_SIZE);
+ }
+- if (nbytes) {
++ if (walk.nbytes % AES_BLOCK_SIZE) {
+ u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
+ u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
+ u8 __aligned(8) tail[AES_BLOCK_SIZE];
+diff --git a/arch/arm/mach-pxa/idp.c b/arch/arm/mach-pxa/idp.c
+index f6d02e4cbcda..5c87dff5d46e 100644
+--- a/arch/arm/mach-pxa/idp.c
++++ b/arch/arm/mach-pxa/idp.c
+@@ -83,7 +83,8 @@ static struct resource smc91x_resources[] = {
+ };
+
+ static struct smc91x_platdata smc91x_platdata = {
+- .flags = SMC91X_USE_32BIT | SMC91X_USE_DMA | SMC91X_NOWAIT,
++ .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
++ SMC91X_USE_DMA | SMC91X_NOWAIT,
+ };
+
+ static struct platform_device smc91x_device = {
+diff --git a/arch/arm/mach-pxa/xcep.c b/arch/arm/mach-pxa/xcep.c
+index 13b1d4586d7d..9001312710f7 100644
+--- a/arch/arm/mach-pxa/xcep.c
++++ b/arch/arm/mach-pxa/xcep.c
+@@ -120,7 +120,8 @@ static struct resource smc91x_resources[] = {
+ };
+
+ static struct smc91x_platdata xcep_smc91x_info = {
+- .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT | SMC91X_USE_DMA,
++ .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
++ SMC91X_NOWAIT | SMC91X_USE_DMA,
+ };
+
+ static struct platform_device smc91x_device = {
+diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c
+index 44575edc44b1..cf0a7c2359f0 100644
+--- a/arch/arm/mach-realview/core.c
++++ b/arch/arm/mach-realview/core.c
+@@ -95,7 +95,8 @@ static struct smsc911x_platform_config smsc911x_config = {
+ };
+
+ static struct smc91x_platdata smc91x_platdata = {
+- .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
++ .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
++ SMC91X_NOWAIT,
+ };
+
+ static struct platform_device realview_eth_device = {
+diff --git a/arch/arm/mach-sa1100/pleb.c b/arch/arm/mach-sa1100/pleb.c
+index 1525d7b5f1b7..88149f85bc49 100644
+--- a/arch/arm/mach-sa1100/pleb.c
++++ b/arch/arm/mach-sa1100/pleb.c
+@@ -45,7 +45,7 @@ static struct resource smc91x_resources[] = {
+ };
+
+ static struct smc91x_platdata smc91x_platdata = {
+- .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
++ .flags = SMC91X_USE_16BIT | SMC91X_USE_8BIT | SMC91X_NOWAIT,
+ };
+
+ static struct platform_device smc91x_device = {
+diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
+index 05d9e16c0dfd..6a51dfccfe71 100644
+--- a/arch/arm64/crypto/aes-glue.c
++++ b/arch/arm64/crypto/aes-glue.c
+@@ -211,7 +211,7 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ err = blkcipher_walk_done(desc, &walk,
+ walk.nbytes % AES_BLOCK_SIZE);
+ }
+- if (nbytes) {
++ if (walk.nbytes % AES_BLOCK_SIZE) {
+ u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
+ u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
+ u8 __aligned(8) tail[AES_BLOCK_SIZE];
+diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c
+index c6db52ba3a06..10c57771822d 100644
+--- a/arch/blackfin/mach-bf561/boards/cm_bf561.c
++++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c
+@@ -146,7 +146,8 @@ static struct platform_device hitachi_fb_device = {
+ #include <linux/smc91x.h>
+
+ static struct smc91x_platdata smc91x_info = {
+- .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
++ .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
++ SMC91X_NOWAIT,
+ .leda = RPC_LED_100_10,
+ .ledb = RPC_LED_TX_RX,
+ };
+diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
+index 2de71e8c104b..93c22468cc14 100644
+--- a/arch/blackfin/mach-bf561/boards/ezkit.c
++++ b/arch/blackfin/mach-bf561/boards/ezkit.c
+@@ -134,7 +134,8 @@ static struct platform_device net2272_bfin_device = {
+ #include <linux/smc91x.h>
+
+ static struct smc91x_platdata smc91x_info = {
+- .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
++ .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
++ SMC91X_NOWAIT,
+ .leda = RPC_LED_100_10,
+ .ledb = RPC_LED_TX_RX,
+ };
+diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
+index f0e314ceb8ba..7f975b20b20c 100644
+--- a/arch/mips/Kconfig.debug
++++ b/arch/mips/Kconfig.debug
+@@ -113,42 +113,6 @@ config SPINLOCK_TEST
+ help
+ Add several files to the debugfs to test spinlock speed.
+
+-if CPU_MIPSR6
+-
+-choice
+- prompt "Compact branch policy"
+- default MIPS_COMPACT_BRANCHES_OPTIMAL
+-
+-config MIPS_COMPACT_BRANCHES_NEVER
+- bool "Never (force delay slot branches)"
+- help
+- Pass the -mcompact-branches=never flag to the compiler in order to
+- force it to always emit branches with delay slots, and make no use
+- of the compact branch instructions introduced by MIPSr6. This is
+- useful if you suspect there may be an issue with compact branches in
+- either the compiler or the CPU.
+-
+-config MIPS_COMPACT_BRANCHES_OPTIMAL
+- bool "Optimal (use where beneficial)"
+- help
+- Pass the -mcompact-branches=optimal flag to the compiler in order for
+- it to make use of compact branch instructions where it deems them
+- beneficial, and use branches with delay slots elsewhere. This is the
+- default compiler behaviour, and should be used unless you have a
+- reason to choose otherwise.
+-
+-config MIPS_COMPACT_BRANCHES_ALWAYS
+- bool "Always (force compact branches)"
+- help
+- Pass the -mcompact-branches=always flag to the compiler in order to
+- force it to always emit compact branches, making no use of branch
+- instructions with delay slots. This can result in more compact code
+- which may be beneficial in some scenarios.
+-
+-endchoice
+-
+-endif # CPU_MIPSR6
+-
+ config SCACHE_DEBUGFS
+ bool "L2 cache debugfs entries"
+ depends on DEBUG_FS
+diff --git a/arch/mips/Makefile b/arch/mips/Makefile
+index 3f70ba54ae21..252e347958f3 100644
+--- a/arch/mips/Makefile
++++ b/arch/mips/Makefile
+@@ -204,10 +204,6 @@ toolchain-msa := $(call cc-option-yn,$(mips-cflags) -mhard-float -mfp64 -Wa$(
+ cflags-$(toolchain-msa) += -DTOOLCHAIN_SUPPORTS_MSA
+ endif
+
+-cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_NEVER) += -mcompact-branches=never
+-cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_OPTIMAL) += -mcompact-branches=optimal
+-cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_ALWAYS) += -mcompact-branches=always
+-
+ #
+ # Firmware support
+ #
+diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
+index e689b894353c..8dedee1def83 100644
+--- a/arch/mips/include/asm/asmmacro.h
++++ b/arch/mips/include/asm/asmmacro.h
+@@ -135,6 +135,7 @@
+ ldc1 $f28, THREAD_FPR28(\thread)
+ ldc1 $f30, THREAD_FPR30(\thread)
+ ctc1 \tmp, fcr31
++ .set pop
+ .endm
+
+ .macro fpu_restore_16odd thread
+diff --git a/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h b/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h
+index 2f82bfa3a773..c9f5769dfc8f 100644
+--- a/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h
++++ b/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h
+@@ -11,11 +11,13 @@
+ #define CP0_EBASE $15, 1
+
+ .macro kernel_entry_setup
++#ifdef CONFIG_SMP
+ mfc0 t0, CP0_EBASE
+ andi t0, t0, 0x3ff # CPUNum
+ beqz t0, 1f
+ # CPUs other than zero goto smp_bootstrap
+ j smp_bootstrap
++#endif /* CONFIG_SMP */
+
+ 1:
+ .endm
+diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
+index 4674a74a08b5..af27334d6809 100644
+--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
++++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
+@@ -1164,7 +1164,9 @@ fpu_emul:
+ regs->regs[31] = r31;
+ regs->cp0_epc = epc;
+ if (!used_math()) { /* First time FPU user. */
++ preempt_disable();
+ err = init_fpu();
++ preempt_enable();
+ set_used_math();
+ }
+ lose_fpu(1); /* Save FPU state for the emulator. */
+diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
+index 89847bee2b53..44a6f25e902e 100644
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -593,14 +593,14 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
+ return -EOPNOTSUPP;
+
+ /* Avoid inadvertently triggering emulation */
+- if ((value & PR_FP_MODE_FR) && cpu_has_fpu &&
+- !(current_cpu_data.fpu_id & MIPS_FPIR_F64))
++ if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
++ !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
+ return -EOPNOTSUPP;
+- if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre)
++ if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
+ return -EOPNOTSUPP;
+
+ /* FR = 0 not supported in MIPS R6 */
+- if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6)
++ if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
+ return -EOPNOTSUPP;
+
+ /* Proceed with the mode switch */
+diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
+index 2b521e07b860..7fef02a9eb85 100644
+--- a/arch/mips/kernel/smp.c
++++ b/arch/mips/kernel/smp.c
+@@ -174,6 +174,9 @@ asmlinkage void start_secondary(void)
+ cpumask_set_cpu(cpu, &cpu_coherent_mask);
+ notify_cpu_starting(cpu);
+
++ cpumask_set_cpu(cpu, &cpu_callin_map);
++ synchronise_count_slave(cpu);
++
+ set_cpu_online(cpu, true);
+
+ set_cpu_sibling_map(cpu);
+@@ -181,10 +184,6 @@ asmlinkage void start_secondary(void)
+
+ calculate_cpu_foreign_map();
+
+- cpumask_set_cpu(cpu, &cpu_callin_map);
+-
+- synchronise_count_slave(cpu);
+-
+ /*
+ * irq will be enabled in ->smp_finish(), enabling it too early
+ * is dangerous.
+diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
+index 975e99759bab..5649a9e429e0 100644
+--- a/arch/mips/kernel/vdso.c
++++ b/arch/mips/kernel/vdso.c
+@@ -39,16 +39,16 @@ static struct vm_special_mapping vdso_vvar_mapping = {
+ static void __init init_vdso_image(struct mips_vdso_image *image)
+ {
+ unsigned long num_pages, i;
++ unsigned long data_pfn;
+
+ BUG_ON(!PAGE_ALIGNED(image->data));
+ BUG_ON(!PAGE_ALIGNED(image->size));
+
+ num_pages = image->size / PAGE_SIZE;
+
+- for (i = 0; i < num_pages; i++) {
+- image->mapping.pages[i] =
+- virt_to_page(image->data + (i * PAGE_SIZE));
+- }
++ data_pfn = __phys_to_pfn(__pa_symbol(image->data));
++ for (i = 0; i < num_pages; i++)
++ image->mapping.pages[i] = pfn_to_page(data_pfn + i);
+ }
+
+ static int __init init_vdso(void)
+diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
+index 8cc1622b2ee0..dca7bc87dad9 100644
+--- a/crypto/blkcipher.c
++++ b/crypto/blkcipher.c
+@@ -234,6 +234,8 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc,
+ return blkcipher_walk_done(desc, walk, -EINVAL);
+ }
+
++ bsize = min(walk->walk_blocksize, n);
++
+ walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
+ BLKCIPHER_WALK_DIFF);
+ if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
+@@ -246,7 +248,6 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc,
+ }
+ }
+
+- bsize = min(walk->walk_blocksize, n);
+ n = scatterwalk_clamp(&walk->in, n);
+ n = scatterwalk_clamp(&walk->out, n);
+
+diff --git a/crypto/echainiv.c b/crypto/echainiv.c
+index b96a84560b67..343a74e96e2a 100644
+--- a/crypto/echainiv.c
++++ b/crypto/echainiv.c
+@@ -1,8 +1,8 @@
+ /*
+ * echainiv: Encrypted Chain IV Generator
+ *
+- * This generator generates an IV based on a sequence number by xoring it
+- * with a salt and then encrypting it with the same key as used to encrypt
++ * This generator generates an IV based on a sequence number by multiplying
++ * it with a salt and then encrypting it with the same key as used to encrypt
+ * the plain text. This algorithm requires that the block size be equal
+ * to the IV size. It is mainly useful for CBC.
+ *
+@@ -23,81 +23,17 @@
+ #include <linux/err.h>
+ #include <linux/init.h>
+ #include <linux/kernel.h>
+-#include <linux/mm.h>
+ #include <linux/module.h>
+-#include <linux/percpu.h>
+-#include <linux/spinlock.h>
++#include <linux/slab.h>
+ #include <linux/string.h>
+
+-#define MAX_IV_SIZE 16
+-
+-static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv);
+-
+-/* We don't care if we get preempted and read/write IVs from the next CPU. */
+-static void echainiv_read_iv(u8 *dst, unsigned size)
+-{
+- u32 *a = (u32 *)dst;
+- u32 __percpu *b = echainiv_iv;
+-
+- for (; size >= 4; size -= 4) {
+- *a++ = this_cpu_read(*b);
+- b++;
+- }
+-}
+-
+-static void echainiv_write_iv(const u8 *src, unsigned size)
+-{
+- const u32 *a = (const u32 *)src;
+- u32 __percpu *b = echainiv_iv;
+-
+- for (; size >= 4; size -= 4) {
+- this_cpu_write(*b, *a);
+- a++;
+- b++;
+- }
+-}
+-
+-static void echainiv_encrypt_complete2(struct aead_request *req, int err)
+-{
+- struct aead_request *subreq = aead_request_ctx(req);
+- struct crypto_aead *geniv;
+- unsigned int ivsize;
+-
+- if (err == -EINPROGRESS)
+- return;
+-
+- if (err)
+- goto out;
+-
+- geniv = crypto_aead_reqtfm(req);
+- ivsize = crypto_aead_ivsize(geniv);
+-
+- echainiv_write_iv(subreq->iv, ivsize);
+-
+- if (req->iv != subreq->iv)
+- memcpy(req->iv, subreq->iv, ivsize);
+-
+-out:
+- if (req->iv != subreq->iv)
+- kzfree(subreq->iv);
+-}
+-
+-static void echainiv_encrypt_complete(struct crypto_async_request *base,
+- int err)
+-{
+- struct aead_request *req = base->data;
+-
+- echainiv_encrypt_complete2(req, err);
+- aead_request_complete(req, err);
+-}
+-
+ static int echainiv_encrypt(struct aead_request *req)
+ {
+ struct crypto_aead *geniv = crypto_aead_reqtfm(req);
+ struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
+ struct aead_request *subreq = aead_request_ctx(req);
+- crypto_completion_t compl;
+- void *data;
++ __be64 nseqno;
++ u64 seqno;
+ u8 *info;
+ unsigned int ivsize = crypto_aead_ivsize(geniv);
+ int err;
+@@ -107,8 +43,6 @@ static int echainiv_encrypt(struct aead_request *req)
+
+ aead_request_set_tfm(subreq, ctx->child);
+
+- compl = echainiv_encrypt_complete;
+- data = req;
+ info = req->iv;
+
+ if (req->src != req->dst) {
+@@ -123,29 +57,30 @@ static int echainiv_encrypt(struct aead_request *req)
+ return err;
+ }
+
+- if (unlikely(!IS_ALIGNED((unsigned long)info,
+- crypto_aead_alignmask(geniv) + 1))) {
+- info = kmalloc(ivsize, req->base.flags &
+- CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
+- GFP_ATOMIC);
+- if (!info)
+- return -ENOMEM;
+-
+- memcpy(info, req->iv, ivsize);
+- }
+-
+- aead_request_set_callback(subreq, req->base.flags, compl, data);
++ aead_request_set_callback(subreq, req->base.flags,
++ req->base.complete, req->base.data);
+ aead_request_set_crypt(subreq, req->dst, req->dst,
+ req->cryptlen, info);
+ aead_request_set_ad(subreq, req->assoclen);
+
+- crypto_xor(info, ctx->salt, ivsize);
++ memcpy(&nseqno, info + ivsize - 8, 8);
++ seqno = be64_to_cpu(nseqno);
++ memset(info, 0, ivsize);
++
+ scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
+- echainiv_read_iv(info, ivsize);
+
+- err = crypto_aead_encrypt(subreq);
+- echainiv_encrypt_complete2(req, err);
+- return err;
++ do {
++ u64 a;
++
++ memcpy(&a, ctx->salt + ivsize - 8, 8);
++
++ a |= 1;
++ a *= seqno;
++
++ memcpy(info + ivsize - 8, &a, 8);
++ } while ((ivsize -= 8));
++
++ return crypto_aead_encrypt(subreq);
+ }
+
+ static int echainiv_decrypt(struct aead_request *req)
+@@ -192,8 +127,7 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
+ alg = crypto_spawn_aead_alg(spawn);
+
+ err = -EINVAL;
+- if (inst->alg.ivsize & (sizeof(u32) - 1) ||
+- inst->alg.ivsize > MAX_IV_SIZE)
++ if (inst->alg.ivsize & (sizeof(u64) - 1) || !inst->alg.ivsize)
+ goto free_inst;
+
+ inst->alg.encrypt = echainiv_encrypt;
+@@ -202,7 +136,6 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
+ inst->alg.init = aead_init_geniv;
+ inst->alg.exit = aead_exit_geniv;
+
+- inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
+ inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
+ inst->alg.base.cra_ctxsize += inst->alg.ivsize;
+
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
+index 4bef72a9d106..3fda594700e0 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
+@@ -59,9 +59,11 @@ static void
+ nv40_perfctr_next(struct nvkm_pm *pm, struct nvkm_perfdom *dom)
+ {
+ struct nvkm_device *device = pm->engine.subdev.device;
+- if (pm->sequence != pm->sequence) {
++ struct nv40_pm *nv40pm = container_of(pm, struct nv40_pm, base);
++
++ if (nv40pm->sequence != pm->sequence) {
+ nvkm_wr32(device, 0x400084, 0x00000020);
+- pm->sequence = pm->sequence;
++ nv40pm->sequence = pm->sequence;
+ }
+ }
+
+diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
+index 56e1d633875e..6e6c76080d6a 100644
+--- a/drivers/gpu/drm/qxl/qxl_draw.c
++++ b/drivers/gpu/drm/qxl/qxl_draw.c
+@@ -136,6 +136,8 @@ static int qxl_palette_create_1bit(struct qxl_bo *palette_bo,
+ * correctly globaly, since that would require
+ * tracking all of our palettes. */
+ ret = qxl_bo_kmap(palette_bo, (void **)&pal);
++ if (ret)
++ return ret;
+ pal->num_ents = 2;
+ pal->unique = unique++;
+ if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) {
+diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
+index 76e699f9ed97..eef3aa6007f1 100644
+--- a/drivers/i2c/busses/i2c-eg20t.c
++++ b/drivers/i2c/busses/i2c-eg20t.c
+@@ -773,13 +773,6 @@ static int pch_i2c_probe(struct pci_dev *pdev,
+ /* Set the number of I2C channel instance */
+ adap_info->ch_num = id->driver_data;
+
+- ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
+- KBUILD_MODNAME, adap_info);
+- if (ret) {
+- pch_pci_err(pdev, "request_irq FAILED\n");
+- goto err_request_irq;
+- }
+-
+ for (i = 0; i < adap_info->ch_num; i++) {
+ pch_adap = &adap_info->pch_data[i].pch_adapter;
+ adap_info->pch_i2c_suspended = false;
+@@ -796,6 +789,17 @@ static int pch_i2c_probe(struct pci_dev *pdev,
+ adap_info->pch_data[i].pch_base_address = base_addr + 0x100 * i;
+
+ pch_adap->dev.parent = &pdev->dev;
++ }
++
++ ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
++ KBUILD_MODNAME, adap_info);
++ if (ret) {
++ pch_pci_err(pdev, "request_irq FAILED\n");
++ goto err_request_irq;
++ }
++
++ for (i = 0; i < adap_info->ch_num; i++) {
++ pch_adap = &adap_info->pch_data[i].pch_adapter;
+
+ pch_i2c_init(&adap_info->pch_data[i]);
+
+diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
+index fdcbdab808e9..33b11563cde7 100644
+--- a/drivers/i2c/busses/i2c-qup.c
++++ b/drivers/i2c/busses/i2c-qup.c
+@@ -727,7 +727,8 @@ static int qup_i2c_pm_resume_runtime(struct device *device)
+ #ifdef CONFIG_PM_SLEEP
+ static int qup_i2c_suspend(struct device *device)
+ {
+- qup_i2c_pm_suspend_runtime(device);
++ if (!pm_runtime_suspended(device))
++ return qup_i2c_pm_suspend_runtime(device);
+ return 0;
+ }
+
+diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
+index 7ede941e9301..131b434af994 100644
+--- a/drivers/iio/industrialio-core.c
++++ b/drivers/iio/industrialio-core.c
+@@ -433,16 +433,15 @@ ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
+ scale_db = true;
+ case IIO_VAL_INT_PLUS_MICRO:
+ if (vals[1] < 0)
+- return sprintf(buf, "-%ld.%06u%s\n", abs(vals[0]),
+- -vals[1],
+- scale_db ? " dB" : "");
++ return sprintf(buf, "-%d.%06u%s\n", abs(vals[0]),
++ -vals[1], scale_db ? " dB" : "");
+ else
+ return sprintf(buf, "%d.%06u%s\n", vals[0], vals[1],
+ scale_db ? " dB" : "");
+ case IIO_VAL_INT_PLUS_NANO:
+ if (vals[1] < 0)
+- return sprintf(buf, "-%ld.%09u\n", abs(vals[0]),
+- -vals[1]);
++ return sprintf(buf, "-%d.%09u\n", abs(vals[0]),
++ -vals[1]);
+ else
+ return sprintf(buf, "%d.%09u\n", vals[0], vals[1]);
+ case IIO_VAL_FRACTIONAL:
+diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
+index 3821c4786662..565bb2c140ed 100644
+--- a/drivers/iommu/dmar.c
++++ b/drivers/iommu/dmar.c
+@@ -1858,10 +1858,11 @@ static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
+ /*
+ * All PCI devices managed by this unit should have been destroyed.
+ */
+- if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt)
++ if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
+ for_each_active_dev_scope(dmaru->devices,
+ dmaru->devices_cnt, i, dev)
+ return -EBUSY;
++ }
+
+ ret = dmar_ir_hotplug(dmaru, false);
+ if (ret == 0)
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 24d81308a1a6..b7f852d824a3 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -4182,10 +4182,11 @@ int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
+ if (!atsru)
+ return 0;
+
+- if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
++ if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
+ for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
+ i, dev)
+ return -EBUSY;
++ }
+
+ return 0;
+ }
+diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
+index f0480d687f17..ba780c45f645 100644
+--- a/drivers/media/platform/am437x/am437x-vpfe.c
++++ b/drivers/media/platform/am437x/am437x-vpfe.c
+@@ -1706,7 +1706,7 @@ static int vpfe_get_app_input_index(struct vpfe_device *vpfe,
+ sdinfo = &cfg->sub_devs[i];
+ client = v4l2_get_subdevdata(sdinfo->sd);
+ if (client->addr == curr_client->addr &&
+- client->adapter->nr == client->adapter->nr) {
++ client->adapter->nr == curr_client->adapter->nr) {
+ if (vpfe->current_input >= 1)
+ return -1;
+ *app_input_index = j + vpfe->current_input;
+diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c
+index 744ca5cacc9b..f9fa3fad728e 100644
+--- a/drivers/mtd/maps/pmcmsp-flash.c
++++ b/drivers/mtd/maps/pmcmsp-flash.c
+@@ -75,15 +75,15 @@ static int __init init_msp_flash(void)
+
+ printk(KERN_NOTICE "Found %d PMC flash devices\n", fcnt);
+
+- msp_flash = kmalloc(fcnt * sizeof(struct map_info *), GFP_KERNEL);
++ msp_flash = kcalloc(fcnt, sizeof(*msp_flash), GFP_KERNEL);
+ if (!msp_flash)
+ return -ENOMEM;
+
+- msp_parts = kmalloc(fcnt * sizeof(struct mtd_partition *), GFP_KERNEL);
++ msp_parts = kcalloc(fcnt, sizeof(*msp_parts), GFP_KERNEL);
+ if (!msp_parts)
+ goto free_msp_flash;
+
+- msp_maps = kcalloc(fcnt, sizeof(struct mtd_info), GFP_KERNEL);
++ msp_maps = kcalloc(fcnt, sizeof(*msp_maps), GFP_KERNEL);
+ if (!msp_maps)
+ goto free_msp_parts;
+
+diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
+index 142fc3d79463..784c6e1a0391 100644
+--- a/drivers/mtd/maps/sa1100-flash.c
++++ b/drivers/mtd/maps/sa1100-flash.c
+@@ -230,8 +230,10 @@ static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev,
+
+ info->mtd = mtd_concat_create(cdev, info->num_subdev,
+ plat->name);
+- if (info->mtd == NULL)
++ if (info->mtd == NULL) {
+ ret = -ENXIO;
++ goto err;
++ }
+ }
+ info->mtd->dev.parent = &pdev->dev;
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index b3d70a7a5262..5dca77e0ffed 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1317,9 +1317,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
+ slave_dev->name);
+ }
+
+- /* already enslaved */
+- if (slave_dev->flags & IFF_SLAVE) {
+- netdev_dbg(bond_dev, "Error: Device was already enslaved\n");
++ /* already in-use? */
++ if (netdev_is_rx_handler_busy(slave_dev)) {
++ netdev_err(bond_dev,
++ "Error: Device is in use and cannot be enslaved\n");
+ return -EBUSY;
+ }
+
+diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
+index 41c0fc9f3b14..16f7cadda5c3 100644
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -1268,11 +1268,10 @@ static int __maybe_unused flexcan_suspend(struct device *device)
+ struct flexcan_priv *priv = netdev_priv(dev);
+ int err;
+
+- err = flexcan_chip_disable(priv);
+- if (err)
+- return err;
+-
+ if (netif_running(dev)) {
++ err = flexcan_chip_disable(priv);
++ if (err)
++ return err;
+ netif_stop_queue(dev);
+ netif_device_detach(dev);
+ }
+@@ -1285,13 +1284,17 @@ static int __maybe_unused flexcan_resume(struct device *device)
+ {
+ struct net_device *dev = dev_get_drvdata(device);
+ struct flexcan_priv *priv = netdev_priv(dev);
++ int err;
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ if (netif_running(dev)) {
+ netif_device_attach(dev);
+ netif_start_queue(dev);
++ err = flexcan_chip_enable(priv);
++ if (err)
++ return err;
+ }
+- return flexcan_chip_enable(priv);
++ return 0;
+ }
+
+ static SIMPLE_DEV_PM_OPS(flexcan_pm_ops, flexcan_suspend, flexcan_resume);
+diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
+index 6bba1c98d764..c7994e372284 100644
+--- a/drivers/net/dsa/bcm_sf2.h
++++ b/drivers/net/dsa/bcm_sf2.h
+@@ -187,8 +187,8 @@ static inline void name##_writeq(struct bcm_sf2_priv *priv, u64 val, \
+ static inline void intrl2_##which##_mask_clear(struct bcm_sf2_priv *priv, \
+ u32 mask) \
+ { \
+- intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
+ priv->irq##which##_mask &= ~(mask); \
++ intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
+ } \
+ static inline void intrl2_##which##_mask_set(struct bcm_sf2_priv *priv, \
+ u32 mask) \
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+index 037fc4cdf5af..cc199063612a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+@@ -143,13 +143,14 @@ static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
+ return cmd->cmd_buf + (idx << cmd->log_stride);
+ }
+
+-static u8 xor8_buf(void *buf, int len)
++static u8 xor8_buf(void *buf, size_t offset, int len)
+ {
+ u8 *ptr = buf;
+ u8 sum = 0;
+ int i;
++ int end = len + offset;
+
+- for (i = 0; i < len; i++)
++ for (i = offset; i < end; i++)
+ sum ^= ptr[i];
+
+ return sum;
+@@ -157,41 +158,49 @@ static u8 xor8_buf(void *buf, int len)
+
+ static int verify_block_sig(struct mlx5_cmd_prot_block *block)
+ {
+- if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
++ size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
++ int xor_len = sizeof(*block) - sizeof(block->data) - 1;
++
++ if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
+ return -EINVAL;
+
+- if (xor8_buf(block, sizeof(*block)) != 0xff)
++ if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
+ return -EINVAL;
+
+ return 0;
+ }
+
+-static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
+- int csum)
++static void calc_block_sig(struct mlx5_cmd_prot_block *block)
+ {
+- block->token = token;
+- if (csum) {
+- block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
+- sizeof(block->data) - 2);
+- block->sig = ~xor8_buf(block, sizeof(*block) - 1);
+- }
++ int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
++ size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
++
++ block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
++ block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
+ }
+
+-static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
++static void calc_chain_sig(struct mlx5_cmd_msg *msg)
+ {
+ struct mlx5_cmd_mailbox *next = msg->next;
+-
+- while (next) {
+- calc_block_sig(next->buf, token, csum);
++ int size = msg->len;
++ int blen = size - min_t(int, sizeof(msg->first.data), size);
++ int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
++ / MLX5_CMD_DATA_BLOCK_SIZE;
++ int i = 0;
++
++ for (i = 0; i < n && next; i++) {
++ calc_block_sig(next->buf);
+ next = next->next;
+ }
+ }
+
+ static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
+ {
+- ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
+- calc_chain_sig(ent->in, ent->token, csum);
+- calc_chain_sig(ent->out, ent->token, csum);
++ ent->lay->sig = ~xor8_buf(ent->lay, 0, sizeof(*ent->lay));
++ if (csum) {
++ calc_chain_sig(ent->in);
++ calc_chain_sig(ent->out);
++ }
+ }
+
+ static void poll_timeout(struct mlx5_cmd_work_ent *ent)
+@@ -222,12 +231,17 @@ static int verify_signature(struct mlx5_cmd_work_ent *ent)
+ struct mlx5_cmd_mailbox *next = ent->out->next;
+ int err;
+ u8 sig;
++ int size = ent->out->len;
++ int blen = size - min_t(int, sizeof(ent->out->first.data), size);
++ int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
++ / MLX5_CMD_DATA_BLOCK_SIZE;
++ int i = 0;
+
+- sig = xor8_buf(ent->lay, sizeof(*ent->lay));
++ sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
+ if (sig != 0xff)
+ return -EINVAL;
+
+- while (next) {
++ for (i = 0; i < n && next; i++) {
+ err = verify_block_sig(next->buf);
+ if (err)
+ return err;
+@@ -641,7 +655,6 @@ static void cmd_work_handler(struct work_struct *work)
+ spin_unlock_irqrestore(&cmd->alloc_lock, flags);
+ }
+
+- ent->token = alloc_token(cmd);
+ cmd->ent_arr[ent->idx] = ent;
+ lay = get_inst(cmd, ent->idx);
+ ent->lay = lay;
+@@ -755,7 +768,8 @@ static u8 *get_status_ptr(struct mlx5_outbox_hdr *out)
+ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
+ struct mlx5_cmd_msg *out, void *uout, int uout_size,
+ mlx5_cmd_cbk_t callback,
+- void *context, int page_queue, u8 *status)
++ void *context, int page_queue, u8 *status,
++ u8 token)
+ {
+ struct mlx5_cmd *cmd = &dev->cmd;
+ struct mlx5_cmd_work_ent *ent;
+@@ -772,6 +786,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+
++ ent->token = token;
++
+ if (!callback)
+ init_completion(&ent->done);
+
+@@ -844,7 +860,8 @@ static const struct file_operations fops = {
+ .write = dbg_write,
+ };
+
+-static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
++static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
++ u8 token)
+ {
+ struct mlx5_cmd_prot_block *block;
+ struct mlx5_cmd_mailbox *next;
+@@ -870,6 +887,7 @@ static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
+ memcpy(block->data, from, copy);
+ from += copy;
+ size -= copy;
++ block->token = token;
+ next = next->next;
+ }
+
+@@ -939,7 +957,8 @@ static void free_cmd_box(struct mlx5_core_dev *dev,
+ }
+
+ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
+- gfp_t flags, int size)
++ gfp_t flags, int size,
++ u8 token)
+ {
+ struct mlx5_cmd_mailbox *tmp, *head = NULL;
+ struct mlx5_cmd_prot_block *block;
+@@ -968,6 +987,7 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
+ tmp->next = head;
+ block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
+ block->block_num = cpu_to_be32(n - i - 1);
++ block->token = token;
+ head = tmp;
+ }
+ msg->next = head;
+@@ -1351,7 +1371,7 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
+ }
+
+ if (IS_ERR(msg))
+- msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);
++ msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
+
+ return msg;
+ }
+@@ -1376,6 +1396,7 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
+ int err;
+ u8 status = 0;
+ u32 drv_synd;
++ u8 token;
+
+ if (pci_channel_offline(dev->pdev) ||
+ dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+@@ -1394,20 +1415,22 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
+ return err;
+ }
+
+- err = mlx5_copy_to_msg(inb, in, in_size);
++ token = alloc_token(&dev->cmd);
++
++ err = mlx5_copy_to_msg(inb, in, in_size, token);
+ if (err) {
+ mlx5_core_warn(dev, "err %d\n", err);
+ goto out_in;
+ }
+
+- outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
++ outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
+ if (IS_ERR(outb)) {
+ err = PTR_ERR(outb);
+ goto out_in;
+ }
+
+ err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
+- pages_queue, &status);
++ pages_queue, &status, token);
+ if (err)
+ goto out_out;
+
+@@ -1475,7 +1498,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev)
+ INIT_LIST_HEAD(&cmd->cache.med.head);
+
+ for (i = 0; i < NUM_LONG_LISTS; i++) {
+- msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
++ msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE, 0);
+ if (IS_ERR(msg)) {
+ err = PTR_ERR(msg);
+ goto ex_err;
+@@ -1485,7 +1508,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev)
+ }
+
+ for (i = 0; i < NUM_MED_LISTS; i++) {
+- msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
++ msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE, 0);
+ if (IS_ERR(msg)) {
+ err = PTR_ERR(msg);
+ goto ex_err;
+diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
+index 0e2fc1a844ab..8c44cf6ff7a2 100644
+--- a/drivers/net/ethernet/smsc/smc91x.c
++++ b/drivers/net/ethernet/smsc/smc91x.c
+@@ -2269,6 +2269,13 @@ static int smc_drv_probe(struct platform_device *pdev)
+ if (pd) {
+ memcpy(&lp->cfg, pd, sizeof(lp->cfg));
+ lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags);
++
++ if (!SMC_8BIT(lp) && !SMC_16BIT(lp)) {
++ dev_err(&pdev->dev,
++ "at least one of 8-bit or 16-bit access support is required.\n");
++ ret = -ENXIO;
++ goto out_free_netdev;
++ }
+ }
+
+ #if IS_BUILTIN(CONFIG_OF)
+diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
+index a3c129e1e40a..29df0465daf4 100644
+--- a/drivers/net/ethernet/smsc/smc91x.h
++++ b/drivers/net/ethernet/smsc/smc91x.h
+@@ -37,6 +37,27 @@
+ #include <linux/smc91x.h>
+
+ /*
++ * Any 16-bit access is performed with two 8-bit accesses if the hardware
++ * can't do it directly. Most registers are 16-bit so those are mandatory.
++ */
++#define SMC_outw_b(x, a, r) \
++ do { \
++ unsigned int __val16 = (x); \
++ unsigned int __reg = (r); \
++ SMC_outb(__val16, a, __reg); \
++ SMC_outb(__val16 >> 8, a, __reg + (1 << SMC_IO_SHIFT)); \
++ } while (0)
++
++#define SMC_inw_b(a, r) \
++ ({ \
++ unsigned int __val16; \
++ unsigned int __reg = r; \
++ __val16 = SMC_inb(a, __reg); \
++ __val16 |= SMC_inb(a, __reg + (1 << SMC_IO_SHIFT)) << 8; \
++ __val16; \
++ })
++
++/*
+ * Define your architecture specific bus configuration parameters here.
+ */
+
+@@ -55,10 +76,30 @@
+ #define SMC_IO_SHIFT (lp->io_shift)
+
+ #define SMC_inb(a, r) readb((a) + (r))
+-#define SMC_inw(a, r) readw((a) + (r))
++#define SMC_inw(a, r) \
++ ({ \
++ unsigned int __smc_r = r; \
++ SMC_16BIT(lp) ? readw((a) + __smc_r) : \
++ SMC_8BIT(lp) ? SMC_inw_b(a, __smc_r) : \
++ ({ BUG(); 0; }); \
++ })
++
+ #define SMC_inl(a, r) readl((a) + (r))
+ #define SMC_outb(v, a, r) writeb(v, (a) + (r))
++#define SMC_outw(v, a, r) \
++ do { \
++ unsigned int __v = v, __smc_r = r; \
++ if (SMC_16BIT(lp)) \
++ __SMC_outw(__v, a, __smc_r); \
++ else if (SMC_8BIT(lp)) \
++ SMC_outw_b(__v, a, __smc_r); \
++ else \
++ BUG(); \
++ } while (0)
++
+ #define SMC_outl(v, a, r) writel(v, (a) + (r))
++#define SMC_insb(a, r, p, l) readsb((a) + (r), p, l)
++#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, l)
+ #define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
+ #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
+ #define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
+@@ -66,7 +107,7 @@
+ #define SMC_IRQ_FLAGS (-1) /* from resource */
+
+ /* We actually can't write halfwords properly if not word aligned */
+-static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
++static inline void __SMC_outw(u16 val, void __iomem *ioaddr, int reg)
+ {
+ if ((machine_is_mainstone() || machine_is_stargate2() ||
+ machine_is_pxa_idp()) && reg & 2) {
+@@ -405,24 +446,8 @@ smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
+
+ #if ! SMC_CAN_USE_16BIT
+
+-/*
+- * Any 16-bit access is performed with two 8-bit accesses if the hardware
+- * can't do it directly. Most registers are 16-bit so those are mandatory.
+- */
+-#define SMC_outw(x, ioaddr, reg) \
+- do { \
+- unsigned int __val16 = (x); \
+- SMC_outb( __val16, ioaddr, reg ); \
+- SMC_outb( __val16 >> 8, ioaddr, reg + (1 << SMC_IO_SHIFT));\
+- } while (0)
+-#define SMC_inw(ioaddr, reg) \
+- ({ \
+- unsigned int __val16; \
+- __val16 = SMC_inb( ioaddr, reg ); \
+- __val16 |= SMC_inb( ioaddr, reg + (1 << SMC_IO_SHIFT)) << 8; \
+- __val16; \
+- })
+-
++#define SMC_outw(x, ioaddr, reg) SMC_outw_b(x, ioaddr, reg)
++#define SMC_inw(ioaddr, reg) SMC_inw_b(ioaddr, reg)
+ #define SMC_insw(a, r, p, l) BUG()
+ #define SMC_outsw(a, r, p, l) BUG()
+
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index 47cd306dbb3c..bba0ca786aaa 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -640,8 +640,10 @@ phy_err:
+ int phy_start_interrupts(struct phy_device *phydev)
+ {
+ atomic_set(&phydev->irq_disable, 0);
+- if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt",
+- phydev) < 0) {
++ if (request_irq(phydev->irq, phy_interrupt,
++ IRQF_SHARED,
++ "phy_interrupt",
++ phydev) < 0) {
+ pr_warn("%s: Can't get IRQ %d (PHY)\n",
+ phydev->bus->name, phydev->irq);
+ phydev->irq = PHY_POLL;
+diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
+index 1bdeacf7b257..bc70ce62bc03 100644
+--- a/drivers/net/wireless/ath/ath9k/init.c
++++ b/drivers/net/wireless/ath/ath9k/init.c
+@@ -869,8 +869,8 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
+ hw->wiphy->interface_modes |=
+ BIT(NL80211_IFTYPE_P2P_DEVICE);
+
+- hw->wiphy->iface_combinations = if_comb;
+- hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
++ hw->wiphy->iface_combinations = if_comb;
++ hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+ }
+
+ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
+index 93bdf684babe..ae047ab7a4df 100644
+--- a/drivers/net/wireless/iwlegacy/3945.c
++++ b/drivers/net/wireless/iwlegacy/3945.c
+@@ -1019,12 +1019,13 @@ il3945_hw_txq_ctx_free(struct il_priv *il)
+ int txq_id;
+
+ /* Tx queues */
+- if (il->txq)
++ if (il->txq) {
+ for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+ if (txq_id == IL39_CMD_QUEUE_NUM)
+ il_cmd_queue_free(il);
+ else
+ il_tx_queue_free(il, txq_id);
++ }
+
+ /* free tx queue structure */
+ il_free_txq_mem(il);
+diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.c b/drivers/net/wireless/iwlwifi/dvm/calib.c
+index 20e6aa910700..c148085742a0 100644
+--- a/drivers/net/wireless/iwlwifi/dvm/calib.c
++++ b/drivers/net/wireless/iwlwifi/dvm/calib.c
+@@ -901,7 +901,7 @@ static void iwlagn_gain_computation(struct iwl_priv *priv,
+ /* bound gain by 2 bits value max, 3rd bit is sign */
+ data->delta_gain_code[i] =
+ min(abs(delta_g),
+- (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
++ (s32) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
+
+ if (delta_g < 0)
+ /*
+diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c
+index 9c65f134d447..da7a75f82489 100644
+--- a/drivers/power/max17042_battery.c
++++ b/drivers/power/max17042_battery.c
+@@ -457,13 +457,16 @@ static inline void max17042_write_model_data(struct max17042_chip *chip,
+ }
+
+ static inline void max17042_read_model_data(struct max17042_chip *chip,
+- u8 addr, u32 *data, int size)
++ u8 addr, u16 *data, int size)
+ {
+ struct regmap *map = chip->regmap;
+ int i;
++ u32 tmp;
+
+- for (i = 0; i < size; i++)
+- regmap_read(map, addr + i, &data[i]);
++ for (i = 0; i < size; i++) {
++ regmap_read(map, addr + i, &tmp);
++ data[i] = (u16)tmp;
++ }
+ }
+
+ static inline int max17042_model_data_compare(struct max17042_chip *chip,
+@@ -486,7 +489,7 @@ static int max17042_init_model(struct max17042_chip *chip)
+ {
+ int ret;
+ int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
+- u32 *temp_data;
++ u16 *temp_data;
+
+ temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
+ if (!temp_data)
+@@ -501,7 +504,7 @@ static int max17042_init_model(struct max17042_chip *chip)
+ ret = max17042_model_data_compare(
+ chip,
+ chip->pdata->config_data->cell_char_tbl,
+- (u16 *)temp_data,
++ temp_data,
+ table_size);
+
+ max10742_lock_model(chip);
+@@ -514,7 +517,7 @@ static int max17042_verify_model_lock(struct max17042_chip *chip)
+ {
+ int i;
+ int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
+- u32 *temp_data;
++ u16 *temp_data;
+ int ret = 0;
+
+ temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
+diff --git a/drivers/power/reset/hisi-reboot.c b/drivers/power/reset/hisi-reboot.c
+index 9ab7f562a83b..f69387e12c1e 100644
+--- a/drivers/power/reset/hisi-reboot.c
++++ b/drivers/power/reset/hisi-reboot.c
+@@ -53,13 +53,16 @@ static int hisi_reboot_probe(struct platform_device *pdev)
+
+ if (of_property_read_u32(np, "reboot-offset", &reboot_offset) < 0) {
+ pr_err("failed to find reboot-offset property\n");
++ iounmap(base);
+ return -EINVAL;
+ }
+
+ err = register_restart_handler(&hisi_restart_nb);
+- if (err)
++ if (err) {
+ dev_err(&pdev->dev, "cannot register restart handler (err=%d)\n",
+ err);
++ iounmap(base);
++ }
+
+ return err;
+ }
+diff --git a/drivers/power/tps65217_charger.c b/drivers/power/tps65217_charger.c
+index d9f56730c735..040a40b4b173 100644
+--- a/drivers/power/tps65217_charger.c
++++ b/drivers/power/tps65217_charger.c
+@@ -205,6 +205,7 @@ static int tps65217_charger_probe(struct platform_device *pdev)
+ if (!charger)
+ return -ENOMEM;
+
++ platform_set_drvdata(pdev, charger);
+ charger->tps = tps;
+ charger->dev = &pdev->dev;
+
+diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
+index d24ca5f281b4..7831bc6b51dd 100644
+--- a/drivers/pwm/core.c
++++ b/drivers/pwm/core.c
+@@ -889,7 +889,7 @@ EXPORT_SYMBOL_GPL(devm_pwm_put);
+ */
+ bool pwm_can_sleep(struct pwm_device *pwm)
+ {
+- return pwm->chip->can_sleep;
++ return true;
+ }
+ EXPORT_SYMBOL_GPL(pwm_can_sleep);
+
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 3f8d357b1bac..278e10cd771f 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -5941,11 +5941,11 @@ static void megasas_detach_one(struct pci_dev *pdev)
+ if (fusion->ld_drv_map[i])
+ free_pages((ulong)fusion->ld_drv_map[i],
+ fusion->drv_map_pages);
+- if (fusion->pd_seq_sync)
+- dma_free_coherent(&instance->pdev->dev,
+- pd_seq_map_sz,
+- fusion->pd_seq_sync[i],
+- fusion->pd_seq_phys[i]);
++ if (fusion->pd_seq_sync[i])
++ dma_free_coherent(&instance->pdev->dev,
++ pd_seq_map_sz,
++ fusion->pd_seq_sync[i],
++ fusion->pd_seq_phys[i]);
+ }
+ free_pages((ulong)instance->ctrl_context,
+ instance->ctrl_context_pages);
+diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
+index bb40f3728742..20314ff08be0 100644
+--- a/drivers/staging/iio/adc/ad7192.c
++++ b/drivers/staging/iio/adc/ad7192.c
+@@ -236,7 +236,7 @@ static int ad7192_setup(struct ad7192_state *st,
+ st->mclk = pdata->ext_clk_hz;
+ else
+ st->mclk = AD7192_INT_FREQ_MHZ;
+- break;
++ break;
+ default:
+ ret = -EINVAL;
+ goto out;
1405 +diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
1406 +index c37149b929be..502d3892d8a4 100644
1407 +--- a/fs/autofs4/autofs_i.h
1408 ++++ b/fs/autofs4/autofs_i.h
1409 +@@ -79,9 +79,13 @@ struct autofs_info {
1410 + };
1411 +
1412 + #define AUTOFS_INF_EXPIRING (1<<0) /* dentry is in the process of expiring */
1413 +-#define AUTOFS_INF_NO_RCU (1<<1) /* the dentry is being considered
1414 ++#define AUTOFS_INF_WANT_EXPIRE (1<<1) /* the dentry is being considered
1415 + * for expiry, so RCU_walk is
1416 +- * not permitted
1417 ++ * not permitted. If it progresses to
1418 ++ * actual expiry attempt, the flag is
1419 ++ * not cleared when EXPIRING is set -
1420 ++ * in that case it gets cleared only
1421 ++ * when it comes to clearing EXPIRING.
1422 + */
1423 + #define AUTOFS_INF_PENDING (1<<2) /* dentry pending mount */
1424 +
1425 +diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
1426 +index 1cebc3c52fa5..7a5a598a2d94 100644
1427 +--- a/fs/autofs4/expire.c
1428 ++++ b/fs/autofs4/expire.c
1429 +@@ -315,19 +315,17 @@ struct dentry *autofs4_expire_direct(struct super_block *sb,
1430 + if (ino->flags & AUTOFS_INF_PENDING)
1431 + goto out;
1432 + if (!autofs4_direct_busy(mnt, root, timeout, do_now)) {
1433 +- ino->flags |= AUTOFS_INF_NO_RCU;
1434 ++ ino->flags |= AUTOFS_INF_WANT_EXPIRE;
1435 + spin_unlock(&sbi->fs_lock);
1436 + synchronize_rcu();
1437 + spin_lock(&sbi->fs_lock);
1438 + if (!autofs4_direct_busy(mnt, root, timeout, do_now)) {
1439 + ino->flags |= AUTOFS_INF_EXPIRING;
1440 +- smp_mb();
1441 +- ino->flags &= ~AUTOFS_INF_NO_RCU;
1442 + init_completion(&ino->expire_complete);
1443 + spin_unlock(&sbi->fs_lock);
1444 + return root;
1445 + }
1446 +- ino->flags &= ~AUTOFS_INF_NO_RCU;
1447 ++ ino->flags &= ~AUTOFS_INF_WANT_EXPIRE;
1448 + }
1449 + out:
1450 + spin_unlock(&sbi->fs_lock);
1451 +@@ -417,6 +415,7 @@ static struct dentry *should_expire(struct dentry *dentry,
1452 + }
1453 + return NULL;
1454 + }
1455 ++
1456 + /*
1457 + * Find an eligible tree to time-out
1458 + * A tree is eligible if :-
1459 +@@ -432,6 +431,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
1460 + struct dentry *root = sb->s_root;
1461 + struct dentry *dentry;
1462 + struct dentry *expired;
1463 ++ struct dentry *found;
1464 + struct autofs_info *ino;
1465 +
1466 + if (!root)
1467 +@@ -442,48 +442,54 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
1468 +
1469 + dentry = NULL;
1470 + while ((dentry = get_next_positive_subdir(dentry, root))) {
1471 ++ int flags = how;
1472 ++
1473 + spin_lock(&sbi->fs_lock);
1474 + ino = autofs4_dentry_ino(dentry);
1475 +- if (ino->flags & AUTOFS_INF_NO_RCU)
1476 +- expired = NULL;
1477 +- else
1478 +- expired = should_expire(dentry, mnt, timeout, how);
1479 +- if (!expired) {
1480 ++ if (ino->flags & AUTOFS_INF_WANT_EXPIRE) {
1481 + spin_unlock(&sbi->fs_lock);
1482 + continue;
1483 + }
1484 ++ spin_unlock(&sbi->fs_lock);
1485 ++
1486 ++ expired = should_expire(dentry, mnt, timeout, flags);
1487 ++ if (!expired)
1488 ++ continue;
1489 ++
1490 ++ spin_lock(&sbi->fs_lock);
1491 + ino = autofs4_dentry_ino(expired);
1492 +- ino->flags |= AUTOFS_INF_NO_RCU;
1493 ++ ino->flags |= AUTOFS_INF_WANT_EXPIRE;
1494 + spin_unlock(&sbi->fs_lock);
1495 + synchronize_rcu();
1496 +- spin_lock(&sbi->fs_lock);
1497 +- if (should_expire(expired, mnt, timeout, how)) {
1498 +- if (expired != dentry)
1499 +- dput(dentry);
1500 +- goto found;
1501 +- }
1502 +
1503 +- ino->flags &= ~AUTOFS_INF_NO_RCU;
1504 ++ /* Make sure a reference is not taken on found if
1505 ++ * things have changed.
1506 ++ */
1507 ++ flags &= ~AUTOFS_EXP_LEAVES;
1508 ++ found = should_expire(expired, mnt, timeout, how);
1509 ++ if (!found || found != expired)
1510 ++ /* Something has changed, continue */
1511 ++ goto next;
1512 ++
1513 + if (expired != dentry)
1514 +- dput(expired);
1515 ++ dput(dentry);
1516 ++
1517 ++ spin_lock(&sbi->fs_lock);
1518 ++ goto found;
1519 ++next:
1520 ++ spin_lock(&sbi->fs_lock);
1521 ++ ino->flags &= ~AUTOFS_INF_WANT_EXPIRE;
1522 + spin_unlock(&sbi->fs_lock);
1523 ++ if (expired != dentry)
1524 ++ dput(expired);
1525 + }
1526 + return NULL;
1527 +
1528 + found:
1529 + DPRINTK("returning %p %pd", expired, expired);
1530 + ino->flags |= AUTOFS_INF_EXPIRING;
1531 +- smp_mb();
1532 +- ino->flags &= ~AUTOFS_INF_NO_RCU;
1533 + init_completion(&ino->expire_complete);
1534 + spin_unlock(&sbi->fs_lock);
1535 +- spin_lock(&sbi->lookup_lock);
1536 +- spin_lock(&expired->d_parent->d_lock);
1537 +- spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED);
1538 +- list_move(&expired->d_parent->d_subdirs, &expired->d_child);
1539 +- spin_unlock(&expired->d_lock);
1540 +- spin_unlock(&expired->d_parent->d_lock);
1541 +- spin_unlock(&sbi->lookup_lock);
1542 + return expired;
1543 + }
1544 +
1545 +@@ -492,15 +498,27 @@ int autofs4_expire_wait(struct dentry *dentry, int rcu_walk)
1546 + struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
1547 + struct autofs_info *ino = autofs4_dentry_ino(dentry);
1548 + int status;
1549 ++ int state;
1550 +
1551 + /* Block on any pending expire */
1552 +- if (!(ino->flags & (AUTOFS_INF_EXPIRING | AUTOFS_INF_NO_RCU)))
1553 ++ if (!(ino->flags & AUTOFS_INF_WANT_EXPIRE))
1554 + return 0;
1555 + if (rcu_walk)
1556 + return -ECHILD;
1557 +
1558 ++retry:
1559 + spin_lock(&sbi->fs_lock);
1560 +- if (ino->flags & AUTOFS_INF_EXPIRING) {
1561 ++ state = ino->flags & (AUTOFS_INF_WANT_EXPIRE | AUTOFS_INF_EXPIRING);
1562 ++ if (state == AUTOFS_INF_WANT_EXPIRE) {
1563 ++ spin_unlock(&sbi->fs_lock);
1564 ++ /*
1565 ++ * Possibly being selected for expire, wait until
1566 ++ * it's selected or not.
1567 ++ */
1568 ++ schedule_timeout_uninterruptible(HZ/10);
1569 ++ goto retry;
1570 ++ }
1571 ++ if (state & AUTOFS_INF_EXPIRING) {
1572 + spin_unlock(&sbi->fs_lock);
1573 +
1574 + DPRINTK("waiting for expire %p name=%pd", dentry, dentry);
1575 +@@ -551,7 +569,7 @@ int autofs4_expire_run(struct super_block *sb,
1576 + ino = autofs4_dentry_ino(dentry);
1577 + /* avoid rapid-fire expire attempts if expiry fails */
1578 + ino->last_used = now;
1579 +- ino->flags &= ~AUTOFS_INF_EXPIRING;
1580 ++ ino->flags &= ~(AUTOFS_INF_EXPIRING|AUTOFS_INF_WANT_EXPIRE);
1581 + complete_all(&ino->expire_complete);
1582 + spin_unlock(&sbi->fs_lock);
1583 +
1584 +@@ -579,7 +597,7 @@ int autofs4_do_expire_multi(struct super_block *sb, struct vfsmount *mnt,
1585 + spin_lock(&sbi->fs_lock);
1586 + /* avoid rapid-fire expire attempts if expiry fails */
1587 + ino->last_used = now;
1588 +- ino->flags &= ~AUTOFS_INF_EXPIRING;
1589 ++ ino->flags &= ~(AUTOFS_INF_EXPIRING|AUTOFS_INF_WANT_EXPIRE);
1590 + complete_all(&ino->expire_complete);
1591 + spin_unlock(&sbi->fs_lock);
1592 + dput(dentry);
1593 +diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
1594 +index c6d7d3dbd52a..7a54c6a867c8 100644
1595 +--- a/fs/autofs4/root.c
1596 ++++ b/fs/autofs4/root.c
1597 +@@ -455,7 +455,7 @@ static int autofs4_d_manage(struct dentry *dentry, bool rcu_walk)
1598 + * a mount-trap.
1599 + */
1600 + struct inode *inode;
1601 +- if (ino->flags & (AUTOFS_INF_EXPIRING | AUTOFS_INF_NO_RCU))
1602 ++ if (ino->flags & AUTOFS_INF_WANT_EXPIRE)
1603 + return 0;
1604 + if (d_mountpoint(dentry))
1605 + return 0;
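
A userspace sketch of the two-phase protocol the autofs hunks above adopt: mark the candidate WANT_EXPIRE under the lock, drop the lock across the grace period, re-check, and only then commit EXPIRING (or roll WANT_EXPIRE back), while waiters treat a bare WANT_EXPIRE as "decision pending" and retry. A pthread mutex stands in for fs_lock and usleep() for synchronize_rcu(); all names and timings are illustrative, not the kernel's (build with cc -pthread).

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

enum { EXPIRING = 1 << 0, WANT_EXPIRE = 1 << 1 };   /* mirrors the header */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int flags;
static bool still_idle = true;            /* stand-in for the busy re-check */

static void *expirer(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    flags |= WANT_EXPIRE;                 /* phase 1: declare intent */
    pthread_mutex_unlock(&lock);

    usleep(1000);                         /* stand-in for synchronize_rcu() */

    pthread_mutex_lock(&lock);
    if (still_idle)
        flags |= EXPIRING;                /* phase 2: commit */
    else
        flags &= ~WANT_EXPIRE;            /* roll back, nothing half-done */
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, expirer, NULL);
    for (;;) {
        pthread_mutex_lock(&lock);
        unsigned int state = flags & (WANT_EXPIRE | EXPIRING);
        pthread_mutex_unlock(&lock);
        if (state == WANT_EXPIRE) {       /* decision pending: retry, as */
            usleep(100);                  /* autofs4_expire_wait() now does */
            continue;
        }
        puts(state & EXPIRING ? "expiring" : "no expire in flight");
        break;
    }
    pthread_join(t, NULL);
    return 0;
}
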
1606 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
1607 +index 65f30b3b04f9..a7e18dbadf74 100644
1608 +--- a/fs/btrfs/ioctl.c
1609 ++++ b/fs/btrfs/ioctl.c
1610 +@@ -1619,6 +1619,9 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
1611 + int namelen;
1612 + int ret = 0;
1613 +
1614 ++ if (!S_ISDIR(file_inode(file)->i_mode))
1615 ++ return -ENOTDIR;
1616 ++
1617 + ret = mnt_want_write_file(file);
1618 + if (ret)
1619 + goto out;
1620 +@@ -1676,6 +1679,9 @@ static noinline int btrfs_ioctl_snap_create(struct file *file,
1621 + struct btrfs_ioctl_vol_args *vol_args;
1622 + int ret;
1623 +
1624 ++ if (!S_ISDIR(file_inode(file)->i_mode))
1625 ++ return -ENOTDIR;
1626 ++
1627 + vol_args = memdup_user(arg, sizeof(*vol_args));
1628 + if (IS_ERR(vol_args))
1629 + return PTR_ERR(vol_args);
1630 +@@ -1699,6 +1705,9 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
1631 + bool readonly = false;
1632 + struct btrfs_qgroup_inherit *inherit = NULL;
1633 +
1634 ++ if (!S_ISDIR(file_inode(file)->i_mode))
1635 ++ return -ENOTDIR;
1636 ++
1637 + vol_args = memdup_user(arg, sizeof(*vol_args));
1638 + if (IS_ERR(vol_args))
1639 + return PTR_ERR(vol_args);
1640 +@@ -2345,6 +2354,9 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
1641 + int ret;
1642 + int err = 0;
1643 +
1644 ++ if (!S_ISDIR(dir->i_mode))
1645 ++ return -ENOTDIR;
1646 ++
1647 + vol_args = memdup_user(arg, sizeof(*vol_args));
1648 + if (IS_ERR(vol_args))
1649 + return PTR_ERR(vol_args);
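
These btrfs ioctl handlers now bail out early with -ENOTDIR when the file descriptor does not refer to a directory. The same S_ISDIR() guard expressed in plain userspace terms, checking a hypothetical path with stat() before a directory-only request would be issued:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
    struct stat st;
    const char *path = "/etc/hostname";   /* a regular file, for contrast */

    if (stat(path, &st) != 0) {
        perror("stat");
        return 1;
    }
    if (!S_ISDIR(st.st_mode)) {           /* the check the kernel now makes */
        fprintf(stderr, "%s: not a directory (ENOTDIR)\n", path);
        return 1;
    }
    return 0;
}
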
1650 +diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
1651 +index 5a7b3229b956..f34d6f5a5aca 100644
1652 +--- a/fs/hostfs/hostfs_kern.c
1653 ++++ b/fs/hostfs/hostfs_kern.c
1654 +@@ -959,10 +959,11 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
1655 +
1656 + if (S_ISLNK(root_inode->i_mode)) {
1657 + char *name = follow_link(host_root_path);
1658 +- if (IS_ERR(name))
1659 ++ if (IS_ERR(name)) {
1660 + err = PTR_ERR(name);
1661 +- else
1662 +- err = read_name(root_inode, name);
1663 ++ goto out_put;
1664 ++ }
1665 ++ err = read_name(root_inode, name);
1666 + kfree(name);
1667 + if (err)
1668 + goto out_put;
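
The hostfs fix routes a follow_link() failure straight to the common out_put label instead of falling through with an error value. The same goto-cleanup idiom in a standalone sketch; the file and buffer here are arbitrary stand-ins for the resources the real code releases:

#include <stdio.h>
#include <stdlib.h>

static int load(const char *path)
{
    int err = 0;
    FILE *f = fopen(path, "r");
    if (!f)
        return -1;                        /* nothing acquired yet */

    char *buf = malloc(4096);
    if (!buf) {
        err = -1;
        goto out_close;                   /* skip the work, still clean up */
    }

    if (fread(buf, 1, 4096, f) == 0 && ferror(f))
        err = -1;

    free(buf);
out_close:
    fclose(f);
    return err;
}

int main(void)
{
    return load("/etc/hostname") ? 1 : 0;
}
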
1669 +diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
1670 +index d2f97ecca6a5..e0e5f7c3c99f 100644
1671 +--- a/fs/notify/fanotify/fanotify.c
1672 ++++ b/fs/notify/fanotify/fanotify.c
1673 +@@ -67,18 +67,7 @@ static int fanotify_get_response(struct fsnotify_group *group,
1674 +
1675 + pr_debug("%s: group=%p event=%p\n", __func__, group, event);
1676 +
1677 +- wait_event(group->fanotify_data.access_waitq, event->response ||
1678 +- atomic_read(&group->fanotify_data.bypass_perm));
1679 +-
1680 +- if (!event->response) { /* bypass_perm set */
1681 +- /*
1682 +- * Event was canceled because group is being destroyed. Remove
1683 +- * it from group's event list because we are responsible for
1684 +- * freeing the permission event.
1685 +- */
1686 +- fsnotify_remove_event(group, &event->fae.fse);
1687 +- return 0;
1688 +- }
1689 ++ wait_event(group->fanotify_data.access_waitq, event->response);
1690 +
1691 + /* userspace responded, convert to something usable */
1692 + switch (event->response) {
1693 +diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
1694 +index 8e8e6bcd1d43..a64313868d3a 100644
1695 +--- a/fs/notify/fanotify/fanotify_user.c
1696 ++++ b/fs/notify/fanotify/fanotify_user.c
1697 +@@ -358,16 +358,20 @@ static int fanotify_release(struct inode *ignored, struct file *file)
1698 +
1699 + #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
1700 + struct fanotify_perm_event_info *event, *next;
1701 ++ struct fsnotify_event *fsn_event;
1702 +
1703 + /*
1704 +- * There may be still new events arriving in the notification queue
1705 +- * but since userspace cannot use fanotify fd anymore, no event can
1706 +- * enter or leave access_list by now.
1707 ++ * Stop new events from arriving in the notification queue. Since
1708 ++ * userspace cannot use fanotify fd anymore, no event can enter or
1709 ++ * leave access_list by now either.
1710 + */
1711 +- spin_lock(&group->fanotify_data.access_lock);
1712 +-
1713 +- atomic_inc(&group->fanotify_data.bypass_perm);
1714 ++ fsnotify_group_stop_queueing(group);
1715 +
1716 ++ /*
1717 ++ * Process all permission events on access_list and notification queue
1718 ++ * and simulate reply from userspace.
1719 ++ */
1720 ++ spin_lock(&group->fanotify_data.access_lock);
1721 + list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
1722 + fae.fse.list) {
1723 + pr_debug("%s: found group=%p event=%p\n", __func__, group,
1724 +@@ -379,12 +383,21 @@ static int fanotify_release(struct inode *ignored, struct file *file)
1725 + spin_unlock(&group->fanotify_data.access_lock);
1726 +
1727 + /*
1728 +- * Since bypass_perm is set, newly queued events will not wait for
1729 +- * access response. Wake up the already sleeping ones now.
1730 +- * synchronize_srcu() in fsnotify_destroy_group() will wait for all
1731 +- * processes sleeping in fanotify_handle_event() waiting for access
1732 +- * response and thus also for all permission events to be freed.
1733 ++ * Destroy all non-permission events. For permission events just
1734 ++ * dequeue them and set the response. They will be freed once the
1735 ++ * response is consumed and fanotify_get_response() returns.
1736 + */
1737 ++ mutex_lock(&group->notification_mutex);
1738 ++ while (!fsnotify_notify_queue_is_empty(group)) {
1739 ++ fsn_event = fsnotify_remove_first_event(group);
1740 ++ if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS))
1741 ++ fsnotify_destroy_event(group, fsn_event);
1742 ++ else
1743 ++ FANOTIFY_PE(fsn_event)->response = FAN_ALLOW;
1744 ++ }
1745 ++ mutex_unlock(&group->notification_mutex);
1746 ++
1747 ++ /* Response for all permission events is set, wake up waiters */
1748 + wake_up(&group->fanotify_data.access_waitq);
1749 + #endif
1750 +
1751 +@@ -755,7 +768,6 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
1752 + spin_lock_init(&group->fanotify_data.access_lock);
1753 + init_waitqueue_head(&group->fanotify_data.access_waitq);
1754 + INIT_LIST_HEAD(&group->fanotify_data.access_list);
1755 +- atomic_set(&group->fanotify_data.bypass_perm, 0);
1756 + #endif
1757 + switch (flags & FAN_ALL_CLASS_BITS) {
1758 + case FAN_CLASS_NOTIF:
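
For context on the permission events this function drains: a userspace listener answers each FAN_OPEN_PERM event by writing a fanotify_response back to the fanotify fd, and FAN_ALLOW is the same verdict the patched release path now applies to events left unanswered. A minimal listener sketch, assuming a path argument and CAP_SYS_ADMIN:

#include <fcntl.h>
#include <stdio.h>
#include <sys/fanotify.h>
#include <unistd.h>

int main(int argc, char **argv)
{
    struct fanotify_event_metadata buf[64], *md;
    ssize_t len;
    int fd;

    if (argc != 2) {
        fprintf(stderr, "usage: %s <path>\n", argv[0]);
        return 1;
    }
    fd = fanotify_init(FAN_CLASS_CONTENT, O_RDONLY);
    if (fd < 0) {
        perror("fanotify_init");          /* needs CAP_SYS_ADMIN */
        return 1;
    }
    if (fanotify_mark(fd, FAN_MARK_ADD, FAN_OPEN_PERM, AT_FDCWD, argv[1]) < 0) {
        perror("fanotify_mark");
        return 1;
    }
    while ((len = read(fd, buf, sizeof(buf))) > 0) {
        for (md = buf; FAN_EVENT_OK(md, len); md = FAN_EVENT_NEXT(md, len)) {
            if (md->mask & FAN_OPEN_PERM) {
                /* same verdict the kernel now applies at release time */
                struct fanotify_response rsp = {
                    .fd = md->fd, .response = FAN_ALLOW,
                };
                write(fd, &rsp, sizeof(rsp));
            }
            close(md->fd);
        }
    }
    return 0;
}
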
1759 +diff --git a/fs/notify/group.c b/fs/notify/group.c
1760 +index d16b62cb2854..18eb30c6bd8f 100644
1761 +--- a/fs/notify/group.c
1762 ++++ b/fs/notify/group.c
1763 +@@ -40,6 +40,17 @@ static void fsnotify_final_destroy_group(struct fsnotify_group *group)
1764 + }
1765 +
1766 + /*
1767 ++ * Stop queueing new events for this group. Once this function returns
1768 ++ * fsnotify_add_event() will not add any new events to the group's queue.
1769 ++ */
1770 ++void fsnotify_group_stop_queueing(struct fsnotify_group *group)
1771 ++{
1772 ++ mutex_lock(&group->notification_mutex);
1773 ++ group->shutdown = true;
1774 ++ mutex_unlock(&group->notification_mutex);
1775 ++}
1776 ++
1777 ++/*
1778 + * Trying to get rid of a group. Remove all marks, flush all events and release
1779 + * the group reference.
1780 + * Note that another thread calling fsnotify_clear_marks_by_group() may still
1781 +@@ -47,6 +58,14 @@ static void fsnotify_final_destroy_group(struct fsnotify_group *group)
1782 + */
1783 + void fsnotify_destroy_group(struct fsnotify_group *group)
1784 + {
1785 ++ /*
1786 ++ * Stop queueing new events. The code below is careful enough to not
1787 ++ * require this but fanotify needs to stop queuing events even before
1788 ++ * fsnotify_destroy_group() is called and this makes the other callers
1789 ++ * of fsnotify_destroy_group() see the same behavior.
1790 ++ */
1791 ++ fsnotify_group_stop_queueing(group);
1792 ++
1793 + /* clear all inode marks for this group */
1794 + fsnotify_clear_marks_by_group(group);
1795 +
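
fsnotify_group_stop_queueing() is simply a shutdown flag flipped under the queue mutex, and fsnotify_add_event() (patched below) tests it under the same mutex, so no event can slip in once shutdown has begun. A self-contained sketch of that pattern, with return code 2 mirroring the function's "not queued" convention:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct queue {
    pthread_mutex_t lock;
    bool shutdown;
    int len;
};

static void queue_stop(struct queue *q)
{
    pthread_mutex_lock(&q->lock);
    q->shutdown = true;                   /* visible to every enqueuer */
    pthread_mutex_unlock(&q->lock);
}

/* Returns 0 if queued, 2 if rejected, like fsnotify_add_event(). */
static int queue_add(struct queue *q)
{
    pthread_mutex_lock(&q->lock);
    if (q->shutdown) {
        pthread_mutex_unlock(&q->lock);
        return 2;
    }
    q->len++;
    pthread_mutex_unlock(&q->lock);
    return 0;
}

int main(void)
{
    struct queue q = { PTHREAD_MUTEX_INITIALIZER, false, 0 };
    printf("%d\n", queue_add(&q));        /* 0: accepted */
    queue_stop(&q);
    printf("%d\n", queue_add(&q));        /* 2: rejected after shutdown */
    return 0;
}
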
1796 +diff --git a/fs/notify/notification.c b/fs/notify/notification.c
1797 +index a95d8e037aeb..e455e83ceeeb 100644
1798 +--- a/fs/notify/notification.c
1799 ++++ b/fs/notify/notification.c
1800 +@@ -82,7 +82,8 @@ void fsnotify_destroy_event(struct fsnotify_group *group,
1801 + * Add an event to the group notification queue. The group can later pull this
1802 + * event off the queue to deal with. The function returns 0 if the event was
1803 + * added to the queue, 1 if the event was merged with some other queued event,
1804 +- * 2 if the queue of events has overflown.
1805 ++ * 2 if the event was not queued - either the queue of events has overflown
1806 ++ * or the group is shutting down.
1807 + */
1808 + int fsnotify_add_event(struct fsnotify_group *group,
1809 + struct fsnotify_event *event,
1810 +@@ -96,6 +97,11 @@ int fsnotify_add_event(struct fsnotify_group *group,
1811 +
1812 + mutex_lock(&group->notification_mutex);
1813 +
1814 ++ if (group->shutdown) {
1815 ++ mutex_unlock(&group->notification_mutex);
1816 ++ return 2;
1817 ++ }
1818 ++
1819 + if (group->q_len >= group->max_events) {
1820 + ret = 2;
1821 + /* Queue overflow event only if it isn't already queued */
1822 +@@ -126,21 +132,6 @@ queue:
1823 + }
1824 +
1825 + /*
1826 +- * Remove @event from group's notification queue. It is the responsibility of
1827 +- * the caller to destroy the event.
1828 +- */
1829 +-void fsnotify_remove_event(struct fsnotify_group *group,
1830 +- struct fsnotify_event *event)
1831 +-{
1832 +- mutex_lock(&group->notification_mutex);
1833 +- if (!list_empty(&event->list)) {
1834 +- list_del_init(&event->list);
1835 +- group->q_len--;
1836 +- }
1837 +- mutex_unlock(&group->notification_mutex);
1838 +-}
1839 +-
1840 +-/*
1841 + * Remove and return the first event from the notification list. It is the
1842 + * responsibility of the caller to destroy the obtained event
1843 + */
1844 +diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
1845 +index f90931335c6b..2e11658676eb 100644
1846 +--- a/fs/ocfs2/dlm/dlmconvert.c
1847 ++++ b/fs/ocfs2/dlm/dlmconvert.c
1848 +@@ -262,7 +262,6 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
1849 + struct dlm_lock *lock, int flags, int type)
1850 + {
1851 + enum dlm_status status;
1852 +- u8 old_owner = res->owner;
1853 +
1854 + mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type,
1855 + lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS);
1856 +@@ -329,7 +328,6 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
1857 +
1858 + spin_lock(&res->spinlock);
1859 + res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
1860 +- lock->convert_pending = 0;
1861 + /* if it failed, move it back to granted queue.
1862 + * if master returns DLM_NORMAL and then down before sending ast,
1863 + * it may have already been moved to granted queue, reset to
1864 +@@ -338,12 +336,14 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
1865 + if (status != DLM_NOTQUEUED)
1866 + dlm_error(status);
1867 + dlm_revert_pending_convert(res, lock);
1868 +- } else if ((res->state & DLM_LOCK_RES_RECOVERING) ||
1869 +- (old_owner != res->owner)) {
1870 +- mlog(0, "res %.*s is in recovering or has been recovered.\n",
1871 +- res->lockname.len, res->lockname.name);
1872 ++ } else if (!lock->convert_pending) {
1873 ++ mlog(0, "%s: res %.*s, owner died and lock has been moved back "
1874 ++ "to granted list, retry convert.\n",
1875 ++ dlm->name, res->lockname.len, res->lockname.name);
1876 + status = DLM_RECOVERING;
1877 + }
1878 ++
1879 ++ lock->convert_pending = 0;
1880 + bail:
1881 + spin_unlock(&res->spinlock);
1882 +
1883 +diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
1884 +index 77d30cbd944d..56dd3957cc91 100644
1885 +--- a/fs/ocfs2/file.c
1886 ++++ b/fs/ocfs2/file.c
1887 +@@ -1536,7 +1536,8 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
1888 + u64 start, u64 len)
1889 + {
1890 + int ret = 0;
1891 +- u64 tmpend, end = start + len;
1892 ++ u64 tmpend = 0;
1893 ++ u64 end = start + len;
1894 + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1895 + unsigned int csize = osb->s_clustersize;
1896 + handle_t *handle;
1897 +@@ -1568,18 +1569,31 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
1898 + }
1899 +
1900 + /*
1901 +- * We want to get the byte offset of the end of the 1st cluster.
1902 ++ * If start is on a cluster boundary and end is somewhere in another
1903 ++ * cluster, we have not COWed the cluster starting at start, unless
1904 ++ * end is also within the same cluster. So, in this case, we skip this
1905 ++ * first call to ocfs2_zero_range_for_truncate() and move on
1906 ++ * to the next one.
1907 + */
1908 +- tmpend = (u64)osb->s_clustersize + (start & ~(osb->s_clustersize - 1));
1909 +- if (tmpend > end)
1910 +- tmpend = end;
1911 ++ if ((start & (csize - 1)) != 0) {
1912 ++ /*
1913 ++ * We want to get the byte offset of the end of the 1st
1914 ++ * cluster.
1915 ++ */
1916 ++ tmpend = (u64)osb->s_clustersize +
1917 ++ (start & ~(osb->s_clustersize - 1));
1918 ++ if (tmpend > end)
1919 ++ tmpend = end;
1920 +
1921 +- trace_ocfs2_zero_partial_clusters_range1((unsigned long long)start,
1922 +- (unsigned long long)tmpend);
1923 ++ trace_ocfs2_zero_partial_clusters_range1(
1924 ++ (unsigned long long)start,
1925 ++ (unsigned long long)tmpend);
1926 +
1927 +- ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend);
1928 +- if (ret)
1929 +- mlog_errno(ret);
1930 ++ ret = ocfs2_zero_range_for_truncate(inode, handle, start,
1931 ++ tmpend);
1932 ++ if (ret)
1933 ++ mlog_errno(ret);
1934 ++ }
1935 +
1936 + if (tmpend < end) {
1937 + /*
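
Worked numbers for the branch added above, assuming a hypothetical 4 KiB cluster size: an unaligned start is zeroed only up to its cluster boundary, while a cluster-aligned start skips the first zeroing pass entirely and falls through to the tmpend < end case:

#include <stdio.h>

int main(void)
{
    unsigned long long csize = 4096;              /* hypothetical cluster size */
    unsigned long long end = 20000;
    unsigned long long starts[] = { 8192, 9000 }; /* aligned, unaligned */

    for (int i = 0; i < 2; i++) {
        unsigned long long start = starts[i];
        if ((start & (csize - 1)) != 0) {
            unsigned long long tmpend = csize + (start & ~(csize - 1));
            if (tmpend > end)
                tmpend = end;
            printf("start %llu: zero [%llu, %llu) first\n", start, start, tmpend);
        } else {
            printf("start %llu: cluster-aligned, skip the first pass\n", start);
        }
    }
    return 0;
}
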
1938 +diff --git a/fs/reiserfs/ibalance.c b/fs/reiserfs/ibalance.c
1939 +index b751eea32e20..5db6f45b3fed 100644
1940 +--- a/fs/reiserfs/ibalance.c
1941 ++++ b/fs/reiserfs/ibalance.c
1942 +@@ -1153,8 +1153,9 @@ int balance_internal(struct tree_balance *tb,
1943 + insert_ptr);
1944 + }
1945 +
1946 +- memcpy(new_insert_key_addr, &new_insert_key, KEY_SIZE);
1947 + insert_ptr[0] = new_insert_ptr;
1948 ++ if (new_insert_ptr)
1949 ++ memcpy(new_insert_key_addr, &new_insert_key, KEY_SIZE);
1950 +
1951 + return order;
1952 + }
1953 +diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
1954 +index 39090fc56f09..eb1b8c8acfcb 100644
1955 +--- a/fs/xfs/xfs_buf.c
1956 ++++ b/fs/xfs/xfs_buf.c
1957 +@@ -1535,7 +1535,7 @@ xfs_wait_buftarg(
1958 + * ensure here that all reference counts have been dropped before we
1959 + * start walking the LRU list.
1960 + */
1961 +- drain_workqueue(btp->bt_mount->m_buf_workqueue);
1962 ++ flush_workqueue(btp->bt_mount->m_buf_workqueue);
1963 +
1964 + /* loop until there is nothing left on the lru list. */
1965 + while (list_lru_count(&btp->bt_lru)) {
1966 +diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
1967 +index 533c4408529a..850d8822e8ff 100644
1968 +--- a/include/linux/fsnotify_backend.h
1969 ++++ b/include/linux/fsnotify_backend.h
1970 +@@ -148,6 +148,7 @@ struct fsnotify_group {
1971 + #define FS_PRIO_1 1 /* fanotify content based access control */
1972 + #define FS_PRIO_2 2 /* fanotify pre-content access */
1973 + unsigned int priority;
1974 ++ bool shutdown; /* group is being shut down, don't queue more events */
1975 +
1976 + /* stores all fastpath marks assoc with this group so they can be cleaned on unregister */
1977 + struct mutex mark_mutex; /* protect marks_list */
1978 +@@ -179,7 +180,6 @@ struct fsnotify_group {
1979 + spinlock_t access_lock;
1980 + struct list_head access_list;
1981 + wait_queue_head_t access_waitq;
1982 +- atomic_t bypass_perm;
1983 + #endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */
1984 + int f_flags;
1985 + unsigned int max_marks;
1986 +@@ -308,6 +308,8 @@ extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *op
1987 + extern void fsnotify_get_group(struct fsnotify_group *group);
1988 + /* drop reference on a group from fsnotify_alloc_group */
1989 + extern void fsnotify_put_group(struct fsnotify_group *group);
1990 ++/* group destruction begins, stop queuing new events */
1991 ++extern void fsnotify_group_stop_queueing(struct fsnotify_group *group);
1992 + /* destroy group */
1993 + extern void fsnotify_destroy_group(struct fsnotify_group *group);
1994 + /* fasync handler function */
1995 +@@ -320,8 +322,6 @@ extern int fsnotify_add_event(struct fsnotify_group *group,
1996 + struct fsnotify_event *event,
1997 + int (*merge)(struct list_head *,
1998 + struct fsnotify_event *));
1999 +-/* Remove passed event from groups notification queue */
2000 +-extern void fsnotify_remove_event(struct fsnotify_group *group, struct fsnotify_event *event);
2001 + /* true if the group notification queue is empty */
2002 + extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
2003 + /* return, but do not dequeue the first event on the notification queue */
2004 +diff --git a/include/linux/kernel.h b/include/linux/kernel.h
2005 +index 924853d33a13..e571e592e53a 100644
2006 +--- a/include/linux/kernel.h
2007 ++++ b/include/linux/kernel.h
2008 +@@ -202,26 +202,26 @@ extern int _cond_resched(void);
2009 +
2010 + /**
2011 + * abs - return absolute value of an argument
2012 +- * @x: the value. If it is unsigned type, it is converted to signed type first
2013 +- * (s64, long or int depending on its size).
2014 ++ * @x: the value. If it is of unsigned type, it is converted to signed type first.
2015 ++ * char is treated as if it were signed (regardless of whether it really is)
2016 ++ * but the macro's return type is preserved as char.
2017 + *
2018 +- * Return: an absolute value of x. If x is 64-bit, macro's return type is s64,
2019 +- * otherwise it is signed long.
2020 ++ * Return: an absolute value of x.
2021 + */
2022 +-#define abs(x) __builtin_choose_expr(sizeof(x) == sizeof(s64), ({ \
2023 +- s64 __x = (x); \
2024 +- (__x < 0) ? -__x : __x; \
2025 +- }), ({ \
2026 +- long ret; \
2027 +- if (sizeof(x) == sizeof(long)) { \
2028 +- long __x = (x); \
2029 +- ret = (__x < 0) ? -__x : __x; \
2030 +- } else { \
2031 +- int __x = (x); \
2032 +- ret = (__x < 0) ? -__x : __x; \
2033 +- } \
2034 +- ret; \
2035 +- }))
2036 ++#define abs(x) __abs_choose_expr(x, long long, \
2037 ++ __abs_choose_expr(x, long, \
2038 ++ __abs_choose_expr(x, int, \
2039 ++ __abs_choose_expr(x, short, \
2040 ++ __abs_choose_expr(x, char, \
2041 ++ __builtin_choose_expr( \
2042 ++ __builtin_types_compatible_p(typeof(x), char), \
2043 ++ (char)({ signed char __x = (x); __x<0?-__x:__x; }), \
2044 ++ ((void)0)))))))
2045 ++
2046 ++#define __abs_choose_expr(x, type, other) __builtin_choose_expr( \
2047 ++ __builtin_types_compatible_p(typeof(x), signed type) || \
2048 ++ __builtin_types_compatible_p(typeof(x), unsigned type), \
2049 ++ ({ signed type __x = (x); __x < 0 ? -__x : __x; }), other)
2050 +
2051 + /**
2052 + * reciprocal_scale - "scale" a value into range [0, ep_ro)
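
The new abs() selects an implementation by the argument's own type using __builtin_types_compatible_p() and __builtin_choose_expr(). A userspace re-creation of the technique (GCC extensions; compile with gcc); the my_ names are local, and the kernel's plain-char special case is omitted for brevity:

#include <stdio.h>

#define my_abs_choose(x, type, other) __builtin_choose_expr(     \
    __builtin_types_compatible_p(typeof(x), signed type) ||      \
    __builtin_types_compatible_p(typeof(x), unsigned type),      \
    ({ signed type __x = (x); __x < 0 ? -__x : __x; }), other)

#define my_abs(x) my_abs_choose(x, long long,   \
                  my_abs_choose(x, long,        \
                  my_abs_choose(x, int,         \
                  my_abs_choose(x, short,       \
                  ((void)0)))))

int main(void)
{
    long long ll = -5000000000LL;
    unsigned int u = 0x80000001u;  /* reinterpreted as a negative int */

    printf("%lld\n", my_abs(ll));  /* 5000000000: stays 64-bit */
    printf("%d\n", my_abs(u));     /* 2147483647: converted to signed first */
    return 0;
}
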
2053 +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
2054 +index b97d6823ef3c..4e9c75226f07 100644
2055 +--- a/include/linux/netdevice.h
2056 ++++ b/include/linux/netdevice.h
2057 +@@ -3036,6 +3036,7 @@ static inline void napi_free_frags(struct napi_struct *napi)
2058 + napi->skb = NULL;
2059 + }
2060 +
2061 ++bool netdev_is_rx_handler_busy(struct net_device *dev);
2062 + int netdev_rx_handler_register(struct net_device *dev,
2063 + rx_handler_func_t *rx_handler,
2064 + void *rx_handler_data);
2065 +diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
2066 +index 26eabf5ec718..fbfadba81c5a 100644
2067 +--- a/include/linux/pagemap.h
2068 ++++ b/include/linux/pagemap.h
2069 +@@ -601,56 +601,56 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
2070 + */
2071 + static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
2072 + {
2073 +- int ret = 0;
2074 + char __user *end = uaddr + size - 1;
2075 +
2076 + if (unlikely(size == 0))
2077 +- return ret;
2078 ++ return 0;
2079 +
2080 ++ if (unlikely(uaddr > end))
2081 ++ return -EFAULT;
2082 + /*
2083 + * Writing zeroes into userspace here is OK, because we know that if
2084 + * the zero gets there, we'll be overwriting it.
2085 + */
2086 +- while (uaddr <= end) {
2087 +- ret = __put_user(0, uaddr);
2088 +- if (ret != 0)
2089 +- return ret;
2090 ++ do {
2091 ++ if (unlikely(__put_user(0, uaddr) != 0))
2092 ++ return -EFAULT;
2093 + uaddr += PAGE_SIZE;
2094 +- }
2095 ++ } while (uaddr <= end);
2096 +
2097 + /* Check whether the range spilled into the next page. */
2098 + if (((unsigned long)uaddr & PAGE_MASK) ==
2099 + ((unsigned long)end & PAGE_MASK))
2100 +- ret = __put_user(0, end);
2101 ++ return __put_user(0, end);
2102 +
2103 +- return ret;
2104 ++ return 0;
2105 + }
2106 +
2107 + static inline int fault_in_multipages_readable(const char __user *uaddr,
2108 + int size)
2109 + {
2110 + volatile char c;
2111 +- int ret = 0;
2112 + const char __user *end = uaddr + size - 1;
2113 +
2114 + if (unlikely(size == 0))
2115 +- return ret;
2116 ++ return 0;
2117 +
2118 +- while (uaddr <= end) {
2119 +- ret = __get_user(c, uaddr);
2120 +- if (ret != 0)
2121 +- return ret;
2122 ++ if (unlikely(uaddr > end))
2123 ++ return -EFAULT;
2124 ++
2125 ++ do {
2126 ++ if (unlikely(__get_user(c, uaddr) != 0))
2127 ++ return -EFAULT;
2128 + uaddr += PAGE_SIZE;
2129 +- }
2130 ++ } while (uaddr <= end);
2131 +
2132 + /* Check whether the range spilled into the next page. */
2133 + if (((unsigned long)uaddr & PAGE_MASK) ==
2134 + ((unsigned long)end & PAGE_MASK)) {
2135 +- ret = __get_user(c, end);
2136 +- (void)c;
2137 ++ return __get_user(c, end);
2138 + }
2139 +
2140 +- return ret;
2141 ++ return 0;
2142 + }
2143 +
2144 + int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
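
The heart of the fault_in_multipages_*() rewrite is the new up-front guard: end = uaddr + size - 1 can wrap around for a huge size, and the old "uaddr <= end" loop would then walk essentially the whole address space. A small demonstration of the guard with illustrative values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* With end = uaddr + size - 1, a wrapped end lands below uaddr;
 * checking uaddr > end first rejects the wrapped case outright. */
static bool range_wraps(uintptr_t uaddr, size_t size)
{
    uintptr_t end = uaddr + size - 1;
    return size != 0 && uaddr > end;
}

int main(void)
{
    printf("%d\n", range_wraps(0x1000, 0x100));        /* 0: sane range */
    printf("%d\n", range_wraps(UINTPTR_MAX - 10, 64)); /* 1: wraps around */
    return 0;
}
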
2145 +diff --git a/include/linux/smc91x.h b/include/linux/smc91x.h
2146 +index 76199b75d584..e302c447e057 100644
2147 +--- a/include/linux/smc91x.h
2148 ++++ b/include/linux/smc91x.h
2149 +@@ -1,6 +1,16 @@
2150 + #ifndef __SMC91X_H__
2151 + #define __SMC91X_H__
2152 +
2153 ++/*
2154 ++ * These bits define which access sizes a platform can support, rather
2155 ++ * than the maximal access size. So, if your platform can do 16-bit
2156 ++ * and 32-bit accesses to the SMC91x device, but not 8-bit, set both
2157 ++ * SMC91X_USE_16BIT and SMC91X_USE_32BIT.
2158 ++ *
2159 ++ * The SMC91x driver requires at least one of SMC91X_USE_8BIT or
2160 ++ * SMC91X_USE_16BIT to be supported - just setting SMC91X_USE_32BIT is
2161 ++ * an invalid configuration.
2162 ++ */
2163 + #define SMC91X_USE_8BIT (1 << 0)
2164 + #define SMC91X_USE_16BIT (1 << 1)
2165 + #define SMC91X_USE_32BIT (1 << 2)
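
The new comment encodes a validity rule: at least one of SMC91X_USE_8BIT or SMC91X_USE_16BIT must be set, so a 32-bit-only configuration is invalid. A trivial userspace stand-in for the check a platform setup might perform, with the constants copied from the header:

#include <stdbool.h>
#include <stdio.h>

#define SMC91X_USE_8BIT  (1 << 0)
#define SMC91X_USE_16BIT (1 << 1)
#define SMC91X_USE_32BIT (1 << 2)

static bool smc91x_cfg_valid(unsigned int flags)
{
    /* 8-bit or 16-bit access must be available; 32-bit alone is not enough */
    return flags & (SMC91X_USE_8BIT | SMC91X_USE_16BIT);
}

int main(void)
{
    printf("%d\n", smc91x_cfg_valid(SMC91X_USE_16BIT | SMC91X_USE_32BIT)); /* 1 */
    printf("%d\n", smc91x_cfg_valid(SMC91X_USE_32BIT));                    /* 0 */
    return 0;
}
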
2166 +diff --git a/include/net/af_unix.h b/include/net/af_unix.h
2167 +index 9b4c418bebd8..fd60eccb59a6 100644
2168 +--- a/include/net/af_unix.h
2169 ++++ b/include/net/af_unix.h
2170 +@@ -52,7 +52,7 @@ struct unix_sock {
2171 + struct sock sk;
2172 + struct unix_address *addr;
2173 + struct path path;
2174 +- struct mutex readlock;
2175 ++ struct mutex iolock, bindlock;
2176 + struct sock *peer;
2177 + struct list_head link;
2178 + atomic_long_t inflight;
2179 +diff --git a/include/net/tcp.h b/include/net/tcp.h
2180 +index 414d822bc1db..9c3ab544d3a8 100644
2181 +--- a/include/net/tcp.h
2182 ++++ b/include/net/tcp.h
2183 +@@ -1510,6 +1510,8 @@ static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unli
2184 + {
2185 + if (sk->sk_send_head == skb_unlinked)
2186 + sk->sk_send_head = NULL;
2187 ++ if (tcp_sk(sk)->highest_sack == skb_unlinked)
2188 ++ tcp_sk(sk)->highest_sack = NULL;
2189 + }
2190 +
2191 + static inline void tcp_init_send_head(struct sock *sk)
2192 +diff --git a/kernel/cpuset.c b/kernel/cpuset.c
2193 +index e120bd983ad0..b9279a2844d8 100644
2194 +--- a/kernel/cpuset.c
2195 ++++ b/kernel/cpuset.c
2196 +@@ -2079,7 +2079,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
2197 + * which could have been changed by cpuset just after it inherits the
2198 + * state from the parent and before it sits on the cgroup's task list.
2199 + */
2200 +-void cpuset_fork(struct task_struct *task)
2201 ++void cpuset_fork(struct task_struct *task, void *priv)
2202 + {
2203 + if (task_css_is_root(task, cpuset_cgrp_id))
2204 + return;
2205 +diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
2206 +index b7dd5718836e..3124cebaec31 100644
2207 +--- a/kernel/power/hibernate.c
2208 ++++ b/kernel/power/hibernate.c
2209 +@@ -299,12 +299,12 @@ static int create_image(int platform_mode)
2210 + save_processor_state();
2211 + trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, true);
2212 + error = swsusp_arch_suspend();
2213 ++ /* Restore control flow magically appears here */
2214 ++ restore_processor_state();
2215 + trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, false);
2216 + if (error)
2217 + printk(KERN_ERR "PM: Error %d creating hibernation image\n",
2218 + error);
2219 +- /* Restore control flow magically appears here */
2220 +- restore_processor_state();
2221 + if (!in_suspend)
2222 + events_check_enabled = false;
2223 +
2224 +diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
2225 +index 3a970604308f..f155c62f1f2c 100644
2226 +--- a/kernel/power/snapshot.c
2227 ++++ b/kernel/power/snapshot.c
2228 +@@ -765,9 +765,9 @@ static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
2229 + */
2230 + static bool rtree_next_node(struct memory_bitmap *bm)
2231 + {
2232 +- bm->cur.node = list_entry(bm->cur.node->list.next,
2233 +- struct rtree_node, list);
2234 +- if (&bm->cur.node->list != &bm->cur.zone->leaves) {
2235 ++ if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
2236 ++ bm->cur.node = list_entry(bm->cur.node->list.next,
2237 ++ struct rtree_node, list);
2238 + bm->cur.node_pfn += BM_BITS_PER_BLOCK;
2239 + bm->cur.node_bit = 0;
2240 + touch_softlockup_watchdog();
2241 +@@ -775,9 +775,9 @@ static bool rtree_next_node(struct memory_bitmap *bm)
2242 + }
2243 +
2244 + /* No more nodes, goto next zone */
2245 +- bm->cur.zone = list_entry(bm->cur.zone->list.next,
2246 ++ if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
2247 ++ bm->cur.zone = list_entry(bm->cur.zone->list.next,
2248 + struct mem_zone_bm_rtree, list);
2249 +- if (&bm->cur.zone->list != &bm->zones) {
2250 + bm->cur.node = list_entry(bm->cur.zone->leaves.next,
2251 + struct rtree_node, list);
2252 + bm->cur.node_pfn = 0;
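
The snapshot fix reorders both walks to test list_is_last() before stepping, so list_entry() is never applied to the head sentinel. A self-contained miniature of the pattern, with local stand-ins for the kernel's list helpers:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

static bool list_is_last(const struct list_head *node,
                         const struct list_head *head)
{
    return node->next == head;
}

struct item { int val; struct list_head list; };

int main(void)
{
    struct list_head head = { &head, &head };
    struct item a = { 1, { NULL, NULL } }, b = { 2, { NULL, NULL } };

    /* hand-link: head <-> a <-> b <-> head */
    head.next = &a.list; a.list.prev = &head;
    a.list.next = &b.list; b.list.prev = &a.list;
    b.list.next = &head; head.prev = &b.list;

    struct item *cur = &a;
    for (;;) {
        printf("%d\n", cur->val);
        if (list_is_last(&cur->list, &head))
            break;                                /* check first... */
        cur = container_of(cur->list.next, struct item, list); /* ...then step */
    }
    return 0;
}
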
2253 +diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
2254 +index 9b1044e936a6..05ea5167e6bb 100644
2255 +--- a/kernel/trace/Makefile
2256 ++++ b/kernel/trace/Makefile
2257 +@@ -1,4 +1,8 @@
2258 +
2259 ++# We are fully aware of the dangers of __builtin_return_address()
2260 ++FRAME_CFLAGS := $(call cc-disable-warning,frame-address)
2261 ++KBUILD_CFLAGS += $(FRAME_CFLAGS)
2262 ++
2263 + # Do not instrument the tracer itself:
2264 +
2265 + ifdef CONFIG_FUNCTION_TRACER
2266 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
2267 +index 8305cbb2d5a2..059233abcfcf 100644
2268 +--- a/kernel/trace/trace.c
2269 ++++ b/kernel/trace/trace.c
2270 +@@ -4727,19 +4727,20 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
2271 + struct trace_iterator *iter = filp->private_data;
2272 + ssize_t sret;
2273 +
2274 +- /* return any leftover data */
2275 +- sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
2276 +- if (sret != -EBUSY)
2277 +- return sret;
2278 +-
2279 +- trace_seq_init(&iter->seq);
2280 +-
2281 + /*
2282 + * Avoid more than one consumer on a single file descriptor
2283 + * This is just a matter of traces coherency, the ring buffer itself
2284 + * is protected.
2285 + */
2286 + mutex_lock(&iter->mutex);
2287 ++
2288 ++ /* return any leftover data */
2289 ++ sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
2290 ++ if (sret != -EBUSY)
2291 ++ goto out;
2292 ++
2293 ++ trace_seq_init(&iter->seq);
2294 ++
2295 + if (iter->trace->read) {
2296 + sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
2297 + if (sret)
2298 +@@ -5766,9 +5767,6 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
2299 + return -EBUSY;
2300 + #endif
2301 +
2302 +- if (splice_grow_spd(pipe, &spd))
2303 +- return -ENOMEM;
2304 +-
2305 + if (*ppos & (PAGE_SIZE - 1))
2306 + return -EINVAL;
2307 +
2308 +@@ -5778,6 +5776,9 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
2309 + len &= PAGE_MASK;
2310 + }
2311 +
2312 ++ if (splice_grow_spd(pipe, &spd))
2313 ++ return -ENOMEM;
2314 ++
2315 + again:
2316 + trace_access_lock(iter->cpu_file);
2317 + entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
2318 +@@ -5835,19 +5836,21 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
2319 + /* did we read anything? */
2320 + if (!spd.nr_pages) {
2321 + if (ret)
2322 +- return ret;
2323 ++ goto out;
2324 +
2325 ++ ret = -EAGAIN;
2326 + if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
2327 +- return -EAGAIN;
2328 ++ goto out;
2329 +
2330 + ret = wait_on_pipe(iter, true);
2331 + if (ret)
2332 +- return ret;
2333 ++ goto out;
2334 +
2335 + goto again;
2336 + }
2337 +
2338 + ret = splice_to_pipe(pipe, &spd);
2339 ++out:
2340 + splice_shrink_spd(&spd);
2341 +
2342 + return ret;
2343 +diff --git a/mm/vmscan.c b/mm/vmscan.c
2344 +index 0c114e2b01d3..0838e9f02b11 100644
2345 +--- a/mm/vmscan.c
2346 ++++ b/mm/vmscan.c
2347 +@@ -2159,23 +2159,6 @@ out:
2348 + }
2349 + }
2350 +
2351 +-#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
2352 +-static void init_tlb_ubc(void)
2353 +-{
2354 +- /*
2355 +- * This deliberately does not clear the cpumask as it's expensive
2356 +- * and unnecessary. If there happens to be data in there then the
2357 +- * first SWAP_CLUSTER_MAX pages will send an unnecessary IPI and
2358 +- * then will be cleared.
2359 +- */
2360 +- current->tlb_ubc.flush_required = false;
2361 +-}
2362 +-#else
2363 +-static inline void init_tlb_ubc(void)
2364 +-{
2365 +-}
2366 +-#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
2367 +-
2368 + /*
2369 + * This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
2370 + */
2371 +@@ -2210,8 +2193,6 @@ static void shrink_lruvec(struct lruvec *lruvec, int swappiness,
2372 + scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() &&
2373 + sc->priority == DEF_PRIORITY);
2374 +
2375 +- init_tlb_ubc();
2376 +-
2377 + blk_start_plug(&plug);
2378 + while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
2379 + nr[LRU_INACTIVE_FILE]) {
2380 +diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
2381 +index 7173a685309a..9542e84a9455 100644
2382 +--- a/net/bridge/br_multicast.c
2383 ++++ b/net/bridge/br_multicast.c
2384 +@@ -1113,7 +1113,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
2385 + } else {
2386 + err = br_ip6_multicast_add_group(br, port,
2387 + &grec->grec_mca, vid);
2388 +- if (!err)
2389 ++ if (err)
2390 + break;
2391 + }
2392 + }
2393 +diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
2394 +index f6c3b2137eea..59ce1fcc220c 100644
2395 +--- a/net/caif/cfpkt_skbuff.c
2396 ++++ b/net/caif/cfpkt_skbuff.c
2397 +@@ -286,7 +286,7 @@ int cfpkt_setlen(struct cfpkt *pkt, u16 len)
2398 + else
2399 + skb_trim(skb, len);
2400 +
2401 +- return cfpkt_getlen(pkt);
2402 ++ return cfpkt_getlen(pkt);
2403 + }
2404 +
2405 + /* Need to expand SKB */
2406 +diff --git a/net/core/dev.c b/net/core/dev.c
2407 +index 9efbdb3ff78a..de4ed2b5a221 100644
2408 +--- a/net/core/dev.c
2409 ++++ b/net/core/dev.c
2410 +@@ -3722,6 +3722,22 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2411 + }
2412 +
2413 + /**
2414 ++ * netdev_is_rx_handler_busy - check if receive handler is registered
2415 ++ * @dev: device to check
2416 ++ *
2417 ++ * Check if a receive handler is already registered for a given device.
2418 ++ * Return true if there is one.
2419 ++ *
2420 ++ * The caller must hold the rtnl_mutex.
2421 ++ */
2422 ++bool netdev_is_rx_handler_busy(struct net_device *dev)
2423 ++{
2424 ++ ASSERT_RTNL();
2425 ++ return dev && rtnl_dereference(dev->rx_handler);
2426 ++}
2427 ++EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
2428 ++
2429 ++/**
2430 + * netdev_rx_handler_register - register receive handler
2431 + * @dev: device to register a handler for
2432 + * @rx_handler: receive handler to register
2433 +diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
2434 +index 744e5936c10d..e5a3ff210fec 100644
2435 +--- a/net/ipv4/fib_trie.c
2436 ++++ b/net/ipv4/fib_trie.c
2437 +@@ -2453,9 +2453,7 @@ struct fib_route_iter {
2438 + static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
2439 + loff_t pos)
2440 + {
2441 +- struct fib_table *tb = iter->main_tb;
2442 + struct key_vector *l, **tp = &iter->tnode;
2443 +- struct trie *t;
2444 + t_key key;
2445 +
2446 + /* use cache location of next-to-find key */
2447 +@@ -2463,8 +2461,6 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
2448 + pos -= iter->pos;
2449 + key = iter->key;
2450 + } else {
2451 +- t = (struct trie *)tb->tb_data;
2452 +- iter->tnode = t->kv;
2453 + iter->pos = 0;
2454 + key = 0;
2455 + }
2456 +@@ -2505,12 +2501,12 @@ static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
2457 + return NULL;
2458 +
2459 + iter->main_tb = tb;
2460 ++ t = (struct trie *)tb->tb_data;
2461 ++ iter->tnode = t->kv;
2462 +
2463 + if (*pos != 0)
2464 + return fib_route_get_idx(iter, *pos);
2465 +
2466 +- t = (struct trie *)tb->tb_data;
2467 +- iter->tnode = t->kv;
2468 + iter->pos = 0;
2469 + iter->key = 0;
2470 +
2471 +diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
2472 +index 4d8f0b698777..65036891e080 100644
2473 +--- a/net/ipv4/ip_vti.c
2474 ++++ b/net/ipv4/ip_vti.c
2475 +@@ -540,6 +540,33 @@ static struct rtnl_link_ops vti_link_ops __read_mostly = {
2476 + .get_link_net = ip_tunnel_get_link_net,
2477 + };
2478 +
2479 ++static bool is_vti_tunnel(const struct net_device *dev)
2480 ++{
2481 ++ return dev->netdev_ops == &vti_netdev_ops;
2482 ++}
2483 ++
2484 ++static int vti_device_event(struct notifier_block *unused,
2485 ++ unsigned long event, void *ptr)
2486 ++{
2487 ++ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2488 ++ struct ip_tunnel *tunnel = netdev_priv(dev);
2489 ++
2490 ++ if (!is_vti_tunnel(dev))
2491 ++ return NOTIFY_DONE;
2492 ++
2493 ++ switch (event) {
2494 ++ case NETDEV_DOWN:
2495 ++ if (!net_eq(tunnel->net, dev_net(dev)))
2496 ++ xfrm_garbage_collect(tunnel->net);
2497 ++ break;
2498 ++ }
2499 ++ return NOTIFY_DONE;
2500 ++}
2501 ++
2502 ++static struct notifier_block vti_notifier_block __read_mostly = {
2503 ++ .notifier_call = vti_device_event,
2504 ++};
2505 ++
2506 + static int __init vti_init(void)
2507 + {
2508 + const char *msg;
2509 +@@ -547,6 +574,8 @@ static int __init vti_init(void)
2510 +
2511 + pr_info("IPv4 over IPsec tunneling driver\n");
2512 +
2513 ++ register_netdevice_notifier(&vti_notifier_block);
2514 ++
2515 + msg = "tunnel device";
2516 + err = register_pernet_device(&vti_net_ops);
2517 + if (err < 0)
2518 +@@ -579,6 +608,7 @@ xfrm_proto_ah_failed:
2519 + xfrm_proto_esp_failed:
2520 + unregister_pernet_device(&vti_net_ops);
2521 + pernet_dev_failed:
2522 ++ unregister_netdevice_notifier(&vti_notifier_block);
2523 + pr_err("vti init: failed to register %s\n", msg);
2524 + return err;
2525 + }
2526 +@@ -590,6 +620,7 @@ static void __exit vti_fini(void)
2527 + xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
2528 + xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
2529 + unregister_pernet_device(&vti_net_ops);
2530 ++ unregister_netdevice_notifier(&vti_notifier_block);
2531 + }
2532 +
2533 + module_init(vti_init);
2534 +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
2535 +index 048418b049d8..b5853cac3269 100644
2536 +--- a/net/ipv4/tcp_ipv4.c
2537 ++++ b/net/ipv4/tcp_ipv4.c
2538 +@@ -808,8 +808,14 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
2539 + u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
2540 + tcp_sk(sk)->snd_nxt;
2541 +
2542 ++ /* RFC 7323 2.3
2543 ++ * The window field (SEG.WND) of every outgoing segment, with the
2544 ++ * exception of <SYN> segments, MUST be right-shifted by
2545 ++ * Rcv.Wind.Shift bits:
2546 ++ */
2547 + tcp_v4_send_ack(sock_net(sk), skb, seq,
2548 +- tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
2549 ++ tcp_rsk(req)->rcv_nxt,
2550 ++ req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
2551 + tcp_time_stamp,
2552 + req->ts_recent,
2553 + 0,
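
Worked numbers for the RFC 7323 rule the new comment cites: the window field of a non-SYN segment carries the receive window right-shifted by the negotiated scale, and the peer shifts it back. The values below are hypothetical:

#include <stdio.h>

int main(void)
{
    unsigned int rcv_wnd = 524280;            /* real receive window, bytes */
    unsigned int wscale  = 3;                 /* negotiated Rcv.Wind.Shift */
    unsigned int seg_wnd = rcv_wnd >> wscale; /* 16-bit field on the wire */

    printf("window field: %u, receiver rescales to %u\n",
           seg_wnd, seg_wnd << wscale);       /* 65535, 524280 */
    return 0;
}
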
2554 +diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
2555 +index 3e6a472e6b88..92ab5bc91592 100644
2556 +--- a/net/ipv4/tcp_yeah.c
2557 ++++ b/net/ipv4/tcp_yeah.c
2558 +@@ -75,7 +75,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
2559 + if (!tcp_is_cwnd_limited(sk))
2560 + return;
2561 +
2562 +- if (tp->snd_cwnd <= tp->snd_ssthresh)
2563 ++ if (tcp_in_slow_start(tp))
2564 + tcp_slow_start(tp, acked);
2565 +
2566 + else if (!yeah->doing_reno_now) {
2567 +diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
2568 +index 263a5164a6f5..3e55447b63a4 100644
2569 +--- a/net/ipv6/ping.c
2570 ++++ b/net/ipv6/ping.c
2571 +@@ -150,8 +150,10 @@ int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
2572 + rt = (struct rt6_info *) dst;
2573 +
2574 + np = inet6_sk(sk);
2575 +- if (!np)
2576 +- return -EBADF;
2577 ++ if (!np) {
2578 ++ err = -EBADF;
2579 ++ goto dst_err_out;
2580 ++ }
2581 +
2582 + if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
2583 + fl6.flowi6_oif = np->mcast_oif;
2584 +@@ -186,6 +188,9 @@ int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
2585 + }
2586 + release_sock(sk);
2587 +
2588 ++dst_err_out:
2589 ++ dst_release(dst);
2590 ++
2591 + if (err)
2592 + return err;
2593 +
2594 +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
2595 +index 1a1cd3938fd0..2d81e2f33ef2 100644
2596 +--- a/net/ipv6/tcp_ipv6.c
2597 ++++ b/net/ipv6/tcp_ipv6.c
2598 +@@ -932,9 +932,15 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
2599 + /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
2600 + * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
2601 + */
2602 ++ /* RFC 7323 2.3
2603 ++ * The window field (SEG.WND) of every outgoing segment, with the
2604 ++ * exception of <SYN> segments, MUST be right-shifted by
2605 ++ * Rcv.Wind.Shift bits:
2606 ++ */
2607 + tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
2608 + tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
2609 +- tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
2610 ++ tcp_rsk(req)->rcv_nxt,
2611 ++ req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
2612 + tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
2613 + tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
2614 + 0, 0);
2615 +diff --git a/net/irda/iriap.c b/net/irda/iriap.c
2616 +index 4a7ae32afa09..1138eaf5c682 100644
2617 +--- a/net/irda/iriap.c
2618 ++++ b/net/irda/iriap.c
2619 +@@ -185,8 +185,12 @@ struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv,
2620 +
2621 + self->magic = IAS_MAGIC;
2622 + self->mode = mode;
2623 +- if (mode == IAS_CLIENT)
2624 +- iriap_register_lsap(self, slsap_sel, mode);
2625 ++ if (mode == IAS_CLIENT) {
2626 ++ if (iriap_register_lsap(self, slsap_sel, mode)) {
2627 ++ kfree(self);
2628 ++ return NULL;
2629 ++ }
2630 ++ }
2631 +
2632 + self->confirm = callback;
2633 + self->priv = priv;
2634 +diff --git a/net/tipc/socket.c b/net/tipc/socket.c
2635 +index 9b713e0ce00d..b26b7a127773 100644
2636 +--- a/net/tipc/socket.c
2637 ++++ b/net/tipc/socket.c
2638 +@@ -2111,7 +2111,8 @@ restart:
2639 + TIPC_CONN_MSG, SHORT_H_SIZE,
2640 + 0, dnode, onode, dport, oport,
2641 + TIPC_CONN_SHUTDOWN);
2642 +- tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
2643 ++ if (skb)
2644 ++ tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
2645 + }
2646 + tsk->connected = 0;
2647 + sock->state = SS_DISCONNECTING;
2648 +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
2649 +index 6579fd6e7459..824cc1e160bc 100644
2650 +--- a/net/unix/af_unix.c
2651 ++++ b/net/unix/af_unix.c
2652 +@@ -661,11 +661,11 @@ static int unix_set_peek_off(struct sock *sk, int val)
2653 + {
2654 + struct unix_sock *u = unix_sk(sk);
2655 +
2656 +- if (mutex_lock_interruptible(&u->readlock))
2657 ++ if (mutex_lock_interruptible(&u->iolock))
2658 + return -EINTR;
2659 +
2660 + sk->sk_peek_off = val;
2661 +- mutex_unlock(&u->readlock);
2662 ++ mutex_unlock(&u->iolock);
2663 +
2664 + return 0;
2665 + }
2666 +@@ -778,7 +778,8 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
2667 + spin_lock_init(&u->lock);
2668 + atomic_long_set(&u->inflight, 0);
2669 + INIT_LIST_HEAD(&u->link);
2670 +- mutex_init(&u->readlock); /* single task reading lock */
2671 ++ mutex_init(&u->iolock); /* single task reading lock */
2672 ++ mutex_init(&u->bindlock); /* single task binding lock */
2673 + init_waitqueue_head(&u->peer_wait);
2674 + init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
2675 + unix_insert_socket(unix_sockets_unbound(sk), sk);
2676 +@@ -847,7 +848,7 @@ static int unix_autobind(struct socket *sock)
2677 + int err;
2678 + unsigned int retries = 0;
2679 +
2680 +- err = mutex_lock_interruptible(&u->readlock);
2681 ++ err = mutex_lock_interruptible(&u->bindlock);
2682 + if (err)
2683 + return err;
2684 +
2685 +@@ -894,7 +895,7 @@ retry:
2686 + spin_unlock(&unix_table_lock);
2687 + err = 0;
2688 +
2689 +-out: mutex_unlock(&u->readlock);
2690 ++out: mutex_unlock(&u->bindlock);
2691 + return err;
2692 + }
2693 +
2694 +@@ -953,20 +954,32 @@ fail:
2695 + return NULL;
2696 + }
2697 +
2698 +-static int unix_mknod(struct dentry *dentry, struct path *path, umode_t mode,
2699 +- struct path *res)
2700 ++static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
2701 + {
2702 +- int err;
2703 ++ struct dentry *dentry;
2704 ++ struct path path;
2705 ++ int err = 0;
2706 ++ /*
2707 ++ * Get the parent directory, calculate the hash for the last
2708 ++ * component.
2709 ++ */
2710 ++ dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
2711 ++ err = PTR_ERR(dentry);
2712 ++ if (IS_ERR(dentry))
2713 ++ return err;
2714 +
2715 +- err = security_path_mknod(path, dentry, mode, 0);
2716 ++ /*
2717 ++ * All right, let's create it.
2718 ++ */
2719 ++ err = security_path_mknod(&path, dentry, mode, 0);
2720 + if (!err) {
2721 +- err = vfs_mknod(d_inode(path->dentry), dentry, mode, 0);
2722 ++ err = vfs_mknod(d_inode(path.dentry), dentry, mode, 0);
2723 + if (!err) {
2724 +- res->mnt = mntget(path->mnt);
2725 ++ res->mnt = mntget(path.mnt);
2726 + res->dentry = dget(dentry);
2727 + }
2728 + }
2729 +-
2730 ++ done_path_create(&path, dentry);
2731 + return err;
2732 + }
2733 +
2734 +@@ -977,12 +990,10 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2735 + struct unix_sock *u = unix_sk(sk);
2736 + struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
2737 + char *sun_path = sunaddr->sun_path;
2738 +- int err, name_err;
2739 ++ int err;
2740 + unsigned int hash;
2741 + struct unix_address *addr;
2742 + struct hlist_head *list;
2743 +- struct path path;
2744 +- struct dentry *dentry;
2745 +
2746 + err = -EINVAL;
2747 + if (sunaddr->sun_family != AF_UNIX)
2748 +@@ -998,34 +1009,14 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2749 + goto out;
2750 + addr_len = err;
2751 +
2752 +- name_err = 0;
2753 +- dentry = NULL;
2754 +- if (sun_path[0]) {
2755 +- /* Get the parent directory, calculate the hash for last
2756 +- * component.
2757 +- */
2758 +- dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
2759 +-
2760 +- if (IS_ERR(dentry)) {
2761 +- /* delay report until after 'already bound' check */
2762 +- name_err = PTR_ERR(dentry);
2763 +- dentry = NULL;
2764 +- }
2765 +- }
2766 +-
2767 +- err = mutex_lock_interruptible(&u->readlock);
2768 ++ err = mutex_lock_interruptible(&u->bindlock);
2769 + if (err)
2770 +- goto out_path;
2771 ++ goto out;
2772 +
2773 + err = -EINVAL;
2774 + if (u->addr)
2775 + goto out_up;
2776 +
2777 +- if (name_err) {
2778 +- err = name_err == -EEXIST ? -EADDRINUSE : name_err;
2779 +- goto out_up;
2780 +- }
2781 +-
2782 + err = -ENOMEM;
2783 + addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
2784 + if (!addr)
2785 +@@ -1036,11 +1027,11 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2786 + addr->hash = hash ^ sk->sk_type;
2787 + atomic_set(&addr->refcnt, 1);
2788 +
2789 +- if (dentry) {
2790 +- struct path u_path;
2791 ++ if (sun_path[0]) {
2792 ++ struct path path;
2793 + umode_t mode = S_IFSOCK |
2794 + (SOCK_INODE(sock)->i_mode & ~current_umask());
2795 +- err = unix_mknod(dentry, &path, mode, &u_path);
2796 ++ err = unix_mknod(sun_path, mode, &path);
2797 + if (err) {
2798 + if (err == -EEXIST)
2799 + err = -EADDRINUSE;
2800 +@@ -1048,9 +1039,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2801 + goto out_up;
2802 + }
2803 + addr->hash = UNIX_HASH_SIZE;
2804 +- hash = d_real_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
2805 ++ hash = d_real_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1);
2806 + spin_lock(&unix_table_lock);
2807 +- u->path = u_path;
2808 ++ u->path = path;
2809 + list = &unix_socket_table[hash];
2810 + } else {
2811 + spin_lock(&unix_table_lock);
2812 +@@ -1072,11 +1063,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2813 + out_unlock:
2814 + spin_unlock(&unix_table_lock);
2815 + out_up:
2816 +- mutex_unlock(&u->readlock);
2817 +-out_path:
2818 +- if (dentry)
2819 +- done_path_create(&path, dentry);
2820 +-
2821 ++ mutex_unlock(&u->bindlock);
2822 + out:
2823 + return err;
2824 + }
2825 +@@ -1971,17 +1958,17 @@ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
2826 + if (false) {
2827 + alloc_skb:
2828 + unix_state_unlock(other);
2829 +- mutex_unlock(&unix_sk(other)->readlock);
2830 ++ mutex_unlock(&unix_sk(other)->iolock);
2831 + newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
2832 + &err, 0);
2833 + if (!newskb)
2834 + goto err;
2835 + }
2836 +
2837 +- /* we must acquire readlock as we modify already present
2838 ++ /* we must acquire iolock as we modify already present
2839 + * skbs in the sk_receive_queue and mess with skb->len
2840 + */
2841 +- err = mutex_lock_interruptible(&unix_sk(other)->readlock);
2842 ++ err = mutex_lock_interruptible(&unix_sk(other)->iolock);
2843 + if (err) {
2844 + err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
2845 + goto err;
2846 +@@ -2048,7 +2035,7 @@ alloc_skb:
2847 + }
2848 +
2849 + unix_state_unlock(other);
2850 +- mutex_unlock(&unix_sk(other)->readlock);
2851 ++ mutex_unlock(&unix_sk(other)->iolock);
2852 +
2853 + other->sk_data_ready(other);
2854 + scm_destroy(&scm);
2855 +@@ -2057,7 +2044,7 @@ alloc_skb:
2856 + err_state_unlock:
2857 + unix_state_unlock(other);
2858 + err_unlock:
2859 +- mutex_unlock(&unix_sk(other)->readlock);
2860 ++ mutex_unlock(&unix_sk(other)->iolock);
2861 + err:
2862 + kfree_skb(newskb);
2863 + if (send_sigpipe && !(flags & MSG_NOSIGNAL))
2864 +@@ -2122,7 +2109,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
2865 + if (flags&MSG_OOB)
2866 + goto out;
2867 +
2868 +- err = mutex_lock_interruptible(&u->readlock);
2869 ++ err = mutex_lock_interruptible(&u->iolock);
2870 + if (unlikely(err)) {
2871 + /* recvmsg() in non blocking mode is supposed to return -EAGAIN
2872 + * sk_rcvtimeo is not honored by mutex_lock_interruptible()
2873 +@@ -2198,7 +2185,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
2874 + out_free:
2875 + skb_free_datagram(sk, skb);
2876 + out_unlock:
2877 +- mutex_unlock(&u->readlock);
2878 ++ mutex_unlock(&u->iolock);
2879 + out:
2880 + return err;
2881 + }
2882 +@@ -2293,7 +2280,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
2883 + /* Lock the socket to prevent queue disordering
2884 + * while sleeps in memcpy_tomsg
2885 + */
2886 +- mutex_lock(&u->readlock);
2887 ++ mutex_lock(&u->iolock);
2888 +
2889 + if (flags & MSG_PEEK)
2890 + skip = sk_peek_offset(sk, flags);
2891 +@@ -2334,7 +2321,7 @@ again:
2892 + break;
2893 + }
2894 +
2895 +- mutex_unlock(&u->readlock);
2896 ++ mutex_unlock(&u->iolock);
2897 +
2898 + timeo = unix_stream_data_wait(sk, timeo, last,
2899 + last_len);
2900 +@@ -2345,7 +2332,7 @@ again:
2901 + goto out;
2902 + }
2903 +
2904 +- mutex_lock(&u->readlock);
2905 ++ mutex_lock(&u->iolock);
2906 + continue;
2907 + unlock:
2908 + unix_state_unlock(sk);
2909 +@@ -2448,7 +2435,7 @@ unlock:
2910 + }
2911 + } while (size);
2912 +
2913 +- mutex_unlock(&u->readlock);
2914 ++ mutex_unlock(&u->iolock);
2915 + if (state->msg)
2916 + scm_recv(sock, state->msg, &scm, flags);
2917 + else
2918 +@@ -2489,9 +2476,9 @@ static ssize_t skb_unix_socket_splice(struct sock *sk,
2919 + int ret;
2920 + struct unix_sock *u = unix_sk(sk);
2921 +
2922 +- mutex_unlock(&u->readlock);
2923 ++ mutex_unlock(&u->iolock);
2924 + ret = splice_to_pipe(pipe, spd);
2925 +- mutex_lock(&u->readlock);
2926 ++ mutex_lock(&u->iolock);
2927 +
2928 + return ret;
2929 + }
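
One user-visible constant across the unix_bind() rework: a name that already exists on the filesystem still surfaces as EADDRINUSE, since unix_mknod()'s -EEXIST is translated. A quick demonstration with a hypothetical socket path (error checks trimmed to the two binds):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

int main(void)
{
    struct sockaddr_un sa = { .sun_family = AF_UNIX };
    strcpy(sa.sun_path, "/tmp/demo.sock");
    unlink(sa.sun_path);                  /* make the run repeatable */

    int a = socket(AF_UNIX, SOCK_STREAM, 0);
    int b = socket(AF_UNIX, SOCK_STREAM, 0);

    if (bind(a, (struct sockaddr *)&sa, sizeof(sa)))
        perror("first bind");
    if (bind(b, (struct sockaddr *)&sa, sizeof(sa)))
        perror("second bind");            /* EADDRINUSE: name exists */

    close(a);
    close(b);
    unlink(sa.sun_path);
    return 0;
}
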
2930 +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
2931 +index 5d89f13a98db..bf65f31bd55e 100644
2932 +--- a/net/wireless/nl80211.c
2933 ++++ b/net/wireless/nl80211.c
2934 +@@ -6628,7 +6628,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
2935 +
2936 + params.n_counter_offsets_presp = len / sizeof(u16);
2937 + if (rdev->wiphy.max_num_csa_counters &&
2938 +- (params.n_counter_offsets_beacon >
2939 ++ (params.n_counter_offsets_presp >
2940 + rdev->wiphy.max_num_csa_counters))
2941 + return -EINVAL;
2942 +