From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.1 commit in: /
Date: Tue, 29 Sep 2015 17:50:43
Message-Id: 1443549031.a246795e14884680031e6838755d88dfa0ce1790.mpagano@gentoo

commit: a246795e14884680031e6838755d88dfa0ce1790
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Sep 29 17:50:31 2015 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Sep 29 17:50:31 2015 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=a246795e

Linux patch 4.1.9

0000_README | 4 +
1008_linux-4.1.9.patch | 5955 ++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 5959 insertions(+)

diff --git a/0000_README b/0000_README
index 4a96d2e..46b8cb0 100644
--- a/0000_README
+++ b/0000_README
@@ -75,6 +75,10 @@ Patch: 1007_linux-4.1.8.patch
From: http://www.kernel.org
Desc: Linux 4.1.8

+Patch: 1008_linux-4.1.9.patch
+From: http://www.kernel.org
+Desc: Linux 4.1.9
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1008_linux-4.1.9.patch b/1008_linux-4.1.9.patch
new file mode 100644
index 0000000..000c373
--- /dev/null
+++ b/1008_linux-4.1.9.patch
@@ -0,0 +1,5955 @@
+diff --git a/Makefile b/Makefile
+index dbf3baa5fabb..e071176b2ce6 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = Series 4800
+
+diff --git a/arch/arm/boot/compressed/decompress.c b/arch/arm/boot/compressed/decompress.c
+index bd245d34952d..a0765e7ed6c7 100644
+--- a/arch/arm/boot/compressed/decompress.c
++++ b/arch/arm/boot/compressed/decompress.c
+@@ -57,5 +57,5 @@ extern char * strstr(const char * s1, const char *s2);
+
+ int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x))
+ {
+- return decompress(input, len, NULL, NULL, output, NULL, error);
++ return __decompress(input, len, NULL, NULL, output, 0, NULL, error);
+ }
+diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
+index d9631ecddd56..d6223cbcb661 100644
+--- a/arch/arm/kvm/arm.c
++++ b/arch/arm/kvm/arm.c
+@@ -450,7 +450,7 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
+ * Map the VGIC hardware resources before running a vcpu the first
+ * time on this VM.
+ */
+- if (unlikely(!vgic_ready(kvm))) {
++ if (unlikely(irqchip_in_kernel(kvm) && !vgic_ready(kvm))) {
+ ret = kvm_vgic_map_resources(kvm);
+ if (ret)
+ return ret;
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 7796af4b1d6f..6f0a3b41b009 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -101,6 +101,10 @@ config NO_IOPORT_MAP
+ config STACKTRACE_SUPPORT
+ def_bool y
+
++config ILLEGAL_POINTER_VALUE
++ hex
++ default 0xdead000000000000
++
+ config LOCKDEP_SUPPORT
+ def_bool y
+
+@@ -409,6 +413,22 @@ config ARM64_ERRATUM_845719
+
+ If unsure, say Y.
+
++config ARM64_ERRATUM_843419
++ bool "Cortex-A53: 843419: A load or store might access an incorrect address"
++ depends on MODULES
++ default y
++ help
++ This option builds kernel modules using the large memory model in
++ order to avoid the use of the ADRP instruction, which can cause
++ a subsequent memory access to use an incorrect address on Cortex-A53
++ parts up to r0p4.
++
++ Note that the kernel itself must be linked with a version of ld
++ which fixes potentially affected ADRP instructions through the
++ use of veneers.
++
++ If unsure, say Y.
++
+ endmenu
+
+
+diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
+index 4d2a925998f9..81151663ef38 100644
+--- a/arch/arm64/Makefile
++++ b/arch/arm64/Makefile
+@@ -30,6 +30,10 @@ endif
+
+ CHECKFLAGS += -D__aarch64__
+
++ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
++CFLAGS_MODULE += -mcmodel=large
++endif
++
+ # Default value
+ head-y := arch/arm64/kernel/head.o
+
+diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
+index f800d45ea226..44a59c20e773 100644
+--- a/arch/arm64/include/asm/memory.h
++++ b/arch/arm64/include/asm/memory.h
+@@ -114,6 +114,14 @@ extern phys_addr_t memstart_addr;
+ #define PHYS_OFFSET ({ memstart_addr; })
+
+ /*
++ * The maximum physical address that the linear direct mapping
++ * of system RAM can cover. (PAGE_OFFSET can be interpreted as
++ * a 2's complement signed quantity and negated to derive the
++ * maximum size of the linear mapping.)
++ */
++#define MAX_MEMBLOCK_ADDR ({ memstart_addr - PAGE_OFFSET - 1; })
++
++/*
+ * PFNs are used to describe any physical page; this means
+ * PFN 0 == physical address 0.
+ *
+diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
+index 3dca15634e69..c31e59fe2cb8 100644
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -157,6 +157,7 @@ void fpsimd_thread_switch(struct task_struct *next)
+ void fpsimd_flush_thread(void)
+ {
+ memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
++ fpsimd_flush_task_state(current);
+ set_thread_flag(TIF_FOREIGN_FPSTATE);
+ }
+
+diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
+index 19f915e8f6e0..36aa31ff2c06 100644
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -565,6 +565,11 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
+ msr hstr_el2, xzr // Disable CP15 traps to EL2
+ #endif
+
++ /* EL2 debug */
++ mrs x0, pmcr_el0 // Disable debug access traps
++ ubfx x0, x0, #11, #5 // to EL2 and allow access to
++ msr mdcr_el2, x0 // all PMU counters from EL1
++
+ /* Stage-2 translation */
+ msr vttbr_el2, xzr
+
+diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
+index 67bf4107f6ef..876eb8df50bf 100644
+--- a/arch/arm64/kernel/module.c
++++ b/arch/arm64/kernel/module.c
+@@ -332,12 +332,14 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
+ ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
+ AARCH64_INSN_IMM_ADR);
+ break;
++#ifndef CONFIG_ARM64_ERRATUM_843419
+ case R_AARCH64_ADR_PREL_PG_HI21_NC:
+ overflow_check = false;
+ case R_AARCH64_ADR_PREL_PG_HI21:
+ ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
+ AARCH64_INSN_IMM_ADR);
+ break;
++#endif
+ case R_AARCH64_ADD_ABS_LO12_NC:
+ case R_AARCH64_LDST8_ABS_LO12_NC:
+ overflow_check = false;
+diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
+index c0cff3410166..c58aee062590 100644
+--- a/arch/arm64/kernel/signal32.c
++++ b/arch/arm64/kernel/signal32.c
+@@ -212,14 +212,32 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
+
+ /*
+ * VFP save/restore code.
++ *
++ * We have to be careful with endianness, since the fpsimd context-switch
++ * code operates on 128-bit (Q) register values whereas the compat ABI
++ * uses an array of 64-bit (D) registers. Consequently, we need to swap
++ * the two halves of each Q register when running on a big-endian CPU.
+ */
++union __fpsimd_vreg {
++ __uint128_t raw;
++ struct {
++#ifdef __AARCH64EB__
++ u64 hi;
++ u64 lo;
++#else
++ u64 lo;
++ u64 hi;
++#endif
++ };
++};
++
+ static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
+ {
+ struct fpsimd_state *fpsimd = &current->thread.fpsimd_state;
+ compat_ulong_t magic = VFP_MAGIC;
+ compat_ulong_t size = VFP_STORAGE_SIZE;
+ compat_ulong_t fpscr, fpexc;
+- int err = 0;
++ int i, err = 0;
+
+ /*
+ * Save the hardware registers to the fpsimd_state structure.
+@@ -235,10 +253,15 @@ static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
+ /*
+ * Now copy the FP registers. Since the registers are packed,
+ * we can copy the prefix we want (V0-V15) as it is.
+- * FIXME: Won't work if big endian.
+ */
+- err |= __copy_to_user(&frame->ufp.fpregs, fpsimd->vregs,
+- sizeof(frame->ufp.fpregs));
++ for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) {
++ union __fpsimd_vreg vreg = {
++ .raw = fpsimd->vregs[i >> 1],
++ };
++
++ __put_user_error(vreg.lo, &frame->ufp.fpregs[i], err);
++ __put_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err);
++ }
+
+ /* Create an AArch32 fpscr from the fpsr and the fpcr. */
+ fpscr = (fpsimd->fpsr & VFP_FPSCR_STAT_MASK) |
+@@ -263,7 +286,7 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
+ compat_ulong_t magic = VFP_MAGIC;
+ compat_ulong_t size = VFP_STORAGE_SIZE;
+ compat_ulong_t fpscr;
+- int err = 0;
++ int i, err = 0;
+
+ __get_user_error(magic, &frame->magic, err);
+ __get_user_error(size, &frame->size, err);
+@@ -273,12 +296,14 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
+ if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
+ return -EINVAL;
+
+- /*
+- * Copy the FP registers into the start of the fpsimd_state.
+- * FIXME: Won't work if big endian.
+- */
+- err |= __copy_from_user(fpsimd.vregs, frame->ufp.fpregs,
+- sizeof(frame->ufp.fpregs));
++ /* Copy the FP registers into the start of the fpsimd_state. */
++ for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) {
++ union __fpsimd_vreg vreg;
++
++ __get_user_error(vreg.lo, &frame->ufp.fpregs[i], err);
++ __get_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err);
++ fpsimd.vregs[i >> 1] = vreg.raw;
++ }
+
+ /* Extract the fpsr and the fpcr from the fpscr */
+ __get_user_error(fpscr, &frame->ufp.fpscr, err);
+diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
+index 5befd010e232..64f9e60b31da 100644
+--- a/arch/arm64/kvm/hyp.S
++++ b/arch/arm64/kvm/hyp.S
+@@ -844,8 +844,6 @@
+ mrs x3, cntv_ctl_el0
+ and x3, x3, #3
+ str w3, [x0, #VCPU_TIMER_CNTV_CTL]
+- bic x3, x3, #1 // Clear Enable
+- msr cntv_ctl_el0, x3
+
+ isb
+
+@@ -853,6 +851,9 @@
+ str x3, [x0, #VCPU_TIMER_CNTV_CVAL]
+
1:
++ // Disable the virtual timer
++ msr cntv_ctl_el0, xzr
++
+ // Allow physical timer/counter access for the host
+ mrs x2, cnthctl_el2
+ orr x2, x2, #3
+@@ -947,13 +948,15 @@ ENTRY(__kvm_vcpu_run)
+ // Guest context
+ add x2, x0, #VCPU_CONTEXT
+
++ // We must restore the 32-bit state before the sysregs, thanks
++ // to Cortex-A57 erratum #852523.
++ restore_guest_32bit_state
+ bl __restore_sysregs
+ bl __restore_fpsimd
+
+ skip_debug_state x3, 1f
+ bl __restore_debug
+ 1:
+- restore_guest_32bit_state
+ restore_guest_regs
+
+ // That's it, no more messing around.
+diff --git a/arch/m32r/boot/compressed/misc.c b/arch/m32r/boot/compressed/misc.c
+index 28a09529f206..3a7692745868 100644
+--- a/arch/m32r/boot/compressed/misc.c
++++ b/arch/m32r/boot/compressed/misc.c
+@@ -86,6 +86,7 @@ decompress_kernel(int mmu_on, unsigned char *zimage_data,
+ free_mem_end_ptr = free_mem_ptr + BOOT_HEAP_SIZE;
+
+ puts("\nDecompressing Linux... ");
+- decompress(input_data, input_len, NULL, NULL, output_data, NULL, error);
++ __decompress(input_data, input_len, NULL, NULL, output_data, 0,
++ NULL, error);
+ puts("done.\nBooting the kernel.\n");
+ }
+diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c
+index 54831069a206..080cd53bac36 100644
+--- a/arch/mips/boot/compressed/decompress.c
++++ b/arch/mips/boot/compressed/decompress.c
+@@ -111,8 +111,8 @@ void decompress_kernel(unsigned long boot_heap_start)
+ puts("\n");
+
+ /* Decompress the kernel with according algorithm */
+- decompress((char *)zimage_start, zimage_size, 0, 0,
+- (void *)VMLINUX_LOAD_ADDRESS_ULL, 0, error);
++ __decompress((char *)zimage_start, zimage_size, 0, 0,
++ (void *)VMLINUX_LOAD_ADDRESS_ULL, 0, 0, error);
+
+ /* FIXME: should we flush cache here? */
+ puts("Now, booting the kernel...\n");
+diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
+index 6983fcd48131..2b95e34fa9e8 100644
+--- a/arch/mips/math-emu/cp1emu.c
++++ b/arch/mips/math-emu/cp1emu.c
+@@ -1137,7 +1137,7 @@ emul:
+ break;
+
+ case mfhc_op:
+- if (!cpu_has_mips_r2)
++ if (!cpu_has_mips_r2_r6)
+ goto sigill;
+
+ /* copregister rd -> gpr[rt] */
+@@ -1148,7 +1148,7 @@ emul:
+ break;
+
+ case mthc_op:
+- if (!cpu_has_mips_r2)
++ if (!cpu_has_mips_r2_r6)
+ goto sigill;
+
+ /* copregister rd <- gpr[rt] */
+@@ -1181,6 +1181,24 @@ emul:
+ }
+ break;
+
++ case bc1eqz_op:
++ case bc1nez_op:
++ if (!cpu_has_mips_r6 || delay_slot(xcp))
++ return SIGILL;
++
++ cond = likely = 0;
++ switch (MIPSInst_RS(ir)) {
++ case bc1eqz_op:
++ if (get_fpr32(&current->thread.fpu.fpr[MIPSInst_RT(ir)], 0) & 0x1)
++ cond = 1;
++ break;
++ case bc1nez_op:
++ if (!(get_fpr32(&current->thread.fpu.fpr[MIPSInst_RT(ir)], 0) & 0x1))
++ cond = 1;
++ break;
++ }
++ goto branch_common;
++
+ case bc_op:
+ if (delay_slot(xcp))
+ return SIGILL;
+@@ -1207,7 +1225,7 @@ emul:
+ case bct_op:
+ break;
+ }
+-
+branch_common:
+ set_delay_slot(xcp);
+ if (cond) {
+ /*
+diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
+index f3191db6e2e9..c0eab24f6a9e 100644
+--- a/arch/parisc/kernel/irq.c
++++ b/arch/parisc/kernel/irq.c
+@@ -507,8 +507,8 @@ void do_cpu_irq_mask(struct pt_regs *regs)
+ struct pt_regs *old_regs;
+ unsigned long eirr_val;
+ int irq, cpu = smp_processor_id();
+-#ifdef CONFIG_SMP
+ struct irq_desc *desc;
++#ifdef CONFIG_SMP
+ cpumask_t dest;
+ #endif
+
+@@ -521,8 +521,12 @@ void do_cpu_irq_mask(struct pt_regs *regs)
+ goto set_out;
+ irq = eirr_to_irq(eirr_val);
+
+-#ifdef CONFIG_SMP
++ /* Filter out spurious interrupts, mostly from serial port at bootup */
+ desc = irq_to_desc(irq);
++ if (unlikely(!desc->action))
++ goto set_out;
++
++#ifdef CONFIG_SMP
+ cpumask_copy(&dest, desc->irq_data.affinity);
+ if (irqd_is_per_cpu(&desc->irq_data) &&
+ !cpumask_test_cpu(smp_processor_id(), &dest)) {
+diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
+index 7ef22e3387e0..0b8d26d3ba43 100644
+--- a/arch/parisc/kernel/syscall.S
++++ b/arch/parisc/kernel/syscall.S
+@@ -821,7 +821,7 @@ cas2_action:
+ /* 64bit CAS */
+ #ifdef CONFIG_64BIT
+ 19: ldd,ma 0(%sr3,%r26), %r29
+- sub,= %r29, %r25, %r0
++ sub,*= %r29, %r25, %r0
+ b,n cas2_end
+ 20: std,ma %r24, 0(%sr3,%r26)
+ copy %r0, %r28
+diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
+index 73eddda53b8e..4eec430d8fa8 100644
+--- a/arch/powerpc/boot/Makefile
++++ b/arch/powerpc/boot/Makefile
+@@ -28,6 +28,9 @@ BOOTCFLAGS += -m64
+ endif
+ ifdef CONFIG_CPU_BIG_ENDIAN
+ BOOTCFLAGS += -mbig-endian
++else
++BOOTCFLAGS += -mlittle-endian
++BOOTCFLAGS += $(call cc-option,-mabi=elfv2)
+ endif
+
+ BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc
+diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
+index 43e6ad424c7f..88d27e3258d2 100644
+--- a/arch/powerpc/include/asm/pgtable-ppc64.h
++++ b/arch/powerpc/include/asm/pgtable-ppc64.h
+@@ -135,7 +135,19 @@
+ #define pte_iterate_hashed_end() } while(0)
+
+ #ifdef CONFIG_PPC_HAS_HASH_64K
+-#define pte_pagesize_index(mm, addr, pte) get_slice_psize(mm, addr)
++/*
++ * We expect this to be called only for user addresses or kernel virtual
++ * addresses other than the linear mapping.
++ */
++#define pte_pagesize_index(mm, addr, pte) \
++ ({ \
++ unsigned int psize; \
++ if (is_kernel_addr(addr)) \
++ psize = MMU_PAGE_4K; \
++ else \
++ psize = get_slice_psize(mm, addr); \
++ psize; \
++ })
+ #else
+ #define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K
+ #endif
+diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
+index 7a4ede16b283..b77ef369c0f0 100644
+--- a/arch/powerpc/include/asm/rtas.h
++++ b/arch/powerpc/include/asm/rtas.h
+@@ -343,6 +343,7 @@ extern void rtas_power_off(void);
+ extern void rtas_halt(void);
+ extern void rtas_os_term(char *str);
+ extern int rtas_get_sensor(int sensor, int index, int *state);
++extern int rtas_get_sensor_fast(int sensor, int index, int *state);
+ extern int rtas_get_power_level(int powerdomain, int *level);
+ extern int rtas_set_power_level(int powerdomain, int level, int *setlevel);
+ extern bool rtas_indicator_present(int token, int *maxindex);
+diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
+index 58abeda64cb7..15cca17cba4b 100644
+--- a/arch/powerpc/include/asm/switch_to.h
++++ b/arch/powerpc/include/asm/switch_to.h
+@@ -29,6 +29,7 @@ static inline void save_early_sprs(struct thread_struct *prev) {}
+
+ extern void enable_kernel_fp(void);
+ extern void enable_kernel_altivec(void);
++extern void enable_kernel_vsx(void);
+ extern int emulate_altivec(struct pt_regs *);
+ extern void __giveup_vsx(struct task_struct *);
+ extern void giveup_vsx(struct task_struct *);
+diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
+index 9ee61d15653d..cb565ad0a5b6 100644
+--- a/arch/powerpc/kernel/eeh.c
++++ b/arch/powerpc/kernel/eeh.c
+@@ -310,11 +310,26 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
+ if (!(pe->type & EEH_PE_PHB)) {
+ if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG))
+ eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
++
++ /*
++ * The config space of some PCI devices can't be accessed
++ * when their PEs are in frozen state. Otherwise, fenced
++ * PHB might be seen. Those PEs are identified with flag
++ * EEH_PE_CFG_RESTRICTED, indicating EEH_PE_CFG_BLOCKED
++ * is set automatically when the PE is put to EEH_PE_ISOLATED.
++ *
++ * Restoring BARs possibly triggers PCI config access in
++ * (OPAL) firmware and then causes fenced PHB. If the
++ * PCI config is blocked with flag EEH_PE_CFG_BLOCKED, it's
++ * pointless to restore BARs and dump config space.
++ */
+ eeh_ops->configure_bridge(pe);
+- eeh_pe_restore_bars(pe);
++ if (!(pe->state & EEH_PE_CFG_BLOCKED)) {
++ eeh_pe_restore_bars(pe);
+
+- pci_regs_buf[0] = 0;
+- eeh_pe_traverse(pe, eeh_dump_pe_log, &loglen);
++ pci_regs_buf[0] = 0;
++ eeh_pe_traverse(pe, eeh_dump_pe_log, &loglen);
++ }
+ }
+
+ eeh_ops->get_log(pe, severity, pci_regs_buf, loglen);
+@@ -1118,9 +1133,6 @@ void eeh_add_device_late(struct pci_dev *dev)
+ return;
+ }
+
+- if (eeh_has_flag(EEH_PROBE_MODE_DEV))
+- eeh_ops->probe(pdn, NULL);
+-
+ /*
+ * The EEH cache might not be removed correctly because of
+ * unbalanced kref to the device during unplug time, which
+@@ -1144,6 +1156,9 @@ void eeh_add_device_late(struct pci_dev *dev)
+ dev->dev.archdata.edev = NULL;
+ }
+
++ if (eeh_has_flag(EEH_PROBE_MODE_DEV))
++ eeh_ops->probe(pdn, NULL);
++
+ edev->pdev = dev;
+ dev->dev.archdata.edev = edev;
+
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index febb50dd5328..0596373cd1c3 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -204,8 +204,6 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
+ #endif /* CONFIG_ALTIVEC */
+
+ #ifdef CONFIG_VSX
+-#if 0
+-/* not currently used, but some crazy RAID module might want to later */
+ void enable_kernel_vsx(void)
+ {
+ WARN_ON(preemptible());
+@@ -220,7 +218,6 @@ void enable_kernel_vsx(void)
+ #endif /* CONFIG_SMP */
+ }
+ EXPORT_SYMBOL(enable_kernel_vsx);
+-#endif
+
+ void giveup_vsx(struct task_struct *tsk)
+ {
+diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
+index 7a488c108410..caffb10e7aa3 100644
+--- a/arch/powerpc/kernel/rtas.c
++++ b/arch/powerpc/kernel/rtas.c
+@@ -584,6 +584,23 @@ int rtas_get_sensor(int sensor, int index, int *state)
+ }
+ EXPORT_SYMBOL(rtas_get_sensor);
+
++int rtas_get_sensor_fast(int sensor, int index, int *state)
++{
++ int token = rtas_token("get-sensor-state");
++ int rc;
++
++ if (token == RTAS_UNKNOWN_SERVICE)
++ return -ENOENT;
++
++ rc = rtas_call(token, 2, 2, state, sensor, index);
++ WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN &&
++ rc <= RTAS_EXTENDED_DELAY_MAX));
++
++ if (rc < 0)
++ return rtas_error_rc(rc);
++ return rc;
++}
++
+ bool rtas_indicator_present(int token, int *maxindex)
+ {
+ int proplen, count, i;
+diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
+index 43dafb9d6a46..4d87122cf6a7 100644
+--- a/arch/powerpc/mm/hugepage-hash64.c
++++ b/arch/powerpc/mm/hugepage-hash64.c
+@@ -85,7 +85,6 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+ BUG_ON(index >= 4096);
+
+ vpn = hpt_vpn(ea, vsid, ssize);
+- hash = hpt_hash(vpn, shift, ssize);
+ hpte_slot_array = get_hpte_slot_array(pmdp);
+ if (psize == MMU_PAGE_4K) {
+ /*
+@@ -101,6 +100,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+ valid = hpte_valid(hpte_slot_array, index);
+ if (valid) {
+ /* update the hpte bits */
++ hash = hpt_hash(vpn, shift, ssize);
+ hidx = hpte_hash_index(hpte_slot_array, index);
+ if (hidx & _PTEIDX_SECONDARY)
+ hash = ~hash;
+@@ -126,6 +126,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+ if (!valid) {
+ unsigned long hpte_group;
+
++ hash = hpt_hash(vpn, shift, ssize);
+ /* insert new entry */
+ pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
+ new_pmd |= _PAGE_HASHPTE;
+diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
+index 02e4a1745516..3b6647e574b6 100644
+--- a/arch/powerpc/platforms/pseries/ras.c
++++ b/arch/powerpc/platforms/pseries/ras.c
+@@ -189,7 +189,8 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id)
+ int state;
+ int critical;
+
+- status = rtas_get_sensor(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX, &state);
++ status = rtas_get_sensor_fast(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX,
++ &state);
+
+ if (state > 3)
+ critical = 1; /* Time Critical */
+diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
+index df6a7041922b..e6e8b241d717 100644
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -268,6 +268,11 @@ static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long act
+ eeh_dev_init(PCI_DN(np), pci->phb);
+ }
+ break;
++ case OF_RECONFIG_DETACH_NODE:
++ pci = PCI_DN(np);
++ if (pci)
++ list_del(&pci->list);
++ break;
+ default:
+ err = NOTIFY_DONE;
+ break;
+diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
+index 42506b371b74..4da604ebf6fd 100644
+--- a/arch/s390/boot/compressed/misc.c
++++ b/arch/s390/boot/compressed/misc.c
+@@ -167,7 +167,7 @@ unsigned long decompress_kernel(void)
+ #endif
+
+ puts("Uncompressing Linux... ");
+- decompress(input_data, input_len, NULL, NULL, output, NULL, error);
++ __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
+ puts("Ok, booting the kernel.\n");
+ return (unsigned long) output;
+ }
+diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c
+index 95470a472d2c..208a9753ab38 100644
+--- a/arch/sh/boot/compressed/misc.c
++++ b/arch/sh/boot/compressed/misc.c
+@@ -132,7 +132,7 @@ void decompress_kernel(void)
+
+ puts("Uncompressing Linux... ");
+ cache_control(CACHE_ENABLE);
+- decompress(input_data, input_len, NULL, NULL, output, NULL, error);
++ __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
+ cache_control(CACHE_DISABLE);
+ puts("Ok, booting the kernel.\n");
+ }
+diff --git a/arch/unicore32/boot/compressed/misc.c b/arch/unicore32/boot/compressed/misc.c
+index 176d5bda3559..5c65dfee278c 100644
+--- a/arch/unicore32/boot/compressed/misc.c
++++ b/arch/unicore32/boot/compressed/misc.c
+@@ -119,8 +119,8 @@ unsigned long decompress_kernel(unsigned long output_start,
+ output_ptr = get_unaligned_le32(tmp);
+
+ arch_decomp_puts("Uncompressing Linux...");
+- decompress(input_data, input_data_end - input_data, NULL, NULL,
+- output_data, NULL, error);
++ __decompress(input_data, input_data_end - input_data, NULL, NULL,
++ output_data, 0, NULL, error);
+ arch_decomp_puts(" done, booting the kernel.\n");
+ return output_ptr;
+ }
+diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
+index a107b935e22f..e28437e0f708 100644
+--- a/arch/x86/boot/compressed/misc.c
++++ b/arch/x86/boot/compressed/misc.c
+@@ -424,7 +424,8 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
+ #endif
+
+ debug_putstr("\nDecompressing Linux... ");
+- decompress(input_data, input_len, NULL, NULL, output, NULL, error);
++ __decompress(input_data, input_len, NULL, NULL, output, output_len,
++ NULL, error);
+ parse_elf(output);
+ /*
+ * 32-bit always performs relocations. 64-bit relocations are only
+diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
+index c8140e12816a..c23ab1ee3a9a 100644
+--- a/arch/x86/mm/init_32.c
++++ b/arch/x86/mm/init_32.c
+@@ -137,6 +137,7 @@ page_table_range_init_count(unsigned long start, unsigned long end)
+
+ vaddr = start;
+ pgd_idx = pgd_index(vaddr);
++ pmd_idx = pmd_index(vaddr);
+
+ for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd_idx++) {
+ for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
+diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
+index b79685e06b70..279c5d674edf 100644
+--- a/block/blk-mq-sysfs.c
++++ b/block/blk-mq-sysfs.c
+@@ -141,15 +141,26 @@ static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
+
+ static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg)
+ {
+- char *start_page = page;
+ struct request *rq;
++ int len = snprintf(page, PAGE_SIZE - 1, "%s:\n", msg);
++
++ list_for_each_entry(rq, list, queuelist) {
++ const int rq_len = 2 * sizeof(rq) + 2;
++
++ /* if the output will be truncated */
++ if (PAGE_SIZE - 1 < len + rq_len) {
++ /* backspacing if it can't hold '\t...\n' */
++ if (PAGE_SIZE - 1 < len + 5)
++ len -= rq_len;
++ len += snprintf(page + len, PAGE_SIZE - 1 - len,
++ "\t...\n");
++ break;
++ }
++ len += snprintf(page + len, PAGE_SIZE - 1 - len,
++ "\t%p\n", rq);
++ }
+
+- page += sprintf(page, "%s:\n", msg);
+-
+- list_for_each_entry(rq, list, queuelist)
+- page += sprintf(page, "\t%p\n", rq);
+-
+- return page - start_page;
++ return len;
+ }
+
+ static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
+diff --git a/drivers/base/node.c b/drivers/base/node.c
+index a2aa65b4215d..b10479c87357 100644
+--- a/drivers/base/node.c
++++ b/drivers/base/node.c
+@@ -388,6 +388,16 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
+ for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
+ int page_nid;
+
++ /*
++ * memory block could have several absent sections from start.
++ * skip pfn range from absent section
++ */
++ if (!pfn_present(pfn)) {
++ pfn = round_down(pfn + PAGES_PER_SECTION,
++ PAGES_PER_SECTION) - 1;
++ continue;
++ }
++
+ page_nid = get_nid_for_pfn(pfn);
+ if (page_nid < 0)
+ continue;
+diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
+index ab300ea19434..41f93334cc44 100644
+--- a/drivers/crypto/vmx/aes.c
++++ b/drivers/crypto/vmx/aes.c
+@@ -80,6 +80,7 @@ static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
+
+ pagefault_disable();
+ enable_kernel_altivec();
++ enable_kernel_vsx();
+ ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+ ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+ pagefault_enable();
+@@ -97,6 +98,7 @@ static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+ } else {
+ pagefault_disable();
+ enable_kernel_altivec();
++ enable_kernel_vsx();
+ aes_p8_encrypt(src, dst, &ctx->enc_key);
+ pagefault_enable();
+ }
+@@ -111,6 +113,7 @@ static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+ } else {
+ pagefault_disable();
+ enable_kernel_altivec();
++ enable_kernel_vsx();
+ aes_p8_decrypt(src, dst, &ctx->dec_key);
+ pagefault_enable();
+ }
+diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
+index 1a559b7dddb5..c8e7f653e5d3 100644
+--- a/drivers/crypto/vmx/aes_cbc.c
++++ b/drivers/crypto/vmx/aes_cbc.c
+@@ -81,6 +81,7 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
+
+ pagefault_disable();
+ enable_kernel_altivec();
++ enable_kernel_vsx();
+ ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+ ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+ pagefault_enable();
+@@ -108,6 +109,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
+ } else {
+ pagefault_disable();
+ enable_kernel_altivec();
++ enable_kernel_vsx();
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ ret = blkcipher_walk_virt(desc, &walk);
+@@ -143,6 +145,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
+ } else {
+ pagefault_disable();
+ enable_kernel_altivec();
++ enable_kernel_vsx();
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ ret = blkcipher_walk_virt(desc, &walk);
+diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
+index 96dbee4bf4a6..266e708d63df 100644
+--- a/drivers/crypto/vmx/aes_ctr.c
++++ b/drivers/crypto/vmx/aes_ctr.c
+@@ -79,6 +79,7 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
+
+ pagefault_disable();
+ enable_kernel_altivec();
++ enable_kernel_vsx();
+ ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+ pagefault_enable();
+
+@@ -97,6 +98,7 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
+
+ pagefault_disable();
+ enable_kernel_altivec();
++ enable_kernel_vsx();
+ aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key);
+ pagefault_enable();
+
+@@ -127,6 +129,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
+ while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+ pagefault_disable();
+ enable_kernel_altivec();
++ enable_kernel_vsx();
+ aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr, walk.dst.virt.addr,
+ (nbytes & AES_BLOCK_MASK)/AES_BLOCK_SIZE, &ctx->enc_key, walk.iv);
+ pagefault_enable();
+diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
+index d0ffe277af5c..917b3f09e724 100644
+--- a/drivers/crypto/vmx/ghash.c
++++ b/drivers/crypto/vmx/ghash.c
+@@ -116,6 +116,7 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
+
+ pagefault_disable();
+ enable_kernel_altivec();
++ enable_kernel_vsx();
+ enable_kernel_fp();
+ gcm_init_p8(ctx->htable, (const u64 *) key);
+ pagefault_enable();
+@@ -142,6 +143,7 @@ static int p8_ghash_update(struct shash_desc *desc,
+ GHASH_DIGEST_SIZE - dctx->bytes);
+ pagefault_disable();
+ enable_kernel_altivec();
++ enable_kernel_vsx();
+ enable_kernel_fp();
+ gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer,
+ GHASH_DIGEST_SIZE);
+@@ -154,6 +156,7 @@ static int p8_ghash_update(struct shash_desc *desc,
+ if (len) {
+ pagefault_disable();
+ enable_kernel_altivec();
++ enable_kernel_vsx();
+ enable_kernel_fp();
+ gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
+ pagefault_enable();
+@@ -182,6 +185,7 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
+ dctx->buffer[i] = 0;
+ pagefault_disable();
+ enable_kernel_altivec();
++ enable_kernel_vsx();
+ enable_kernel_fp();
+ gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer,
+ GHASH_DIGEST_SIZE);
+diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
+index c097d3a82bda..a9b01bcf7d0a 100644
+--- a/drivers/gpu/drm/radeon/radeon_combios.c
++++ b/drivers/gpu/drm/radeon/radeon_combios.c
+@@ -3387,6 +3387,14 @@ void radeon_combios_asic_init(struct drm_device *dev)
+ rdev->pdev->subsystem_device == 0x30ae)
+ return;
+
++ /* quirk for rs4xx HP Compaq dc5750 Small Form Factor to make it resume
++ * - it hangs on resume inside the dynclk 1 table.
++ */
++ if (rdev->family == CHIP_RS480 &&
++ rdev->pdev->subsystem_vendor == 0x103c &&
++ rdev->pdev->subsystem_device == 0x280a)
++ return;
++
+ /* DYN CLK 1 */
+ table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
+ if (table)
+diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
+index b716b0815644..bebf11a6622a 100644
+--- a/drivers/infiniband/core/uverbs.h
++++ b/drivers/infiniband/core/uverbs.h
+@@ -85,7 +85,7 @@
+ */
+
+ struct ib_uverbs_device {
+- struct kref ref;
++ atomic_t refcount;
+ int num_comp_vectors;
+ struct completion comp;
+ struct device *dev;
+@@ -94,6 +94,7 @@ struct ib_uverbs_device {
+ struct cdev cdev;
+ struct rb_root xrcd_tree;
+ struct mutex xrcd_tree_mutex;
++ struct kobject kobj;
+ };
+
+ struct ib_uverbs_event_file {
+diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
+index a9f048990dfc..ccc2494b4ea7 100644
+--- a/drivers/infiniband/core/uverbs_cmd.c
++++ b/drivers/infiniband/core/uverbs_cmd.c
+@@ -2244,6 +2244,12 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
+ next->send_flags = user_wr->send_flags;
+
+ if (is_ud) {
++ if (next->opcode != IB_WR_SEND &&
++ next->opcode != IB_WR_SEND_WITH_IMM) {
++ ret = -EINVAL;
++ goto out_put;
++ }
++
+ next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
+ file->ucontext);
+ if (!next->wr.ud.ah) {
+@@ -2283,9 +2289,11 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
+ user_wr->wr.atomic.compare_add;
+ next->wr.atomic.swap = user_wr->wr.atomic.swap;
+ next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
++ case IB_WR_SEND:
+ break;
+ default:
+- break;
++ ret = -EINVAL;
++ goto out_put;
+ }
+ }
+
+diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
+index 88cce9bb72fe..09686d49d4c1 100644
+--- a/drivers/infiniband/core/uverbs_main.c
++++ b/drivers/infiniband/core/uverbs_main.c
+@@ -129,14 +129,18 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
+ static void ib_uverbs_add_one(struct ib_device *device);
+ static void ib_uverbs_remove_one(struct ib_device *device);
+
+-static void ib_uverbs_release_dev(struct kref *ref)
++static void ib_uverbs_release_dev(struct kobject *kobj)
+ {
+ struct ib_uverbs_device *dev =
+- container_of(ref, struct ib_uverbs_device, ref);
++ container_of(kobj, struct ib_uverbs_device, kobj);
+
+- complete(&dev->comp);
++ kfree(dev);
+ }
+
++static struct kobj_type ib_uverbs_dev_ktype = {
++ .release = ib_uverbs_release_dev,
++};
++
+ static void ib_uverbs_release_event_file(struct kref *ref)
+ {
+ struct ib_uverbs_event_file *file =
+@@ -302,13 +306,19 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
+ return context->device->dealloc_ucontext(context);
+ }
+
++static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev)
++{
++ complete(&dev->comp);
++}
++
+ static void ib_uverbs_release_file(struct kref *ref)
+ {
+ struct ib_uverbs_file *file =
+ container_of(ref, struct ib_uverbs_file, ref);
+
+ module_put(file->device->ib_dev->owner);
+- kref_put(&file->device->ref, ib_uverbs_release_dev);
++ if (atomic_dec_and_test(&file->device->refcount))
++ ib_uverbs_comp_dev(file->device);
+
+ kfree(file);
+ }
+@@ -742,9 +752,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
+ int ret;
+
+ dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
+- if (dev)
+- kref_get(&dev->ref);
+- else
++ if (!atomic_inc_not_zero(&dev->refcount))
+ return -ENXIO;
+
+ if (!try_module_get(dev->ib_dev->owner)) {
+@@ -765,6 +773,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
+ mutex_init(&file->mutex);
+
+ filp->private_data = file;
++ kobject_get(&dev->kobj);
+
+ return nonseekable_open(inode, filp);
+
+@@ -772,13 +781,16 @@ err_module:
+ module_put(dev->ib_dev->owner);
+
+ err:
+- kref_put(&dev->ref, ib_uverbs_release_dev);
++ if (atomic_dec_and_test(&dev->refcount))
++ ib_uverbs_comp_dev(dev);
++
+ return ret;
+ }
+
+ static int ib_uverbs_close(struct inode *inode, struct file *filp)
+ {
+ struct ib_uverbs_file *file = filp->private_data;
++ struct ib_uverbs_device *dev = file->device;
+
+ ib_uverbs_cleanup_ucontext(file, file->ucontext);
+
+@@ -786,6 +798,7 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
+ kref_put(&file->async_file->ref, ib_uverbs_release_event_file);
+
+ kref_put(&file->ref, ib_uverbs_release_file);
++ kobject_put(&dev->kobj);
+
+ return 0;
+ }
+@@ -881,10 +894,11 @@ static void ib_uverbs_add_one(struct ib_device *device)
+ if (!uverbs_dev)
+ return;
+
+- kref_init(&uverbs_dev->ref);
++ atomic_set(&uverbs_dev->refcount, 1);
+ init_completion(&uverbs_dev->comp);
+ uverbs_dev->xrcd_tree = RB_ROOT;
+ mutex_init(&uverbs_dev->xrcd_tree_mutex);
++ kobject_init(&uverbs_dev->kobj, &ib_uverbs_dev_ktype);
+
+ spin_lock(&map_lock);
+ devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
+@@ -911,6 +925,7 @@ static void ib_uverbs_add_one(struct ib_device *device)
+ cdev_init(&uverbs_dev->cdev, NULL);
+ uverbs_dev->cdev.owner = THIS_MODULE;
+ uverbs_dev->cdev.ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
++ uverbs_dev->cdev.kobj.parent = &uverbs_dev->kobj;
+ kobject_set_name(&uverbs_dev->cdev.kobj, "uverbs%d", uverbs_dev->devnum);
+ if (cdev_add(&uverbs_dev->cdev, base, 1))
+ goto err_cdev;
+@@ -941,9 +956,10 @@ err_cdev:
+ clear_bit(devnum, overflow_map);
+
+ err:
+- kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
++ if (atomic_dec_and_test(&uverbs_dev->refcount))
++ ib_uverbs_comp_dev(uverbs_dev);
+ wait_for_completion(&uverbs_dev->comp);
+- kfree(uverbs_dev);
++ kobject_put(&uverbs_dev->kobj);
+ return;
+ }
+
+@@ -963,9 +979,10 @@ static void ib_uverbs_remove_one(struct ib_device *device)
+ else
+ clear_bit(uverbs_dev->devnum - IB_UVERBS_MAX_DEVICES, overflow_map);
+
+- kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
++ if (atomic_dec_and_test(&uverbs_dev->refcount))
++ ib_uverbs_comp_dev(uverbs_dev);
+ wait_for_completion(&uverbs_dev->comp);
+- kfree(uverbs_dev);
++ kobject_put(&uverbs_dev->kobj);
+ }
+
+ static char *uverbs_devnode(struct device *dev, umode_t *mode)
+diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
+index f50a546224ad..33fdd50123f7 100644
+--- a/drivers/infiniband/hw/mlx4/ah.c
++++ b/drivers/infiniband/hw/mlx4/ah.c
+@@ -148,9 +148,13 @@ int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
+ enum rdma_link_layer ll;
+
+ memset(ah_attr, 0, sizeof *ah_attr);
+- ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
+ ah_attr->port_num = be32_to_cpu(ah->av.ib.port_pd) >> 24;
+ ll = rdma_port_get_link_layer(ibah->device, ah_attr->port_num);
++ if (ll == IB_LINK_LAYER_ETHERNET)
++ ah_attr->sl = be32_to_cpu(ah->av.eth.sl_tclass_flowlabel) >> 29;
++ else
++ ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
++
+ ah_attr->dlid = ll == IB_LINK_LAYER_INFINIBAND ? be16_to_cpu(ah->av.ib.dlid) : 0;
+ if (ah->av.ib.stat_rate)
+ ah_attr->static_rate = ah->av.ib.stat_rate - MLX4_STAT_RATE_OFFSET;
+diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
+index 0176caa5792c..2857ed89725e 100644
+--- a/drivers/infiniband/hw/mlx4/cq.c
++++ b/drivers/infiniband/hw/mlx4/cq.c
+@@ -629,7 +629,7 @@ static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries,
+ * simulated FLUSH_ERR completions
+ */
+ list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) {
+- mlx4_ib_qp_sw_comp(qp, num_entries, wc, npolled, 1);
++ mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 1);
+ if (*npolled >= num_entries)
+ goto out;
+ }
+diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
+index ed327e6c8fdc..a0559a8af4f4 100644
+--- a/drivers/infiniband/hw/mlx4/mcg.c
++++ b/drivers/infiniband/hw/mlx4/mcg.c
+@@ -206,15 +206,16 @@ static int send_mad_to_wire(struct mlx4_ib_demux_ctx *ctx, struct ib_mad *mad)
+ {
+ struct mlx4_ib_dev *dev = ctx->dev;
+ struct ib_ah_attr ah_attr;
++ unsigned long flags;
+
+- spin_lock(&dev->sm_lock);
++ spin_lock_irqsave(&dev->sm_lock, flags);
+ if (!dev->sm_ah[ctx->port - 1]) {
+ /* port is not yet Active, sm_ah not ready */
+- spin_unlock(&dev->sm_lock);
++ spin_unlock_irqrestore(&dev->sm_lock, flags);
+ return -EAGAIN;
+ }
+ mlx4_ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);
+- spin_unlock(&dev->sm_lock);
++ spin_unlock_irqrestore(&dev->sm_lock, flags);
+ return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev),
+ ctx->port, IB_QPT_GSI, 0, 1, IB_QP1_QKEY,
+ &ah_attr, NULL, mad);
+diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
+index 6797108ce873..69fb5ba94d0f 100644
+--- a/drivers/infiniband/hw/mlx4/sysfs.c
++++ b/drivers/infiniband/hw/mlx4/sysfs.c
+@@ -640,6 +640,8 @@ static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave)
+ struct mlx4_port *p;
+ int i;
+ int ret;
++ int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port_num) ==
++ IB_LINK_LAYER_ETHERNET;
+
+ p = kzalloc(sizeof *p, GFP_KERNEL);
+ if (!p)
+@@ -657,7 +659,8 @@ static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave)
+
+ p->pkey_group.name = "pkey_idx";
+ p->pkey_group.attrs =
+- alloc_group_attrs(show_port_pkey, store_port_pkey,
++ alloc_group_attrs(show_port_pkey,
++ is_eth ? NULL : store_port_pkey,
+ dev->dev->caps.pkey_table_len[port_num]);
+ if (!p->pkey_group.attrs) {
+ ret = -ENOMEM;
+diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
+index 71c593583864..0c52f078759c 100644
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -1119,19 +1119,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ return &mr->ibmr;
+
+ error:
+- /*
+- * Destroy the umem *before* destroying the MR, to ensure we
+- * will not have any in-flight notifiers when destroying the
+- * MR.
+- *
+- * As the MR is completely invalid to begin with, and this
+- * error path is only taken if we can't push the mr entry into
+- * the pagefault tree, this is safe.
+- */
+-
+ ib_umem_release(umem);
+- /* Kill the MR, and return an error code. */
+- clean_mr(mr);
+ return ERR_PTR(err);
+ }
+
+diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
+index ad843c786e72..5afaa218508d 100644
+--- a/drivers/infiniband/hw/qib/qib_keys.c
++++ b/drivers/infiniband/hw/qib/qib_keys.c
+@@ -86,6 +86,10 @@ int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
+ * unrestricted LKEY.
+ */
+ rkt->gen++;
++ /*
++ * bits are capped in qib_verbs.c to insure enough bits
++ * for generation number
++ */
+ mr->lkey = (r << (32 - ib_qib_lkey_table_size)) |
+ ((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen)
+ << 8);
+diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
+index 4a3599890ea5..9dd5d9a0556b 100644
+--- a/drivers/infiniband/hw/qib/qib_verbs.c
++++ b/drivers/infiniband/hw/qib/qib_verbs.c
+@@ -40,6 +40,7 @@
+ #include <linux/rculist.h>
+ #include <linux/mm.h>
+ #include <linux/random.h>
++#include <linux/vmalloc.h>
+
+ #include "qib.h"
+ #include "qib_common.h"
+@@ -2089,10 +2090,16 @@ int qib_register_ib_device(struct qib_devdata *dd)
+ * the LKEY). The remaining bits act as a generation number or tag.
+ */
+ spin_lock_init(&dev->lk_table.lock);
++ /* insure generation is at least 4 bits see keys.c */
++ if (ib_qib_lkey_table_size > MAX_LKEY_TABLE_BITS) {
++ qib_dev_warn(dd, "lkey bits %u too large, reduced to %u\n",
++ ib_qib_lkey_table_size, MAX_LKEY_TABLE_BITS);
++ ib_qib_lkey_table_size = MAX_LKEY_TABLE_BITS;
++ }
+ dev->lk_table.max = 1 << ib_qib_lkey_table_size;
+ lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
+ dev->lk_table.table = (struct qib_mregion __rcu **)
+- __get_free_pages(GFP_KERNEL, get_order(lk_tab_size));
++ vmalloc(lk_tab_size);
+ if (dev->lk_table.table == NULL) {
+ ret = -ENOMEM;
+ goto err_lk;
+@@ -2265,7 +2272,7 @@ err_tx:
+ sizeof(struct qib_pio_header),
+ dev->pio_hdrs, dev->pio_hdrs_phys);
+ err_hdrs:
+- free_pages((unsigned long) dev->lk_table.table, get_order(lk_tab_size));
++ vfree(dev->lk_table.table);
+ err_lk:
+ kfree(dev->qp_table);
+ err_qpt:
+@@ -2319,8 +2326,7 @@ void qib_unregister_ib_device(struct qib_devdata *dd)
+ sizeof(struct qib_pio_header),
+ dev->pio_hdrs, dev->pio_hdrs_phys);
+ lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
+- free_pages((unsigned long) dev->lk_table.table,
+- get_order(lk_tab_size));
++ vfree(dev->lk_table.table);
+ kfree(dev->qp_table);
+ }
+
+diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
+index bfc8948fdd35..44ca28c83fe6 100644
+--- a/drivers/infiniband/hw/qib/qib_verbs.h
++++ b/drivers/infiniband/hw/qib/qib_verbs.h
+@@ -647,6 +647,8 @@ struct qib_qpn_table {
+ struct qpn_map map[QPNMAP_ENTRIES];
+ };
+
++#define MAX_LKEY_TABLE_BITS 23
++
+ struct qib_lkey_table {
+ spinlock_t lock; /* protect changes in this struct */
+ u32 next; /* next unused index (speeds search) */
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index 6a594aac2290..c933d882c35c 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -201,6 +201,7 @@ iser_initialize_task_headers(struct iscsi_task *task,
+ goto out;
+ }
+
++ tx_desc->mapped = true;
+ tx_desc->dma_addr = dma_addr;
+ tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
+ tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
+@@ -360,16 +361,19 @@ iscsi_iser_task_xmit(struct iscsi_task *task)
+ static void iscsi_iser_cleanup_task(struct iscsi_task *task)
+ {
+ struct iscsi_iser_task *iser_task = task->dd_data;
+- struct iser_tx_desc *tx_desc = &iser_task->desc;
+- struct iser_conn *iser_conn = task->conn->dd_data;
++ struct iser_tx_desc *tx_desc = &iser_task->desc;
++ struct iser_conn *iser_conn = task->conn->dd_data;
+ struct iser_device *device = iser_conn->ib_conn.device;
+
+ /* DEVICE_REMOVAL event might have already released the device */
+ if (!device)
+ return;
+
+- ib_dma_unmap_single(device->ib_device,
+- tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
++ if (likely(tx_desc->mapped)) {
++ ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
++ ISER_HEADERS_LEN, DMA_TO_DEVICE);
++ tx_desc->mapped = false;
++ }
+
+ /* mgmt tasks do not need special cleanup */
+ if (!task->sc)
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
+index 262ba1f8ee50..d2b6caf7694d 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
+@@ -270,6 +270,7 @@ enum iser_desc_type {
+ * sg[1] optionally points to either of immediate data
+ * unsolicited data-out or control
+ * @num_sge: number sges used on this TX task
++ * @mapped: Is the task header mapped
+ */
+ struct iser_tx_desc {
+ struct iser_hdr iser_header;
+@@ -278,6 +279,7 @@ struct iser_tx_desc {
+ u64 dma_addr;
+ struct ib_sge tx_sg[2];
+ int num_sge;
++ bool mapped;
+ };
+
+ #define ISER_RX_PAD_SIZE (256 - (ISER_RX_PAYLOAD_SIZE + \
+diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
+index 3e2118e8ed87..0a47f42fec24 100644
+--- a/drivers/infiniband/ulp/iser/iser_initiator.c
++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -454,7 +454,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
+ unsigned long buf_offset;
+ unsigned long data_seg_len;
+ uint32_t itt;
+- int err = 0;
++ int err;
+ struct ib_sge *tx_dsg;
+
+ itt = (__force uint32_t)hdr->itt;
+@@ -475,7 +475,9 @@ int iser_send_data_out(struct iscsi_conn *conn,
+ memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));
+
+ /* build the tx desc */
+- iser_initialize_task_headers(task, tx_desc);
++ err = iser_initialize_task_headers(task, tx_desc);
++ if (err)
++ goto send_data_out_error;
+
+ mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];
+ tx_dsg = &tx_desc->tx_sg[1];
+@@ -502,7 +504,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
+
+ send_data_out_error:
+ kmem_cache_free(ig.desc_cache, tx_desc);
+- iser_err("conn %p failed err %d\n",conn, err);
++ iser_err("conn %p failed err %d\n", conn, err);
+ return err;
+ }
+
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index 75c01b27bd0b..025f93105444 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -2761,6 +2761,13 @@ static int srp_sdev_count(struct Scsi_Host *host)
+ return c;
+ }
+
++/*
++ * Return values:
++ * < 0 upon failure. Caller is responsible for SRP target port cleanup.
++ * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
++ * removal has been scheduled.
++ * 0 and target->state != SRP_TARGET_REMOVED upon success.
++ */
+ static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
+ {
+ struct srp_rport_identifiers ids;
+@@ -3266,7 +3273,7 @@ static ssize_t srp_create_target(struct device *dev,
+ srp_free_ch_ib(target, ch);
+ srp_free_req_data(target, ch);
+ target->ch_count = ch - target->ch;
+- break;
++ goto connected;
+ }
+ }
+
+@@ -3276,6 +3283,7 @@ static ssize_t srp_create_target(struct device *dev,
+ node_idx++;
+ }
+
++connected:
+ target->scsi_host->nr_hw_queues = target->ch_count;
+
+ ret = srp_add_target(host, target);
+@@ -3298,6 +3306,8 @@ out:
+ mutex_unlock(&host->add_target_mutex);
+
+ scsi_host_put(target->scsi_host);
++ if (ret < 0)
++ scsi_host_put(target->scsi_host);
+
+ return ret;
+
+diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
+index a18f41b89b6a..2ae522f0d2b2 100644
+--- a/drivers/input/evdev.c
++++ b/drivers/input/evdev.c
+@@ -290,19 +290,14 @@ static int evdev_flush(struct file *file, fl_owner_t id)
+ {
+ struct evdev_client *client = file->private_data;
+ struct evdev *evdev = client->evdev;
+- int retval;
+
+- retval = mutex_lock_interruptible(&evdev->mutex);
+- if (retval)
+- return retval;
++ mutex_lock(&evdev->mutex);
+
+- if (!evdev->exist || client->revoked)
+- retval = -ENODEV;
+- else
+- retval = input_flush_device(&evdev->handle, file);
++ if (evdev->exist && !client->revoked)
++ input_flush_device(&evdev->handle, file);
+
+ mutex_unlock(&evdev->mutex);
+- return retval;
++ return 0;
+ }
+
+ static void evdev_free(struct device *dev)
+diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
+index abeedc9a78c2..2570f2a25dc4 100644
+--- a/drivers/iommu/fsl_pamu.c
++++ b/drivers/iommu/fsl_pamu.c
+@@ -41,7 +41,6 @@ struct pamu_isr_data {
+
+ static struct paace *ppaact;
+ static struct paace *spaact;
+-static struct ome *omt __initdata;
+
+ /*
+ * Table for matching compatible strings, for device tree
+@@ -50,7 +49,7 @@ static struct ome *omt __initdata;
+ * SOCs. For the older SOCs "fsl,qoriq-device-config-1.0"
+ * string would be used.
+ */
+-static const struct of_device_id guts_device_ids[] __initconst = {
++static const struct of_device_id guts_device_ids[] = {
+ { .compatible = "fsl,qoriq-device-config-1.0", },
+ { .compatible = "fsl,qoriq-device-config-2.0", },
+ {}
+@@ -599,7 +598,7 @@ found_cpu_node:
+ * Memory accesses to QMAN and BMAN private memory need not be coherent, so
+ * clear the PAACE entry coherency attribute for them.
+ */
+-static void __init setup_qbman_paace(struct paace *ppaace, int paace_type)
++static void setup_qbman_paace(struct paace *ppaace, int paace_type)
+ {
+ switch (paace_type) {
+ case QMAN_PAACE:
+@@ -629,7 +628,7 @@ static void __init setup_qbman_paace(struct paace *ppaace, int paace_type)
+ * this table to translate device transaction to appropriate corenet
+ * transaction.
+ */
+-static void __init setup_omt(struct ome *omt)
++static void setup_omt(struct ome *omt)
+ {
+ struct ome *ome;
+
+@@ -666,7 +665,7 @@ static void __init setup_omt(struct ome *omt)
+ * Get the maximum number of PAACT table entries
+ * and subwindows supported by PAMU
+ */
+-static void __init get_pamu_cap_values(unsigned long pamu_reg_base)
++static void get_pamu_cap_values(unsigned long pamu_reg_base)
+ {
+ u32 pc_val;
+
+@@ -676,9 +675,9 @@ static void __init get_pamu_cap_values(unsigned long pamu_reg_base)
+ }
+
+ /* Setup PAMU registers pointing to PAACT, SPAACT and OMT */
+-static int __init setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
+- phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
+- phys_addr_t omt_phys)
++static int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
++ phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
++ phys_addr_t omt_phys)
+ {
+ u32 *pc;
+ struct pamu_mmap_regs *pamu_regs;
+@@ -720,7 +719,7 @@ static int __init setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu
+ }
+
+ /* Enable all device LIODNS */
+-static void __init setup_liodns(void)
++static void setup_liodns(void)
+ {
+ int i, len;
+ struct paace *ppaace;
+@@ -846,7 +845,7 @@ struct ccsr_law {
+ /*
+ * Create a coherence subdomain for a given memory block.
+ */
+-static int __init create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
++static int create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
+ {
+ struct device_node *np;
+ const __be32 *iprop;
+@@ -988,7 +987,7 @@ error:
+ static const struct {
+ u32 svr;
+ u32 port_id;
+-} port_id_map[] __initconst = {
++} port_id_map[] = {
+ {(SVR_P2040 << 8) | 0x10, 0xFF000000}, /* P2040 1.0 */
+ {(SVR_P2040 << 8) | 0x11, 0xFF000000}, /* P2040 1.1 */
+ {(SVR_P2041 << 8) | 0x10, 0xFF000000}, /* P2041 1.0 */
+@@ -1006,7 +1005,7 @@ static const struct {
+
+ #define SVR_SECURITY 0x80000 /* The Security (E) bit */
+
+-static int __init fsl_pamu_probe(struct platform_device *pdev)
++static int fsl_pamu_probe(struct platform_device *pdev)
+ {
+ struct device *dev = &pdev->dev;
+ void __iomem *pamu_regs = NULL;
+@@ -1022,6 +1021,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
+ int irq;
+ phys_addr_t ppaact_phys;
+ phys_addr_t spaact_phys;
++ struct ome *omt;
+ phys_addr_t omt_phys;
+ size_t mem_size = 0;
+ unsigned int order = 0;
+@@ -1200,7 +1200,7 @@ error:
+ return ret;
+ }
+
+-static struct platform_driver fsl_of_pamu_driver __initdata = {
++static struct platform_driver fsl_of_pamu_driver = {
+ .driver = {
+ .name = "fsl-of-pamu",
+ },
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index c87c4b1bfc00..c23427951ec1 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -681,6 +681,7 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
+ struct context_entry *context;
+ u64 *entry;
+
++ entry = &root->lo;
+ if (ecs_enabled(iommu)) {
+ if (devfn >= 0x80) {
1588 + devfn -= 0x80;
1589 +@@ -688,7 +689,6 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
1590 + }
1591 + devfn *= 2;
1592 + }
1593 +- entry = &root->lo;
1594 + if (*entry & 1)
1595 + context = phys_to_virt(*entry & VTD_PAGE_MASK);
1596 + else {
1597 +diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
1598 +index 4e460216bd16..e29d5d7fe220 100644
1599 +--- a/drivers/iommu/io-pgtable-arm.c
1600 ++++ b/drivers/iommu/io-pgtable-arm.c
1601 +@@ -200,6 +200,10 @@ typedef u64 arm_lpae_iopte;
1602 +
1603 + static bool selftest_running = false;
1604 +
1605 ++static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
1606 ++ unsigned long iova, size_t size, int lvl,
1607 ++ arm_lpae_iopte *ptep);
1608 ++
1609 + static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
1610 + unsigned long iova, phys_addr_t paddr,
1611 + arm_lpae_iopte prot, int lvl,
1612 +@@ -207,10 +211,21 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
1613 + {
1614 + arm_lpae_iopte pte = prot;
1615 +
1616 +- /* We require an unmap first */
1617 + if (iopte_leaf(*ptep, lvl)) {
1618 ++ /* We require an unmap first */
1619 + WARN_ON(!selftest_running);
1620 + return -EEXIST;
1621 ++ } else if (iopte_type(*ptep, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
1622 ++ /*
1623 ++ * We need to unmap and free the old table before
1624 ++ * overwriting it with a block entry.
1625 ++ */
1626 ++ arm_lpae_iopte *tblp;
1627 ++ size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
1628 ++
1629 ++ tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
1630 ++ if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
1631 ++ return -EINVAL;
1632 + }
1633 +
1634 + if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
1635 +diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
1636 +index c845d99ecf6b..e0ff5f4d7fed 100644
1637 +--- a/drivers/iommu/tegra-smmu.c
1638 ++++ b/drivers/iommu/tegra-smmu.c
1639 +@@ -26,6 +26,7 @@ struct tegra_smmu {
1640 + const struct tegra_smmu_soc *soc;
1641 +
1642 + unsigned long pfn_mask;
1643 ++ unsigned long tlb_mask;
1644 +
1645 + unsigned long *asids;
1646 + struct mutex lock;
1647 +@@ -65,7 +66,8 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
1648 + #define SMMU_TLB_CONFIG 0x14
1649 + #define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
1650 + #define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
1651 +-#define SMMU_TLB_CONFIG_ACTIVE_LINES(x) ((x) & 0x3f)
1652 ++#define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
1653 ++ ((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)
1654 +
1655 + #define SMMU_PTC_CONFIG 0x18
1656 + #define SMMU_PTC_CONFIG_ENABLE (1 << 29)
1657 +@@ -716,6 +718,9 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
1658 + smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
1659 + dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
1660 + mc->soc->num_address_bits, smmu->pfn_mask);
1661 ++ smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1;
1662 ++ dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
1663 ++ smmu->tlb_mask);
1664 +
1665 + value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);
1666 +
1667 +@@ -725,7 +730,7 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
1668 + smmu_writel(smmu, value, SMMU_PTC_CONFIG);
1669 +
1670 + value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
1671 +- SMMU_TLB_CONFIG_ACTIVE_LINES(0x20);
1672 ++ SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);
1673 +
1674 + if (soc->supports_round_robin_arbitration)
1675 + value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;
1676 +diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
1677 +index 8c91fd5eb6fd..3ac9c4194814 100644
1678 +--- a/drivers/isdn/gigaset/ser-gigaset.c
1679 ++++ b/drivers/isdn/gigaset/ser-gigaset.c
1680 +@@ -524,9 +524,18 @@ gigaset_tty_open(struct tty_struct *tty)
1681 + cs->hw.ser->tty = tty;
1682 + atomic_set(&cs->hw.ser->refcnt, 1);
1683 + init_completion(&cs->hw.ser->dead_cmp);
1684 +-
1685 + tty->disc_data = cs;
1686 +
1687 ++ /* Set the amount of data we're willing to receive per call
1688 ++ * from the hardware driver to half of the input buffer size
1689 ++ * to leave some reserve.
1690 ++ * Note: We don't do flow control towards the hardware driver.
1691 ++ * If more data is received than will fit into the input buffer,
1692 ++ * it will be dropped and an error will be logged. This should
1693 ++ * never happen as the device is slow and the buffer size ample.
1694 ++ */
1695 ++ tty->receive_room = RBUFSIZE/2;
1696 ++
1697 + /* OK.. Initialization of the datastructures and the HW is done.. Now
1698 + * startup system and notify the LL that we are ready to run
1699 + */
1700 +diff --git a/drivers/md/md.c b/drivers/md/md.c
1701 +index e4621511d118..e8c44fcb1ad1 100644
1702 +--- a/drivers/md/md.c
1703 ++++ b/drivers/md/md.c
1704 +@@ -5365,6 +5365,8 @@ static void __md_stop(struct mddev *mddev)
1705 + {
1706 + struct md_personality *pers = mddev->pers;
1707 + mddev_detach(mddev);
1708 ++ /* Ensure ->event_work is done */
1709 ++ flush_workqueue(md_misc_wq);
1710 + spin_lock(&mddev->lock);
1711 + mddev->ready = 0;
1712 + mddev->pers = NULL;
1713 +diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
1714 +index f55c3f35b746..fe0122771642 100644
1715 +--- a/drivers/md/raid10.c
1716 ++++ b/drivers/md/raid10.c
1717 +@@ -3566,6 +3566,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
1718 + /* far_copies must be 1 */
1719 + conf->prev.stride = conf->dev_sectors;
1720 + }
1721 ++ conf->reshape_safe = conf->reshape_progress;
1722 + spin_lock_init(&conf->device_lock);
1723 + INIT_LIST_HEAD(&conf->retry_list);
1724 +
1725 +@@ -3770,7 +3771,6 @@ static int run(struct mddev *mddev)
1726 + }
1727 + conf->offset_diff = min_offset_diff;
1728 +
1729 +- conf->reshape_safe = conf->reshape_progress;
1730 + clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
1731 + clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
1732 + set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
1733 +@@ -4113,6 +4113,7 @@ static int raid10_start_reshape(struct mddev *mddev)
1734 + conf->reshape_progress = size;
1735 + } else
1736 + conf->reshape_progress = 0;
1737 ++ conf->reshape_safe = conf->reshape_progress;
1738 + spin_unlock_irq(&conf->device_lock);
1739 +
1740 + if (mddev->delta_disks && mddev->bitmap) {
1741 +@@ -4180,6 +4181,7 @@ abort:
1742 + rdev->new_data_offset = rdev->data_offset;
1743 + smp_wmb();
1744 + conf->reshape_progress = MaxSector;
1745 ++ conf->reshape_safe = MaxSector;
1746 + mddev->reshape_position = MaxSector;
1747 + spin_unlock_irq(&conf->device_lock);
1748 + return ret;
1749 +@@ -4534,6 +4536,7 @@ static void end_reshape(struct r10conf *conf)
1750 + md_finish_reshape(conf->mddev);
1751 + smp_wmb();
1752 + conf->reshape_progress = MaxSector;
1753 ++ conf->reshape_safe = MaxSector;
1754 + spin_unlock_irq(&conf->device_lock);
1755 +
1756 + /* read-ahead size must cover two whole stripes, which is
1757 +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
1758 +index b6793d2e051f..23af6772f146 100644
1759 +--- a/drivers/md/raid5.c
1760 ++++ b/drivers/md/raid5.c
1761 +@@ -2151,6 +2151,9 @@ static int resize_stripes(struct r5conf *conf, int newsize)
1762 + if (!sc)
1763 + return -ENOMEM;
1764 +
1765 ++ /* Need to ensure auto-resizing doesn't interfere */
1766 ++ mutex_lock(&conf->cache_size_mutex);
1767 ++
1768 + for (i = conf->max_nr_stripes; i; i--) {
1769 + nsh = alloc_stripe(sc, GFP_KERNEL);
1770 + if (!nsh)
1771 +@@ -2167,6 +2170,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
1772 + kmem_cache_free(sc, nsh);
1773 + }
1774 + kmem_cache_destroy(sc);
1775 ++ mutex_unlock(&conf->cache_size_mutex);
1776 + return -ENOMEM;
1777 + }
1778 + /* Step 2 - Must use GFP_NOIO now.
1779 +@@ -2213,6 +2217,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
1780 + } else
1781 + err = -ENOMEM;
1782 +
1783 ++ mutex_unlock(&conf->cache_size_mutex);
1784 + /* Step 4, return new stripes to service */
1785 + while(!list_empty(&newstripes)) {
1786 + nsh = list_entry(newstripes.next, struct stripe_head, lru);
1787 +@@ -2240,7 +2245,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
1788 + static int drop_one_stripe(struct r5conf *conf)
1789 + {
1790 + struct stripe_head *sh;
1791 +- int hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS;
1792 ++ int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK;
1793 +
1794 + spin_lock_irq(conf->hash_locks + hash);
1795 + sh = get_free_stripe(conf, hash);
1796 +@@ -5846,12 +5851,14 @@ static void raid5d(struct md_thread *thread)
1797 + pr_debug("%d stripes handled\n", handled);
1798 +
1799 + spin_unlock_irq(&conf->device_lock);
1800 +- if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state)) {
1801 ++ if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) &&
1802 ++ mutex_trylock(&conf->cache_size_mutex)) {
1803 + grow_one_stripe(conf, __GFP_NOWARN);
1804 + /* Set flag even if allocation failed. This helps
1805 + * slow down allocation requests when mem is short
1806 + */
1807 + set_bit(R5_DID_ALLOC, &conf->cache_state);
1808 ++ mutex_unlock(&conf->cache_size_mutex);
1809 + }
1810 +
1811 + async_tx_issue_pending_all();
1812 +@@ -5883,18 +5890,22 @@ raid5_set_cache_size(struct mddev *mddev, int size)
1813 + return -EINVAL;
1814 +
1815 + conf->min_nr_stripes = size;
1816 ++ mutex_lock(&conf->cache_size_mutex);
1817 + while (size < conf->max_nr_stripes &&
1818 + drop_one_stripe(conf))
1819 + ;
1820 ++ mutex_unlock(&conf->cache_size_mutex);
1821 +
1822 +
1823 + err = md_allow_write(mddev);
1824 + if (err)
1825 + return err;
1826 +
1827 ++ mutex_lock(&conf->cache_size_mutex);
1828 + while (size > conf->max_nr_stripes)
1829 + if (!grow_one_stripe(conf, GFP_KERNEL))
1830 + break;
1831 ++ mutex_unlock(&conf->cache_size_mutex);
1832 +
1833 + return 0;
1834 + }
1835 +@@ -6360,11 +6371,19 @@ static unsigned long raid5_cache_scan(struct shrinker *shrink,
1836 + struct shrink_control *sc)
1837 + {
1838 + struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
1839 +- int ret = 0;
1840 +- while (ret < sc->nr_to_scan) {
1841 +- if (drop_one_stripe(conf) == 0)
1842 +- return SHRINK_STOP;
1843 +- ret++;
1844 ++ unsigned long ret = SHRINK_STOP;
1845 ++
1846 ++ if (mutex_trylock(&conf->cache_size_mutex)) {
1847 ++ ret = 0;
1848 ++ while (ret < sc->nr_to_scan &&
1849 ++ conf->max_nr_stripes > conf->min_nr_stripes) {
1850 ++ if (drop_one_stripe(conf) == 0) {
1851 ++ ret = SHRINK_STOP;
1852 ++ break;
1853 ++ }
1854 ++ ret++;
1855 ++ }
1856 ++ mutex_unlock(&conf->cache_size_mutex);
1857 + }
1858 + return ret;
1859 + }
1860 +@@ -6433,6 +6452,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
1861 + goto abort;
1862 + spin_lock_init(&conf->device_lock);
1863 + seqcount_init(&conf->gen_lock);
1864 ++ mutex_init(&conf->cache_size_mutex);
1865 + init_waitqueue_head(&conf->wait_for_stripe);
1866 + init_waitqueue_head(&conf->wait_for_overlap);
1867 + INIT_LIST_HEAD(&conf->handle_list);
1868 +diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
1869 +index 896d603ad0da..03472fbbd882 100644
1870 +--- a/drivers/md/raid5.h
1871 ++++ b/drivers/md/raid5.h
1872 +@@ -482,7 +482,8 @@ struct r5conf {
1873 + */
1874 + int active_name;
1875 + char cache_name[2][32];
1876 +- struct kmem_cache *slab_cache; /* for allocating stripes */
1877 ++ struct kmem_cache *slab_cache; /* for allocating stripes */
1878 ++ struct mutex cache_size_mutex; /* Protect changes to cache size */
1879 +
1880 + int seq_flush, seq_write;
1881 + int quiesce;
1882 +diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
1883 +index a30cc2f7e4f1..ddf59ee5ca40 100644
1884 +--- a/drivers/media/platform/am437x/am437x-vpfe.c
1885 ++++ b/drivers/media/platform/am437x/am437x-vpfe.c
1886 +@@ -1185,14 +1185,24 @@ static int vpfe_initialize_device(struct vpfe_device *vpfe)
1887 + static int vpfe_release(struct file *file)
1888 + {
1889 + struct vpfe_device *vpfe = video_drvdata(file);
1890 ++ bool fh_singular;
1891 + int ret;
1892 +
1893 + mutex_lock(&vpfe->lock);
1894 +
1895 +- if (v4l2_fh_is_singular_file(file))
1896 +- vpfe_ccdc_close(&vpfe->ccdc, vpfe->pdev);
1897 ++ /* Save the singular status before we call the clean-up helper */
1898 ++ fh_singular = v4l2_fh_is_singular_file(file);
1899 ++
1900 ++ /* the release helper will clean up any ongoing streaming */
1901 + ret = _vb2_fop_release(file, NULL);
1902 +
1903 ++ /*
1904 ++ * If this was the last open file, then
1905 ++ * de-initialize the hw module.
1906 ++ */
1907 ++ if (fh_singular)
1908 ++ vpfe_ccdc_close(&vpfe->ccdc, vpfe->pdev);
1909 ++
1910 + mutex_unlock(&vpfe->lock);
1911 +
1912 + return ret;
1913 +@@ -1577,7 +1587,7 @@ static int vpfe_s_fmt(struct file *file, void *priv,
1914 + return -EBUSY;
1915 + }
1916 +
1917 +- ret = vpfe_try_fmt(file, priv, fmt);
1918 ++ ret = vpfe_try_fmt(file, priv, &format);
1919 + if (ret)
1920 + return ret;
1921 +
1922 +diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
1923 +index 18d0a871747f..947d8be7b245 100644
1924 +--- a/drivers/media/platform/omap3isp/isp.c
1925 ++++ b/drivers/media/platform/omap3isp/isp.c
1926 +@@ -829,14 +829,14 @@ static int isp_pipeline_link_notify(struct media_link *link, u32 flags,
1927 + int ret;
1928 +
1929 + if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
1930 +- !(link->flags & MEDIA_LNK_FL_ENABLED)) {
1931 ++ !(flags & MEDIA_LNK_FL_ENABLED)) {
1932 + /* Powering off entities is assumed to never fail. */
1933 + isp_pipeline_pm_power(source, -sink_use);
1934 + isp_pipeline_pm_power(sink, -source_use);
1935 + return 0;
1936 + }
1937 +
1938 +- if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
1939 ++ if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH &&
1940 + (flags & MEDIA_LNK_FL_ENABLED)) {
1941 +
1942 + ret = isp_pipeline_pm_power(source, sink_use);
1943 +diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
1944 +index f8c5e47a30aa..0aba9ff92102 100644
1945 +--- a/drivers/media/rc/rc-main.c
1946 ++++ b/drivers/media/rc/rc-main.c
1947 +@@ -1191,9 +1191,6 @@ static int rc_dev_uevent(struct device *device, struct kobj_uevent_env *env)
1948 + {
1949 + struct rc_dev *dev = to_rc_dev(device);
1950 +
1951 +- if (!dev || !dev->input_dev)
1952 +- return -ENODEV;
1953 +-
1954 + if (dev->rc_map.name)
1955 + ADD_HOTPLUG_VAR("NAME=%s", dev->rc_map.name);
1956 + if (dev->driver_name)
1957 +diff --git a/drivers/memory/tegra/tegra114.c b/drivers/memory/tegra/tegra114.c
1958 +index 511e9a25c151..16c4d26f51e7 100644
1959 +--- a/drivers/memory/tegra/tegra114.c
1960 ++++ b/drivers/memory/tegra/tegra114.c
1961 +@@ -935,6 +935,7 @@ static const struct tegra_smmu_soc tegra114_smmu_soc = {
1962 + .num_swgroups = ARRAY_SIZE(tegra114_swgroups),
1963 + .supports_round_robin_arbitration = false,
1964 + .supports_request_limit = false,
1965 ++ .num_tlb_lines = 32,
1966 + .num_asids = 4,
1967 + .ops = &tegra114_smmu_ops,
1968 + };
1969 +diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c
1970 +index 278d40b854c1..b153d0b732cf 100644
1971 +--- a/drivers/memory/tegra/tegra124.c
1972 ++++ b/drivers/memory/tegra/tegra124.c
1973 +@@ -981,6 +981,7 @@ static const struct tegra_smmu_soc tegra124_smmu_soc = {
1974 + .num_swgroups = ARRAY_SIZE(tegra124_swgroups),
1975 + .supports_round_robin_arbitration = true,
1976 + .supports_request_limit = true,
1977 ++ .num_tlb_lines = 32,
1978 + .num_asids = 128,
1979 + .ops = &tegra124_smmu_ops,
1980 + };
1981 +diff --git a/drivers/memory/tegra/tegra30.c b/drivers/memory/tegra/tegra30.c
1982 +index 71fe9376fe53..f422b18f45f3 100644
1983 +--- a/drivers/memory/tegra/tegra30.c
1984 ++++ b/drivers/memory/tegra/tegra30.c
1985 +@@ -957,6 +957,7 @@ static const struct tegra_smmu_soc tegra30_smmu_soc = {
1986 + .num_swgroups = ARRAY_SIZE(tegra30_swgroups),
1987 + .supports_round_robin_arbitration = false,
1988 + .supports_request_limit = false,
1989 ++ .num_tlb_lines = 16,
1990 + .num_asids = 4,
1991 + .ops = &tegra30_smmu_ops,
1992 + };
1993 +diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
1994 +index 1ef01647265f..4f1b0bdb9cf8 100644
1995 +--- a/drivers/misc/cxl/pci.c
1996 ++++ b/drivers/misc/cxl/pci.c
1997 +@@ -778,14 +778,9 @@ int cxl_reset(struct cxl *adapter)
1998 + {
1999 + struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
2000 + int rc;
2001 +- int i;
2002 +- u32 val;
2003 +
2004 + dev_info(&dev->dev, "CXL reset\n");
2005 +
2006 +- for (i = 0; i < adapter->slices; i++)
2007 +- cxl_remove_afu(adapter->afu[i]);
2008 +-
2009 + /* pcie_warm_reset requests a fundamental pci reset which includes a
2010 + * PERST assert/deassert. PERST triggers a loading of the image
2011 + * if "user" or "factory" is selected in sysfs */
2012 +@@ -794,20 +789,6 @@ int cxl_reset(struct cxl *adapter)
2013 + return rc;
2014 + }
2015 +
2016 +- /* the PERST done above fences the PHB. So, reset depends on EEH
2017 +- * to unbind the driver, tell Sapphire to reinit the PHB, and rebind
2018 +- * the driver. Do an mmio read explictly to ensure EEH notices the
2019 +- * fenced PHB. Retry for a few seconds before giving up. */
2020 +- i = 0;
2021 +- while (((val = mmio_read32be(adapter->p1_mmio)) != 0xffffffff) &&
2022 +- (i < 5)) {
2023 +- msleep(500);
2024 +- i++;
2025 +- }
2026 +-
2027 +- if (val != 0xffffffff)
2028 +- dev_err(&dev->dev, "cxl: PERST failed to trigger EEH\n");
2029 +-
2030 + return rc;
2031 + }
2032 +
2033 +@@ -1062,8 +1043,6 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
2034 + int slice;
2035 + int rc;
2036 +
2037 +- pci_dev_get(dev);
2038 +-
2039 + if (cxl_verbose)
2040 + dump_cxl_config_space(dev);
2041 +
2042 +diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
2043 +index 92e7671426eb..588fb7908642 100644
2044 +--- a/drivers/mmc/core/core.c
2045 ++++ b/drivers/mmc/core/core.c
2046 +@@ -330,8 +330,10 @@ EXPORT_SYMBOL(mmc_start_bkops);
2047 + */
2048 + static void mmc_wait_data_done(struct mmc_request *mrq)
2049 + {
2050 +- mrq->host->context_info.is_done_rcv = true;
2051 +- wake_up_interruptible(&mrq->host->context_info.wait);
2052 ++ struct mmc_context_info *context_info = &mrq->host->context_info;
2053 ++
2054 ++ context_info->is_done_rcv = true;
2055 ++ wake_up_interruptible(&context_info->wait);
2056 + }
2057 +
2058 + static void mmc_wait_done(struct mmc_request *mrq)
2059 +diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
2060 +index 7a3fc16d0a6c..53cfc7cedefe 100644
2061 +--- a/drivers/mmc/host/sdhci-pci.c
2062 ++++ b/drivers/mmc/host/sdhci-pci.c
2063 +@@ -549,6 +549,7 @@ static int jmicron_resume(struct sdhci_pci_chip *chip)
2064 + static const struct sdhci_pci_fixes sdhci_o2 = {
2065 + .probe = sdhci_pci_o2_probe,
2066 + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
2067 ++ .quirks2 = SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD,
2068 + .probe_slot = sdhci_pci_o2_probe_slot,
2069 + .resume = sdhci_pci_o2_resume,
2070 + };
2071 +diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
2072 +index bec8a307f8cd..fd41b91436ec 100644
2073 +--- a/drivers/mmc/host/sdhci.c
2074 ++++ b/drivers/mmc/host/sdhci.c
2075 +@@ -1146,6 +1146,7 @@ static u16 sdhci_get_preset_value(struct sdhci_host *host)
2076 + preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
2077 + break;
2078 + case MMC_TIMING_UHS_DDR50:
2079 ++ case MMC_TIMING_MMC_DDR52:
2080 + preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
2081 + break;
2082 + case MMC_TIMING_MMC_HS400:
2083 +@@ -1598,7 +1599,8 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
2084 + (ios->timing == MMC_TIMING_UHS_SDR25) ||
2085 + (ios->timing == MMC_TIMING_UHS_SDR50) ||
2086 + (ios->timing == MMC_TIMING_UHS_SDR104) ||
2087 +- (ios->timing == MMC_TIMING_UHS_DDR50))) {
2088 ++ (ios->timing == MMC_TIMING_UHS_DDR50) ||
2089 ++ (ios->timing == MMC_TIMING_MMC_DDR52))) {
2090 + u16 preset;
2091 +
2092 + sdhci_enable_preset_value(host, true);
2093 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
2094 +index d5fe5d5f490f..16d87bf8ac3c 100644
2095 +--- a/drivers/net/bonding/bond_main.c
2096 ++++ b/drivers/net/bonding/bond_main.c
2097 +@@ -625,6 +625,23 @@ static void bond_set_dev_addr(struct net_device *bond_dev,
2098 + call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
2099 + }
2100 +
2101 ++static struct slave *bond_get_old_active(struct bonding *bond,
2102 ++ struct slave *new_active)
2103 ++{
2104 ++ struct slave *slave;
2105 ++ struct list_head *iter;
2106 ++
2107 ++ bond_for_each_slave(bond, slave, iter) {
2108 ++ if (slave == new_active)
2109 ++ continue;
2110 ++
2111 ++ if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
2112 ++ return slave;
2113 ++ }
2114 ++
2115 ++ return NULL;
2116 ++}
2117 ++
2118 + /* bond_do_fail_over_mac
2119 + *
2120 + * Perform special MAC address swapping for fail_over_mac settings
2121 +@@ -652,6 +669,9 @@ static void bond_do_fail_over_mac(struct bonding *bond,
2122 + if (!new_active)
2123 + return;
2124 +
2125 ++ if (!old_active)
2126 ++ old_active = bond_get_old_active(bond, new_active);
2127 ++
2128 + if (old_active) {
2129 + ether_addr_copy(tmp_mac, new_active->dev->dev_addr);
2130 + ether_addr_copy(saddr.sa_data,
2131 +@@ -1902,6 +1922,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
2132 + bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
2133 + netdev_info(bond_dev, "Destroying bond %s\n",
2134 + bond_dev->name);
2135 ++ bond_remove_proc_entry(bond);
2136 + unregister_netdevice(bond_dev);
2137 + }
2138 + return ret;
2139 +diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
2140 +index 069952fa5d64..0d8af5bb5907 100644
2141 +--- a/drivers/net/ethernet/broadcom/tg3.c
2142 ++++ b/drivers/net/ethernet/broadcom/tg3.c
2143 +@@ -10757,7 +10757,7 @@ static ssize_t tg3_show_temp(struct device *dev,
2144 + tg3_ape_scratchpad_read(tp, &temperature, attr->index,
2145 + sizeof(temperature));
2146 + spin_unlock_bh(&tp->lock);
2147 +- return sprintf(buf, "%u\n", temperature);
2148 ++ return sprintf(buf, "%u\n", temperature * 1000);
2149 + }
2150 +
2151 +
2152 +diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
2153 +index caae6cb2bc1a..a1c30ee60888 100644
2154 +--- a/drivers/net/ethernet/brocade/bna/bnad.c
2155 ++++ b/drivers/net/ethernet/brocade/bna/bnad.c
2156 +@@ -675,6 +675,7 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
2157 + if (!next_cmpl->valid)
2158 + break;
2159 + }
2160 ++ packets++;
2161 +
2162 + /* TODO: BNA_CQ_EF_LOCAL ? */
2163 + if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
2164 +@@ -691,7 +692,6 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
2165 + else
2166 + bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
2167 +
2168 +- packets++;
2169 + rcb->rxq->rx_packets++;
2170 + rcb->rxq->rx_bytes += totlen;
2171 + ccb->bytes_per_intr += totlen;
2172 +diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
2173 +index c754b2027281..c9da1b5d4804 100644
2174 +--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
2175 ++++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
2176 +@@ -216,7 +216,7 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
2177 +
2178 + static inline bool fm10k_page_is_reserved(struct page *page)
2179 + {
2180 +- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
2181 ++ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
2182 + }
2183 +
2184 + static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
2185 +diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
2186 +index c2bd4f98a837..212d668dabb3 100644
2187 +--- a/drivers/net/ethernet/intel/igb/igb.h
2188 ++++ b/drivers/net/ethernet/intel/igb/igb.h
2189 +@@ -540,6 +540,7 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
2190 + struct sk_buff *skb);
2191 + int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
2192 + int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
2193 ++void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
2194 + #ifdef CONFIG_IGB_HWMON
2195 + void igb_sysfs_exit(struct igb_adapter *adapter);
2196 + int igb_sysfs_init(struct igb_adapter *adapter);
2197 +diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
2198 +index d5673eb90c54..0afc0913e5b9 100644
2199 +--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
2200 ++++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
2201 +@@ -2991,6 +2991,7 @@ static int igb_set_channels(struct net_device *netdev,
2202 + {
2203 + struct igb_adapter *adapter = netdev_priv(netdev);
2204 + unsigned int count = ch->combined_count;
2205 ++ unsigned int max_combined = 0;
2206 +
2207 + /* Verify they are not requesting separate vectors */
2208 + if (!count || ch->rx_count || ch->tx_count)
2209 +@@ -3001,11 +3002,13 @@ static int igb_set_channels(struct net_device *netdev,
2210 + return -EINVAL;
2211 +
2212 + /* Verify the number of channels doesn't exceed hw limits */
2213 +- if (count > igb_max_channels(adapter))
2214 ++ max_combined = igb_max_channels(adapter);
2215 ++ if (count > max_combined)
2216 + return -EINVAL;
2217 +
2218 + if (count != adapter->rss_queues) {
2219 + adapter->rss_queues = count;
2220 ++ igb_set_flag_queue_pairs(adapter, max_combined);
2221 +
2222 + /* Hardware has to reinitialize queues and interrupts to
2223 + * match the new configuration.
2224 +diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
2225 +index a0a9b1fcb5e8..4f6bf996851e 100644
2226 +--- a/drivers/net/ethernet/intel/igb/igb_main.c
2227 ++++ b/drivers/net/ethernet/intel/igb/igb_main.c
2228 +@@ -1205,10 +1205,14 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
2229 +
2230 + /* allocate q_vector and rings */
2231 + q_vector = adapter->q_vector[v_idx];
2232 +- if (!q_vector)
2233 ++ if (!q_vector) {
2234 + q_vector = kzalloc(size, GFP_KERNEL);
2235 +- else
2236 ++ } else if (size > ksize(q_vector)) {
2237 ++ kfree_rcu(q_vector, rcu);
2238 ++ q_vector = kzalloc(size, GFP_KERNEL);
2239 ++ } else {
2240 + memset(q_vector, 0, size);
2241 ++ }
2242 + if (!q_vector)
2243 + return -ENOMEM;
2244 +
2245 +@@ -2901,6 +2905,14 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter)
2246 +
2247 + adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
2248 +
2249 ++ igb_set_flag_queue_pairs(adapter, max_rss_queues);
2250 ++}
2251 ++
2252 ++void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
2253 ++ const u32 max_rss_queues)
2254 ++{
2255 ++ struct e1000_hw *hw = &adapter->hw;
2256 ++
2257 + /* Determine if we need to pair queues. */
2258 + switch (hw->mac.type) {
2259 + case e1000_82575:
2260 +@@ -6584,7 +6596,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
2261 +
2262 + static inline bool igb_page_is_reserved(struct page *page)
2263 + {
2264 +- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
2265 ++ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
2266 + }
2267 +
2268 + static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
2269 +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2270 +index 5be12a00e1f4..463ff47200f1 100644
2271 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2272 ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2273 +@@ -1829,7 +1829,7 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
2274 +
2275 + static inline bool ixgbe_page_is_reserved(struct page *page)
2276 + {
2277 +- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
2278 ++ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
2279 + }
2280 +
2281 + /**
2282 +diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
2283 +index e71cdde9cb01..1d7b00b038a2 100644
2284 +--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
2285 ++++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
2286 +@@ -765,7 +765,7 @@ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
2287 +
2288 + static inline bool ixgbevf_page_is_reserved(struct page *page)
2289 + {
2290 +- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
2291 ++ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
2292 + }
2293 +
2294 + /**
2295 +diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
2296 +index 2619c9fbf42d..983b1d51244d 100644
2297 +--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
2298 ++++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
2299 +@@ -573,7 +573,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
2300 + continue;
2301 + mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
2302 + __func__, i, port);
2303 +- s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
2304 ++ s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
2305 + if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
2306 + eqe->event.port_change.port =
2307 + cpu_to_be32(
2308 +@@ -608,7 +608,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
2309 + continue;
2310 + if (i == mlx4_master_func_num(dev))
2311 + continue;
2312 +- s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
2313 ++ s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
2314 + if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
2315 + eqe->event.port_change.port =
2316 + cpu_to_be32(
2317 +diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
2318 +index cf98cc9bbc8d..73b6fc21ea00 100644
2319 +--- a/drivers/net/ethernet/rocker/rocker.c
2320 ++++ b/drivers/net/ethernet/rocker/rocker.c
2321 +@@ -4587,6 +4587,7 @@ static void rocker_remove_ports(struct rocker *rocker)
2322 + rocker_port = rocker->ports[i];
2323 + rocker_port_ig_tbl(rocker_port, ROCKER_OP_FLAG_REMOVE);
2324 + unregister_netdev(rocker_port->dev);
2325 ++ free_netdev(rocker_port->dev);
2326 + }
2327 + kfree(rocker->ports);
2328 + }
2329 +diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h
2330 +index ad3996038018..799c2929c536 100644
2331 +--- a/drivers/net/ethernet/stmicro/stmmac/descs.h
2332 ++++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
2333 +@@ -158,6 +158,8 @@ struct dma_desc {
2334 + u32 buffer2_size:13;
2335 + u32 reserved4:3;
2336 + } etx; /* -- enhanced -- */
2337 ++
2338 ++ u64 all_flags;
2339 + } des01;
2340 + unsigned int des2;
2341 + unsigned int des3;
2342 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
2343 +index 6249a4ec08f0..573708123338 100644
2344 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
2345 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
2346 +@@ -38,7 +38,6 @@ struct rk_priv_data {
2347 + bool clock_input;
2348 +
2349 + struct clk *clk_mac;
2350 +- struct clk *clk_mac_pll;
2351 + struct clk *gmac_clkin;
2352 + struct clk *mac_clk_rx;
2353 + struct clk *mac_clk_tx;
2354 +@@ -208,7 +207,7 @@ static int gmac_clk_init(struct rk_priv_data *bsp_priv)
2355 + dev_info(dev, "%s: clock input from PHY\n", __func__);
2356 + } else {
2357 + if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
2358 +- clk_set_rate(bsp_priv->clk_mac_pll, 50000000);
2359 ++ clk_set_rate(bsp_priv->clk_mac, 50000000);
2360 + }
2361 +
2362 + return 0;
2363 +diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
2364 +index 1e2bcf5f89e1..7d944449f5ef 100644
2365 +--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
2366 ++++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
2367 +@@ -240,6 +240,7 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
2368 + static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
2369 + int mode, int end)
2370 + {
2371 ++ p->des01.all_flags = 0;
2372 + p->des01.erx.own = 1;
2373 + p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
2374 +
2375 +@@ -254,7 +255,7 @@ static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
2376 +
2377 + static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
2378 + {
2379 +- p->des01.etx.own = 0;
2380 ++ p->des01.all_flags = 0;
2381 + if (mode == STMMAC_CHAIN_MODE)
2382 + ehn_desc_tx_set_on_chain(p, end);
2383 + else
2384 +diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
2385 +index 35ad4f427ae2..48c3456445b2 100644
2386 +--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
2387 ++++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
2388 +@@ -123,6 +123,7 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
2389 + static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
2390 + int end)
2391 + {
2392 ++ p->des01.all_flags = 0;
2393 + p->des01.rx.own = 1;
2394 + p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
2395 +
2396 +@@ -137,7 +138,7 @@ static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
2397 +
2398 + static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end)
2399 + {
2400 +- p->des01.tx.own = 0;
2401 ++ p->des01.all_flags = 0;
2402 + if (mode == STMMAC_CHAIN_MODE)
2403 + ndesc_tx_set_on_chain(p, end);
2404 + else
2405 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2406 +index 2c5ce2baca87..c274cdc5df1e 100644
2407 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2408 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2409 +@@ -829,8 +829,11 @@ static int stmmac_init_phy(struct net_device *dev)
2410 +
2411 + phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, interface);
2412 +
2413 +- if (IS_ERR(phydev)) {
2414 ++ if (IS_ERR_OR_NULL(phydev)) {
2415 + pr_err("%s: Could not attach to PHY\n", dev->name);
2416 ++ if (!phydev)
2417 ++ return -ENODEV;
2418 ++
2419 + return PTR_ERR(phydev);
2420 + }
2421 +
2422 +@@ -1189,41 +1192,41 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv)
2423 + goto err_tx_skbuff;
2424 +
2425 + if (priv->extend_desc) {
2426 +- priv->dma_erx = dma_alloc_coherent(priv->device, rxsize *
2427 +- sizeof(struct
2428 +- dma_extended_desc),
2429 +- &priv->dma_rx_phy,
2430 +- GFP_KERNEL);
2431 ++ priv->dma_erx = dma_zalloc_coherent(priv->device, rxsize *
2432 ++ sizeof(struct
2433 ++ dma_extended_desc),
2434 ++ &priv->dma_rx_phy,
2435 ++ GFP_KERNEL);
2436 + if (!priv->dma_erx)
2437 + goto err_dma;
2438 +
2439 +- priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
2440 +- sizeof(struct
2441 +- dma_extended_desc),
2442 +- &priv->dma_tx_phy,
2443 +- GFP_KERNEL);
2444 ++ priv->dma_etx = dma_zalloc_coherent(priv->device, txsize *
2445 ++ sizeof(struct
2446 ++ dma_extended_desc),
2447 ++ &priv->dma_tx_phy,
2448 ++ GFP_KERNEL);
2449 + if (!priv->dma_etx) {
2450 + dma_free_coherent(priv->device, priv->dma_rx_size *
2451 +- sizeof(struct dma_extended_desc),
2452 +- priv->dma_erx, priv->dma_rx_phy);
2453 ++ sizeof(struct dma_extended_desc),
2454 ++ priv->dma_erx, priv->dma_rx_phy);
2455 + goto err_dma;
2456 + }
2457 + } else {
2458 +- priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
2459 +- sizeof(struct dma_desc),
2460 +- &priv->dma_rx_phy,
2461 +- GFP_KERNEL);
2462 ++ priv->dma_rx = dma_zalloc_coherent(priv->device, rxsize *
2463 ++ sizeof(struct dma_desc),
2464 ++ &priv->dma_rx_phy,
2465 ++ GFP_KERNEL);
2466 + if (!priv->dma_rx)
2467 + goto err_dma;
2468 +
2469 +- priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
2470 +- sizeof(struct dma_desc),
2471 +- &priv->dma_tx_phy,
2472 +- GFP_KERNEL);
2473 ++ priv->dma_tx = dma_zalloc_coherent(priv->device, txsize *
2474 ++ sizeof(struct dma_desc),
2475 ++ &priv->dma_tx_phy,
2476 ++ GFP_KERNEL);
2477 + if (!priv->dma_tx) {
2478 + dma_free_coherent(priv->device, priv->dma_rx_size *
2479 +- sizeof(struct dma_desc),
2480 +- priv->dma_rx, priv->dma_rx_phy);
2481 ++ sizeof(struct dma_desc),
2482 ++ priv->dma_rx, priv->dma_rx_phy);
2483 + goto err_dma;
2484 + }
2485 + }
2486 +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
2487 +index 63c7810e1545..7fbca37a1adf 100644
2488 +--- a/drivers/net/virtio_net.c
2489 ++++ b/drivers/net/virtio_net.c
2490 +@@ -1828,7 +1828,8 @@ static int virtnet_probe(struct virtio_device *vdev)
2491 + else
2492 + vi->hdr_len = sizeof(struct virtio_net_hdr);
2493 +
2494 +- if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT))
2495 ++ if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
2496 ++ virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
2497 + vi->any_header_sg = true;
2498 +
2499 + if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
2500 +diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
2501 +index 23806c243a53..fd4a5353d216 100644
2502 +--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
2503 ++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
2504 +@@ -321,6 +321,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
2505 + {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
2506 + {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
2507 + {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
2508 ++ {RTL_USB_DEVICE(0x0846, 0x9043, rtl92cu_hal_cfg)}, /*NG WNA1000Mv2*/
2509 + {RTL_USB_DEVICE(0x0b05, 0x17ba, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
2510 + {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
2511 + {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
2512 +diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
2513 +index 57966e3c8e8d..3fa2fb7c8e4e 100644
2514 +--- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
2515 ++++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
2516 +@@ -2180,7 +2180,7 @@ static int _rtl8821ae_set_media_status(struct ieee80211_hw *hw,
2517 +
2518 + rtl_write_byte(rtlpriv, MSR, bt_msr);
2519 + rtlpriv->cfg->ops->led_control(hw, ledaction);
2520 +- if ((bt_msr & 0xfc) == MSR_AP)
2521 ++ if ((bt_msr & MSR_MASK) == MSR_AP)
2522 + rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
2523 + else
2524 + rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
2525 +diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h b/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h
2526 +index 53668fc8f23e..1d6110f9c1fb 100644
2527 +--- a/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h
2528 ++++ b/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h
2529 +@@ -429,6 +429,7 @@
2530 + #define MSR_ADHOC 0x01
2531 + #define MSR_INFRA 0x02
2532 + #define MSR_AP 0x03
2533 ++#define MSR_MASK 0x03
2534 +
2535 + #define RRSR_RSC_OFFSET 21
2536 + #define RRSR_SHORT_OFFSET 23
2537 +diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
2538 +index 0d2594395ffb..0866c5dfdf87 100644
2539 +--- a/drivers/net/xen-netback/netback.c
2540 ++++ b/drivers/net/xen-netback/netback.c
2541 +@@ -1571,13 +1571,13 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
2542 + smp_rmb();
2543 +
2544 + while (dc != dp) {
2545 +- BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS);
2546 ++ BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
2547 + pending_idx =
2548 + queue->dealloc_ring[pending_index(dc++)];
2549 +
2550 +- pending_idx_release[gop-queue->tx_unmap_ops] =
2551 ++ pending_idx_release[gop - queue->tx_unmap_ops] =
2552 + pending_idx;
2553 +- queue->pages_to_unmap[gop-queue->tx_unmap_ops] =
2554 ++ queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
2555 + queue->mmap_pages[pending_idx];
2556 + gnttab_set_unmap_op(gop,
2557 + idx_to_kaddr(queue, pending_idx),
2558 +diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/st21nfca.c
2559 +index d251f7229c4e..051286562fab 100644
2560 +--- a/drivers/nfc/st21nfca/st21nfca.c
2561 ++++ b/drivers/nfc/st21nfca/st21nfca.c
2562 +@@ -148,14 +148,14 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
2563 + ST21NFCA_DEVICE_MGNT_GATE,
2564 + ST21NFCA_DEVICE_MGNT_PIPE);
2565 + if (r < 0)
2566 +- goto free_info;
2567 ++ return r;
2568 +
2569 + /* Get pipe list */
2570 + r = nfc_hci_send_cmd(hdev, ST21NFCA_DEVICE_MGNT_GATE,
2571 + ST21NFCA_DM_GETINFO, pipe_list, sizeof(pipe_list),
2572 + &skb_pipe_list);
2573 + if (r < 0)
2574 +- goto free_info;
2575 ++ return r;
2576 +
2577 + /* Complete the existing gate_pipe table */
2578 + for (i = 0; i < skb_pipe_list->len; i++) {
2579 +@@ -181,6 +181,7 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
2580 + info->src_host_id != ST21NFCA_ESE_HOST_ID) {
2581 + pr_err("Unexpected apdu_reader pipe on host %x\n",
2582 + info->src_host_id);
2583 ++ kfree_skb(skb_pipe_info);
2584 + continue;
2585 + }
2586 +
2587 +@@ -200,6 +201,7 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
2588 + hdev->pipes[st21nfca_gates[j].pipe].dest_host =
2589 + info->src_host_id;
2590 + }
2591 ++ kfree_skb(skb_pipe_info);
2592 + }
2593 +
2594 + /*
2595 +@@ -214,13 +216,12 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
2596 + st21nfca_gates[i].gate,
2597 + st21nfca_gates[i].pipe);
2598 + if (r < 0)
2599 +- goto free_info;
2600 ++ goto free_list;
2601 + }
2602 + }
2603 +
2604 + memcpy(hdev->init_data.gates, st21nfca_gates, sizeof(st21nfca_gates));
2605 +-free_info:
2606 +- kfree_skb(skb_pipe_info);
2607 ++free_list:
2608 + kfree_skb(skb_pipe_list);
2609 + return r;
2610 + }
2611 +diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
2612 +index cde35c5d0191..d91f721a05b6 100644
2613 +--- a/drivers/of/fdt.c
2614 ++++ b/drivers/of/fdt.c
2615 +@@ -955,7 +955,9 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
2616 + }
2617 +
2618 + #ifdef CONFIG_HAVE_MEMBLOCK
2619 +-#define MAX_PHYS_ADDR ((phys_addr_t)~0)
2620 ++#ifndef MAX_MEMBLOCK_ADDR
2621 ++#define MAX_MEMBLOCK_ADDR ((phys_addr_t)~0)
2622 ++#endif
2623 +
2624 + void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
2625 + {
2626 +@@ -972,16 +974,16 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
2627 + }
2628 + size &= PAGE_MASK;
2629 +
2630 +- if (base > MAX_PHYS_ADDR) {
2631 ++ if (base > MAX_MEMBLOCK_ADDR) {
2632 + pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
2633 + base, base + size);
2634 + return;
2635 + }
2636 +
2637 +- if (base + size - 1 > MAX_PHYS_ADDR) {
2638 ++ if (base + size - 1 > MAX_MEMBLOCK_ADDR) {
2639 + pr_warning("Ignoring memory range 0x%llx - 0x%llx\n",
2640 +- ((u64)MAX_PHYS_ADDR) + 1, base + size);
2641 +- size = MAX_PHYS_ADDR - base + 1;
2642 ++ ((u64)MAX_MEMBLOCK_ADDR) + 1, base + size);
2643 ++ size = MAX_MEMBLOCK_ADDR - base + 1;
2644 + }
2645 +
2646 + if (base + size < phys_offset) {
2647 +diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
2648 +index dceb9ddfd99a..a32c1f6c252c 100644
2649 +--- a/drivers/parisc/lba_pci.c
2650 ++++ b/drivers/parisc/lba_pci.c
2651 +@@ -1556,8 +1556,11 @@ lba_driver_probe(struct parisc_device *dev)
2652 + if (lba_dev->hba.lmmio_space.flags)
2653 + pci_add_resource_offset(&resources, &lba_dev->hba.lmmio_space,
2654 + lba_dev->hba.lmmio_space_offset);
2655 +- if (lba_dev->hba.gmmio_space.flags)
2656 +- pci_add_resource(&resources, &lba_dev->hba.gmmio_space);
2657 ++ if (lba_dev->hba.gmmio_space.flags) {
2658 ++ /* pci_add_resource(&resources, &lba_dev->hba.gmmio_space); */
2659 ++ pr_warn("LBA: Not registering GMMIO space %pR\n",
2660 ++ &lba_dev->hba.gmmio_space);
2661 ++ }
2662 +
2663 + pci_add_resource(&resources, &lba_dev->hba.bus_num);
2664 +
2665 +diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
2666 +index 944f50015ed0..73de4efcbe6e 100644
2667 +--- a/drivers/pci/Kconfig
2668 ++++ b/drivers/pci/Kconfig
2669 +@@ -2,7 +2,7 @@
2670 + # PCI configuration
2671 + #
2672 + config PCI_BUS_ADDR_T_64BIT
2673 +- def_bool y if (ARCH_DMA_ADDR_T_64BIT || (64BIT && !PARISC))
2674 ++ def_bool y if (ARCH_DMA_ADDR_T_64BIT || 64BIT)
2675 + depends on PCI
2676 +
2677 + config PCI_MSI
2678 +diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
2679 +index 2f797cb7e205..774781450885 100644
2680 +--- a/drivers/pinctrl/pinctrl-at91.c
2681 ++++ b/drivers/pinctrl/pinctrl-at91.c
2682 +@@ -320,6 +320,9 @@ static const struct pinctrl_ops at91_pctrl_ops = {
2683 + static void __iomem *pin_to_controller(struct at91_pinctrl *info,
2684 + unsigned int bank)
2685 + {
2686 ++ if (!gpio_chips[bank])
2687 ++ return NULL;
2688 ++
2689 + return gpio_chips[bank]->regbase;
2690 + }
2691 +
2692 +@@ -729,6 +732,10 @@ static int at91_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
2693 + pin = &pins_conf[i];
2694 + at91_pin_dbg(info->dev, pin);
2695 + pio = pin_to_controller(info, pin->bank);
2696 ++
2697 ++ if (!pio)
2698 ++ continue;
2699 ++
2700 + mask = pin_to_mask(pin->pin);
2701 + at91_mux_disable_interrupt(pio, mask);
2702 + switch (pin->mux) {
2703 +@@ -848,6 +855,10 @@ static int at91_pinconf_get(struct pinctrl_dev *pctldev,
2704 + *config = 0;
2705 + dev_dbg(info->dev, "%s:%d, pin_id=%d", __func__, __LINE__, pin_id);
2706 + pio = pin_to_controller(info, pin_to_bank(pin_id));
2707 ++
2708 ++ if (!pio)
2709 ++ return -EINVAL;
2710 ++
2711 + pin = pin_id % MAX_NB_GPIO_PER_BANK;
2712 +
2713 + if (at91_mux_get_multidrive(pio, pin))
2714 +@@ -889,6 +900,10 @@ static int at91_pinconf_set(struct pinctrl_dev *pctldev,
2715 + "%s:%d, pin_id=%d, config=0x%lx",
2716 + __func__, __LINE__, pin_id, config);
2717 + pio = pin_to_controller(info, pin_to_bank(pin_id));
2718 ++
2719 ++ if (!pio)
2720 ++ return -EINVAL;
2721 ++
2722 + pin = pin_id % MAX_NB_GPIO_PER_BANK;
2723 + mask = pin_to_mask(pin);
2724 +
2725 +diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
2726 +index cb7cd8d79329..cd78f1166b33 100644
2727 +--- a/drivers/platform/x86/ideapad-laptop.c
2728 ++++ b/drivers/platform/x86/ideapad-laptop.c
2729 +@@ -852,6 +852,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
2730 + },
2731 + },
2732 + {
2733 ++ .ident = "Lenovo Yoga 3 14",
2734 ++ .matches = {
2735 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
2736 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Yoga 3 14"),
2737 ++ },
2738 ++ },
2739 ++ {
2740 + .ident = "Lenovo Yoga 3 Pro 1370",
2741 + .matches = {
2742 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
2743 +diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c
2744 +index 4337c3bc6ace..afea84c7a155 100644
2745 +--- a/drivers/rtc/rtc-abx80x.c
2746 ++++ b/drivers/rtc/rtc-abx80x.c
2747 +@@ -28,7 +28,7 @@
2748 + #define ABX8XX_REG_WD 0x07
2749 +
2750 + #define ABX8XX_REG_CTRL1 0x10
2751 +-#define ABX8XX_CTRL_WRITE BIT(1)
2752 ++#define ABX8XX_CTRL_WRITE BIT(0)
2753 + #define ABX8XX_CTRL_12_24 BIT(6)
2754 +
2755 + #define ABX8XX_REG_CFG_KEY 0x1f
2756 +diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
2757 +index 76cbad7a99d3..c5a2523b0185 100644
2758 +--- a/drivers/rtc/rtc-s3c.c
2759 ++++ b/drivers/rtc/rtc-s3c.c
2760 +@@ -39,6 +39,7 @@ struct s3c_rtc {
2761 + void __iomem *base;
2762 + struct clk *rtc_clk;
2763 + struct clk *rtc_src_clk;
2764 ++ bool clk_disabled;
2765 +
2766 + struct s3c_rtc_data *data;
2767 +
2768 +@@ -71,9 +72,12 @@ static void s3c_rtc_enable_clk(struct s3c_rtc *info)
2769 + unsigned long irq_flags;
2770 +
2771 + spin_lock_irqsave(&info->alarm_clk_lock, irq_flags);
2772 +- clk_enable(info->rtc_clk);
2773 +- if (info->data->needs_src_clk)
2774 +- clk_enable(info->rtc_src_clk);
2775 ++ if (info->clk_disabled) {
2776 ++ clk_enable(info->rtc_clk);
2777 ++ if (info->data->needs_src_clk)
2778 ++ clk_enable(info->rtc_src_clk);
2779 ++ info->clk_disabled = false;
2780 ++ }
2781 + spin_unlock_irqrestore(&info->alarm_clk_lock, irq_flags);
2782 + }
2783 +
2784 +@@ -82,9 +86,12 @@ static void s3c_rtc_disable_clk(struct s3c_rtc *info)
2785 + unsigned long irq_flags;
2786 +
2787 + spin_lock_irqsave(&info->alarm_clk_lock, irq_flags);
2788 +- if (info->data->needs_src_clk)
2789 +- clk_disable(info->rtc_src_clk);
2790 +- clk_disable(info->rtc_clk);
2791 ++ if (!info->clk_disabled) {
2792 ++ if (info->data->needs_src_clk)
2793 ++ clk_disable(info->rtc_src_clk);
2794 ++ clk_disable(info->rtc_clk);
2795 ++ info->clk_disabled = true;
2796 ++ }
2797 + spin_unlock_irqrestore(&info->alarm_clk_lock, irq_flags);
2798 + }
2799 +
2800 +@@ -128,6 +135,11 @@ static int s3c_rtc_setaie(struct device *dev, unsigned int enabled)
2801 +
2802 + s3c_rtc_disable_clk(info);
2803 +
2804 ++ if (enabled)
2805 ++ s3c_rtc_enable_clk(info);
2806 ++ else
2807 ++ s3c_rtc_disable_clk(info);
2808 ++
2809 + return 0;
2810 + }
2811 +
2812 +diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
2813 +index 8c70d785ba73..ab60287ee72d 100644
2814 +--- a/drivers/rtc/rtc-s5m.c
2815 ++++ b/drivers/rtc/rtc-s5m.c
2816 +@@ -635,6 +635,16 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info)
2817 + case S2MPS13X:
2818 + data[0] = (0 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
2819 + ret = regmap_write(info->regmap, info->regs->ctrl, data[0]);
2820 ++ if (ret < 0)
2821 ++ break;
2822 ++
2823 ++ /*
2824 ++ * We should set the WUDR & (RUDR or AUDR) bits high after writing
2825 ++ * the RTC_CTRL register, just as when writing the alarm registers.
2826 ++ * The datasheet does not describe this, but the vendor code
2827 ++ * really does it.
2828 ++ */
2829 ++ ret = s5m8767_rtc_set_alarm_reg(info);
2830 + break;
2831 +
2832 + default:
2833 +diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
2834 +index 94e909c5a503..00d18c2bdb0f 100644
2835 +--- a/fs/btrfs/transaction.c
2836 ++++ b/fs/btrfs/transaction.c
2837 +@@ -1875,8 +1875,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
2838 + spin_unlock(&root->fs_info->trans_lock);
2839 +
2840 + wait_for_commit(root, prev_trans);
2841 ++ ret = prev_trans->aborted;
2842 +
2843 + btrfs_put_transaction(prev_trans);
2844 ++ if (ret)
2845 ++ goto cleanup_transaction;
2846 + } else {
2847 + spin_unlock(&root->fs_info->trans_lock);
2848 + }
2849 +diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
2850 +index 8b7898b7670f..64a9bca976d0 100644
2851 +--- a/fs/cifs/ioctl.c
2852 ++++ b/fs/cifs/ioctl.c
2853 +@@ -67,6 +67,12 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
2854 + goto out_drop_write;
2855 + }
2856 +
2857 ++ if (src_file.file->f_op->unlocked_ioctl != cifs_ioctl) {
2858 ++ rc = -EBADF;
2859 ++ cifs_dbg(VFS, "src file seems to be from a different filesystem type\n");
2860 ++ goto out_fput;
2861 ++ }
2862 ++
2863 + if ((!src_file.file->private_data) || (!dst_file->private_data)) {
2864 + rc = -EBADF;
2865 + cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
2866 +diff --git a/fs/coredump.c b/fs/coredump.c
2867 +index bbbe139ab280..8dd099dc5f9b 100644
2868 +--- a/fs/coredump.c
2869 ++++ b/fs/coredump.c
2870 +@@ -506,10 +506,10 @@ void do_coredump(const siginfo_t *siginfo)
2871 + const struct cred *old_cred;
2872 + struct cred *cred;
2873 + int retval = 0;
2874 +- int flag = 0;
2875 + int ispipe;
2876 + struct files_struct *displaced;
2877 +- bool need_nonrelative = false;
2878 ++ /* require nonrelative corefile path and be extra careful */
2879 ++ bool need_suid_safe = false;
2880 + bool core_dumped = false;
2881 + static atomic_t core_dump_count = ATOMIC_INIT(0);
2882 + struct coredump_params cprm = {
2883 +@@ -543,9 +543,8 @@ void do_coredump(const siginfo_t *siginfo)
2884 + */
2885 + if (__get_dumpable(cprm.mm_flags) == SUID_DUMP_ROOT) {
2886 + /* Setuid core dump mode */
2887 +- flag = O_EXCL; /* Stop rewrite attacks */
2888 + cred->fsuid = GLOBAL_ROOT_UID; /* Dump root private */
2889 +- need_nonrelative = true;
2890 ++ need_suid_safe = true;
2891 + }
2892 +
2893 + retval = coredump_wait(siginfo->si_signo, &core_state);
2894 +@@ -626,7 +625,7 @@ void do_coredump(const siginfo_t *siginfo)
2895 + if (cprm.limit < binfmt->min_coredump)
2896 + goto fail_unlock;
2897 +
2898 +- if (need_nonrelative && cn.corename[0] != '/') {
2899 ++ if (need_suid_safe && cn.corename[0] != '/') {
2900 + printk(KERN_WARNING "Pid %d(%s) can only dump core "\
2901 + "to fully qualified path!\n",
2902 + task_tgid_vnr(current), current->comm);
2903 +@@ -634,8 +633,35 @@ void do_coredump(const siginfo_t *siginfo)
2904 + goto fail_unlock;
2905 + }
2906 +
2907 ++ /*
2908 ++ * Unlink the file if it exists unless this is a SUID
2909 ++ * binary - in that case, we're running around with root
2910 ++ * privs and don't want to unlink another user's coredump.
2911 ++ */
2912 ++ if (!need_suid_safe) {
2913 ++ mm_segment_t old_fs;
2914 ++
2915 ++ old_fs = get_fs();
2916 ++ set_fs(KERNEL_DS);
2917 ++ /*
2918 ++ * If it doesn't exist, that's fine. If there's some
2919 ++ * other problem, we'll catch it at the filp_open().
2920 ++ */
2921 ++ (void) sys_unlink((const char __user *)cn.corename);
2922 ++ set_fs(old_fs);
2923 ++ }
2924 ++
2925 ++ /*
2926 ++ * There is a race between unlinking and creating the
2927 ++ * file, but if that causes an EEXIST here, that's
2928 ++ * fine - another process raced with us while creating
2929 ++ * the corefile, and the other process won. To userspace,
2930 ++ * what matters is that at least one of the two processes
2931 ++ * writes its coredump successfully, not which one.
2932 ++ */
2933 + cprm.file = filp_open(cn.corename,
2934 +- O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
2935 ++ O_CREAT | 2 | O_NOFOLLOW |
2936 ++ O_LARGEFILE | O_EXCL,
2937 + 0600);
2938 + if (IS_ERR(cprm.file))
2939 + goto fail_unlock;
2940 +@@ -652,11 +678,15 @@ void do_coredump(const siginfo_t *siginfo)
2941 + if (!S_ISREG(inode->i_mode))
2942 + goto close_fail;
2943 + /*
2944 +- * Dont allow local users get cute and trick others to coredump
2945 +- * into their pre-created files.
2946 ++ * Don't dump core if the filesystem changed owner or mode
2947 ++ * of the file during file creation. This is an issue when
2948 ++ * a process dumps core while its cwd is e.g. on a vfat
2949 ++ * filesystem.
2950 + */
2951 + if (!uid_eq(inode->i_uid, current_fsuid()))
2952 + goto close_fail;
2953 ++ if ((inode->i_mode & 0677) != 0600)
2954 ++ goto close_fail;
2955 + if (!(cprm.file->f_mode & FMODE_CAN_WRITE))
2956 + goto close_fail;
2957 + if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
2958 +diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c
2959 +index 8db0b464483f..63cd2c147221 100644
2960 +--- a/fs/ecryptfs/dentry.c
2961 ++++ b/fs/ecryptfs/dentry.c
2962 +@@ -45,20 +45,20 @@
2963 + static int ecryptfs_d_revalidate(struct dentry *dentry, unsigned int flags)
2964 + {
2965 + struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
2966 +- int rc;
2967 +-
2968 +- if (!(lower_dentry->d_flags & DCACHE_OP_REVALIDATE))
2969 +- return 1;
2970 ++ int rc = 1;
2971 +
2972 + if (flags & LOOKUP_RCU)
2973 + return -ECHILD;
2974 +
2975 +- rc = lower_dentry->d_op->d_revalidate(lower_dentry, flags);
2976 ++ if (lower_dentry->d_flags & DCACHE_OP_REVALIDATE)
2977 ++ rc = lower_dentry->d_op->d_revalidate(lower_dentry, flags);
2978 ++
2979 + if (d_really_is_positive(dentry)) {
2980 +- struct inode *lower_inode =
2981 +- ecryptfs_inode_to_lower(d_inode(dentry));
2982 ++ struct inode *inode = d_inode(dentry);
2983 +
2984 +- fsstack_copy_attr_all(d_inode(dentry), lower_inode);
2985 ++ fsstack_copy_attr_all(inode, ecryptfs_inode_to_lower(inode));
2986 ++ if (!inode->i_nlink)
2987 ++ return 0;
2988 + }
2989 + return rc;
2990 + }
2991 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
2992 +index 6b4eb94b04a5..ff89971e3ee0 100644
2993 +--- a/fs/ext4/super.c
2994 ++++ b/fs/ext4/super.c
2995 +@@ -324,6 +324,22 @@ static void save_error_info(struct super_block *sb, const char *func,
2996 + ext4_commit_super(sb, 1);
2997 + }
2998 +
2999 ++/*
3000 ++ * The del_gendisk() function uninitializes the disk-specific data
3001 ++ * structures, including the bdi structure, without telling anyone
3002 ++ * else. Once this happens, any attempt to call mark_buffer_dirty()
3003 ++ * (for example, by ext4_commit_super), will cause a kernel OOPS.
3004 ++ * This is a kludge to prevent these oops until we can put in a proper
3005 ++ * hook in del_gendisk() to inform the VFS and file system layers.
3006 ++ */
3007 ++static int block_device_ejected(struct super_block *sb)
3008 ++{
3009 ++ struct inode *bd_inode = sb->s_bdev->bd_inode;
3010 ++ struct backing_dev_info *bdi = inode_to_bdi(bd_inode);
3011 ++
3012 ++ return bdi->dev == NULL;
3013 ++}
3014 ++
3015 + static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
3016 + {
3017 + struct super_block *sb = journal->j_private;
3018 +@@ -4591,7 +4607,7 @@ static int ext4_commit_super(struct super_block *sb, int sync)
3019 + struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
3020 + int error = 0;
3021 +
3022 +- if (!sbh)
3023 ++ if (!sbh || block_device_ejected(sb))
3024 + return error;
3025 + if (buffer_write_io_error(sbh)) {
3026 + /*
3027 +@@ -4807,10 +4823,11 @@ static int ext4_freeze(struct super_block *sb)
3028 + error = jbd2_journal_flush(journal);
3029 + if (error < 0)
3030 + goto out;
3031 ++
3032 ++ /* Journal blocked and flushed, clear needs_recovery flag. */
3033 ++ EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
3034 + }
3035 +
3036 +- /* Journal blocked and flushed, clear needs_recovery flag. */
3037 +- EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
3038 + error = ext4_commit_super(sb, 1);
3039 + out:
3040 + if (journal)
3041 +@@ -4828,8 +4845,11 @@ static int ext4_unfreeze(struct super_block *sb)
3042 + if (sb->s_flags & MS_RDONLY)
3043 + return 0;
3044 +
3045 +- /* Reset the needs_recovery flag before the fs is unlocked. */
3046 +- EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
3047 ++ if (EXT4_SB(sb)->s_journal) {
3048 ++ /* Reset the needs_recovery flag before the fs is unlocked. */
3049 ++ EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
3050 ++ }
3051 ++
3052 + ext4_commit_super(sb, 1);
3053 + return 0;
3054 + }
3055 +diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
3056 +index d3fa6bd9503e..221719eac5de 100644
3057 +--- a/fs/hfs/bnode.c
3058 ++++ b/fs/hfs/bnode.c
3059 +@@ -288,7 +288,6 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
3060 + page_cache_release(page);
3061 + goto fail;
3062 + }
3063 +- page_cache_release(page);
3064 + node->page[i] = page;
3065 + }
3066 +
3067 +@@ -398,11 +397,11 @@ node_error:
3068 +
3069 + void hfs_bnode_free(struct hfs_bnode *node)
3070 + {
3071 +- //int i;
3072 ++ int i;
3073 +
3074 +- //for (i = 0; i < node->tree->pages_per_bnode; i++)
3075 +- // if (node->page[i])
3076 +- // page_cache_release(node->page[i]);
3077 ++ for (i = 0; i < node->tree->pages_per_bnode; i++)
3078 ++ if (node->page[i])
3079 ++ page_cache_release(node->page[i]);
3080 + kfree(node);
3081 + }
3082 +
3083 +diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c
3084 +index 9f4ee7f52026..6fc766df0461 100644
3085 +--- a/fs/hfs/brec.c
3086 ++++ b/fs/hfs/brec.c
3087 +@@ -131,13 +131,16 @@ skip:
3088 + hfs_bnode_write(node, entry, data_off + key_len, entry_len);
3089 + hfs_bnode_dump(node);
3090 +
3091 +- if (new_node) {
3092 +- /* update parent key if we inserted a key
3093 +- * at the start of the first node
3094 +- */
3095 +- if (!rec && new_node != node)
3096 +- hfs_brec_update_parent(fd);
3097 ++ /*
3098 ++ * update parent key if we inserted a key
3099 ++ * at the start of the node and it is not the new node
3100 ++ */
3101 ++ if (!rec && new_node != node) {
3102 ++ hfs_bnode_read_key(node, fd->search_key, data_off + size);
3103 ++ hfs_brec_update_parent(fd);
3104 ++ }
3105 +
3106 ++ if (new_node) {
3107 + hfs_bnode_put(fd->bnode);
3108 + if (!new_node->parent) {
3109 + hfs_btree_inc_height(tree);
3110 +@@ -166,9 +169,6 @@ skip:
3111 + goto again;
3112 + }
3113 +
3114 +- if (!rec)
3115 +- hfs_brec_update_parent(fd);
3116 +-
3117 + return 0;
3118 + }
3119 +
3120 +@@ -366,6 +366,8 @@ again:
3121 + if (IS_ERR(parent))
3122 + return PTR_ERR(parent);
3123 + __hfs_brec_find(parent, fd);
3124 ++ if (fd->record < 0)
3125 ++ return -ENOENT;
3126 + hfs_bnode_dump(parent);
3127 + rec = fd->record;
3128 +
3129 +diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
3130 +index 759708fd9331..63924662aaf3 100644
3131 +--- a/fs/hfsplus/bnode.c
3132 ++++ b/fs/hfsplus/bnode.c
3133 +@@ -454,7 +454,6 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
3134 + page_cache_release(page);
3135 + goto fail;
3136 + }
3137 +- page_cache_release(page);
3138 + node->page[i] = page;
3139 + }
3140 +
3141 +@@ -566,13 +565,11 @@ node_error:
3142 +
3143 + void hfs_bnode_free(struct hfs_bnode *node)
3144 + {
3145 +-#if 0
3146 + int i;
3147 +
3148 + for (i = 0; i < node->tree->pages_per_bnode; i++)
3149 + if (node->page[i])
3150 + page_cache_release(node->page[i]);
3151 +-#endif
3152 + kfree(node);
3153 + }
3154 +
3155 +diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
3156 +index 4227dc4f7437..8c44654ce274 100644
3157 +--- a/fs/jbd2/checkpoint.c
3158 ++++ b/fs/jbd2/checkpoint.c
3159 +@@ -417,12 +417,12 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
3160 + * journal_clean_one_cp_list
3161 + *
3162 + * Find all the written-back checkpoint buffers in the given list and
3163 +- * release them.
3164 ++ * release them. If 'destroy' is set, clean all buffers unconditionally.
3165 + *
3166 + * Called with j_list_lock held.
3167 + * Returns 1 if we freed the transaction, 0 otherwise.
3168 + */
3169 +-static int journal_clean_one_cp_list(struct journal_head *jh)
3170 ++static int journal_clean_one_cp_list(struct journal_head *jh, bool destroy)
3171 + {
3172 + struct journal_head *last_jh;
3173 + struct journal_head *next_jh = jh;
3174 +@@ -436,7 +436,10 @@ static int journal_clean_one_cp_list(struct journal_head *jh)
3175 + do {
3176 + jh = next_jh;
3177 + next_jh = jh->b_cpnext;
3178 +- ret = __try_to_free_cp_buf(jh);
3179 ++ if (!destroy)
3180 ++ ret = __try_to_free_cp_buf(jh);
3181 ++ else
3182 ++ ret = __jbd2_journal_remove_checkpoint(jh) + 1;
3183 + if (!ret)
3184 + return freed;
3185 + if (ret == 2)
3186 +@@ -459,10 +462,11 @@ static int journal_clean_one_cp_list(struct journal_head *jh)
3187 + * journal_clean_checkpoint_list
3188 + *
3189 + * Find all the written-back checkpoint buffers in the journal and release them.
3190 ++ * If 'destroy' is set, release all buffers unconditionally.
3191 + *
3192 + * Called with j_list_lock held.
3193 + */
3194 +-void __jbd2_journal_clean_checkpoint_list(journal_t *journal)
3195 ++void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy)
3196 + {
3197 + transaction_t *transaction, *last_transaction, *next_transaction;
3198 + int ret;
3199 +@@ -476,7 +480,8 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal)
3200 + do {
3201 + transaction = next_transaction;
3202 + next_transaction = transaction->t_cpnext;
3203 +- ret = journal_clean_one_cp_list(transaction->t_checkpoint_list);
3204 ++ ret = journal_clean_one_cp_list(transaction->t_checkpoint_list,
3205 ++ destroy);
3206 + /*
3207 + * This function only frees up some memory if possible so we
3208 + * dont have an obligation to finish processing. Bail out if
3209 +@@ -492,7 +497,7 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal)
3210 + * we can possibly see not yet submitted buffers on io_list
3211 + */
3212 + ret = journal_clean_one_cp_list(transaction->
3213 +- t_checkpoint_io_list);
3214 ++ t_checkpoint_io_list, destroy);
3215 + if (need_resched())
3216 + return;
3217 + /*
3218 +@@ -506,6 +511,28 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal)
3219 + }
3220 +
3221 + /*
3222 ++ * Remove buffers from all checkpoint lists as journal is aborted and we just
3223 ++ * need to free memory
3224 ++ */
3225 ++void jbd2_journal_destroy_checkpoint(journal_t *journal)
3226 ++{
3227 ++ /*
3228 ++ * We loop because __jbd2_journal_clean_checkpoint_list() may abort
3229 ++ * early due to a need of rescheduling.
3230 ++ */
3231 ++ while (1) {
3232 ++ spin_lock(&journal->j_list_lock);
3233 ++ if (!journal->j_checkpoint_transactions) {
3234 ++ spin_unlock(&journal->j_list_lock);
3235 ++ break;
3236 ++ }
3237 ++ __jbd2_journal_clean_checkpoint_list(journal, true);
3238 ++ spin_unlock(&journal->j_list_lock);
3239 ++ cond_resched();
3240 ++ }
3241 ++}
3242 ++
3243 ++/*
3244 + * journal_remove_checkpoint: called after a buffer has been committed
3245 + * to disk (either by being write-back flushed to disk, or being
3246 + * committed to the log).
3247 +diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
3248 +index b73e0215baa7..362e5f614450 100644
3249 +--- a/fs/jbd2/commit.c
3250 ++++ b/fs/jbd2/commit.c
3251 +@@ -510,7 +510,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
3252 + * frees some memory
3253 + */
3254 + spin_lock(&journal->j_list_lock);
3255 +- __jbd2_journal_clean_checkpoint_list(journal);
3256 ++ __jbd2_journal_clean_checkpoint_list(journal, false);
3257 + spin_unlock(&journal->j_list_lock);
3258 +
3259 + jbd_debug(3, "JBD2: commit phase 1\n");
3260 +diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
3261 +index 112fad9e1e20..7003c0925760 100644
3262 +--- a/fs/jbd2/journal.c
3263 ++++ b/fs/jbd2/journal.c
3264 +@@ -1708,8 +1708,17 @@ int jbd2_journal_destroy(journal_t *journal)
3265 + while (journal->j_checkpoint_transactions != NULL) {
3266 + spin_unlock(&journal->j_list_lock);
3267 + mutex_lock(&journal->j_checkpoint_mutex);
3268 +- jbd2_log_do_checkpoint(journal);
3269 ++ err = jbd2_log_do_checkpoint(journal);
3270 + mutex_unlock(&journal->j_checkpoint_mutex);
3271 ++ /*
3272 ++ * If checkpointing failed, just free the buffers to avoid
3273 ++ * looping forever
3274 ++ */
3275 ++ if (err) {
3276 ++ jbd2_journal_destroy_checkpoint(journal);
3277 ++ spin_lock(&journal->j_list_lock);
3278 ++ break;
3279 ++ }
3280 + spin_lock(&journal->j_list_lock);
3281 + }
3282 +
3283 +diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
3284 +index 6f5f0f425e86..fecd9201dbad 100644
3285 +--- a/fs/nfs/flexfilelayout/flexfilelayout.c
3286 ++++ b/fs/nfs/flexfilelayout/flexfilelayout.c
3287 +@@ -1039,6 +1039,11 @@ static int ff_layout_write_done_cb(struct rpc_task *task,
3288 + hdr->res.verf->committed == NFS_DATA_SYNC)
3289 + ff_layout_set_layoutcommit(hdr);
3290 +
3291 ++ /* zero out fattr since we don't care DS attr at all */
3292 ++ hdr->fattr.valid = 0;
3293 ++ if (task->tk_status >= 0)
3294 ++ nfs_writeback_update_inode(hdr);
3295 ++
3296 + return 0;
3297 + }
3298 +
3299 +diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
3300 +index f13e1969eedd..b28fa4cbea52 100644
3301 +--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
3302 ++++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
3303 +@@ -500,16 +500,19 @@ int ff_layout_encode_ds_ioerr(struct nfs4_flexfile_layout *flo,
3304 + range->offset, range->length))
3305 + continue;
3306 + /* offset(8) + length(8) + stateid(NFS4_STATEID_SIZE)
3307 +- * + deviceid(NFS4_DEVICEID4_SIZE) + status(4) + opnum(4)
3308 ++ * + array length + deviceid(NFS4_DEVICEID4_SIZE)
3309 ++ * + status(4) + opnum(4)
3310 + */
3311 + p = xdr_reserve_space(xdr,
3312 +- 24 + NFS4_STATEID_SIZE + NFS4_DEVICEID4_SIZE);
3313 ++ 28 + NFS4_STATEID_SIZE + NFS4_DEVICEID4_SIZE);
3314 + if (unlikely(!p))
3315 + return -ENOBUFS;
3316 + p = xdr_encode_hyper(p, err->offset);
3317 + p = xdr_encode_hyper(p, err->length);
3318 + p = xdr_encode_opaque_fixed(p, &err->stateid,
3319 + NFS4_STATEID_SIZE);
3320 ++ /* Encode 1 error */
3321 ++ *p++ = cpu_to_be32(1);
3322 + p = xdr_encode_opaque_fixed(p, &err->deviceid,
3323 + NFS4_DEVICEID4_SIZE);
3324 + *p++ = cpu_to_be32(err->status);
3325 +diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
3326 +index 5d25b9d97c29..976ba792fbc6 100644
3327 +--- a/fs/nfs/inode.c
3328 ++++ b/fs/nfs/inode.c
3329 +@@ -1270,13 +1270,6 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
3330 + return 0;
3331 + }
3332 +
3333 +-static int nfs_ctime_need_update(const struct inode *inode, const struct nfs_fattr *fattr)
3334 +-{
3335 +- if (!(fattr->valid & NFS_ATTR_FATTR_CTIME))
3336 +- return 0;
3337 +- return timespec_compare(&fattr->ctime, &inode->i_ctime) > 0;
3338 +-}
3339 +-
3340 + static atomic_long_t nfs_attr_generation_counter;
3341 +
3342 + static unsigned long nfs_read_attr_generation_counter(void)
3343 +@@ -1425,7 +1418,6 @@ static int nfs_inode_attrs_need_update(const struct inode *inode, const struct n
3344 + const struct nfs_inode *nfsi = NFS_I(inode);
3345 +
3346 + return ((long)fattr->gencount - (long)nfsi->attr_gencount) > 0 ||
3347 +- nfs_ctime_need_update(inode, fattr) ||
3348 + ((long)nfsi->attr_gencount - (long)nfs_read_attr_generation_counter() > 0);
3349 + }
3350 +
3351 +@@ -1488,6 +1480,13 @@ static int nfs_post_op_update_inode_locked(struct inode *inode, struct nfs_fattr
3352 + {
3353 + unsigned long invalid = NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
3354 +
3355 ++ /*
3356 ++ * Don't revalidate the pagecache if we hold a delegation, but do
3357 ++ * force an attribute update
3358 ++ */
3359 ++ if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
3360 ++ invalid = NFS_INO_INVALID_ATTR|NFS_INO_REVAL_FORCED;
3361 ++
3362 + if (S_ISDIR(inode->i_mode))
3363 + invalid |= NFS_INO_INVALID_DATA;
3364 + nfs_set_cache_invalid(inode, invalid);
3365 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
3366 +index d3f205126609..c245874d7e9d 100644
3367 +--- a/fs/nfs/nfs4proc.c
3368 ++++ b/fs/nfs/nfs4proc.c
3369 +@@ -1152,6 +1152,8 @@ static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
3370 + return 0;
3371 + if ((delegation->type & fmode) != fmode)
3372 + return 0;
3373 ++ if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
3374 ++ return 0;
3375 + if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
3376 + return 0;
3377 + nfs_mark_delegation_referenced(delegation);
3378 +@@ -1216,6 +1218,7 @@ static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
3379 + }
3380 +
3381 + static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
3382 ++ nfs4_stateid *arg_stateid,
3383 + nfs4_stateid *stateid, fmode_t fmode)
3384 + {
3385 + clear_bit(NFS_O_RDWR_STATE, &state->flags);
3386 +@@ -1234,8 +1237,9 @@ static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
3387 + if (stateid == NULL)
3388 + return;
3389 + /* Handle races with OPEN */
3390 +- if (!nfs4_stateid_match_other(stateid, &state->open_stateid) ||
3391 +- !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
3392 ++ if (!nfs4_stateid_match_other(arg_stateid, &state->open_stateid) ||
3393 ++ (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
3394 ++ !nfs4_stateid_is_newer(stateid, &state->open_stateid))) {
3395 + nfs_resync_open_stateid_locked(state);
3396 + return;
3397 + }
3398 +@@ -1244,10 +1248,12 @@ static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
3399 + nfs4_stateid_copy(&state->open_stateid, stateid);
3400 + }
3401 +
3402 +-static void nfs_clear_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
3403 ++static void nfs_clear_open_stateid(struct nfs4_state *state,
3404 ++ nfs4_stateid *arg_stateid,
3405 ++ nfs4_stateid *stateid, fmode_t fmode)
3406 + {
3407 + write_seqlock(&state->seqlock);
3408 +- nfs_clear_open_stateid_locked(state, stateid, fmode);
3409 ++ nfs_clear_open_stateid_locked(state, arg_stateid, stateid, fmode);
3410 + write_sequnlock(&state->seqlock);
3411 + if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
3412 + nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
3413 +@@ -2413,7 +2419,7 @@ static int _nfs4_do_open(struct inode *dir,
3414 + goto err_free_label;
3415 + state = ctx->state;
3416 +
3417 +- if ((opendata->o_arg.open_flags & O_EXCL) &&
3418 ++ if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) &&
3419 + (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
3420 + nfs4_exclusive_attrset(opendata, sattr);
3421 +
3422 +@@ -2672,7 +2678,8 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
3423 + goto out_release;
3424 + }
3425 + }
3426 +- nfs_clear_open_stateid(state, res_stateid, calldata->arg.fmode);
3427 ++ nfs_clear_open_stateid(state, &calldata->arg.stateid,
3428 ++ res_stateid, calldata->arg.fmode);
3429 + out_release:
3430 + nfs_release_seqid(calldata->arg.seqid);
3431 + nfs_refresh_inode(calldata->inode, calldata->res.fattr);
3432 +@@ -8571,6 +8578,7 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
3433 + .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
3434 + .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
3435 + .state_renewal_ops = &nfs41_state_renewal_ops,
3436 ++ .mig_recovery_ops = &nfs41_mig_recovery_ops,
3437 + };
3438 + #endif
3439 +
3440 +diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
3441 +index 7b4552678536..069914ce7641 100644
3442 +--- a/fs/nfs/pagelist.c
3443 ++++ b/fs/nfs/pagelist.c
3444 +@@ -77,8 +77,8 @@ EXPORT_SYMBOL_GPL(nfs_pgheader_init);
3445 + void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
3446 + {
3447 + spin_lock(&hdr->lock);
3448 +- if (pos < hdr->io_start + hdr->good_bytes) {
3449 +- set_bit(NFS_IOHDR_ERROR, &hdr->flags);
3450 ++ if (!test_and_set_bit(NFS_IOHDR_ERROR, &hdr->flags)
3451 ++ || pos < hdr->io_start + hdr->good_bytes) {
3452 + clear_bit(NFS_IOHDR_EOF, &hdr->flags);
3453 + hdr->good_bytes = pos - hdr->io_start;
3454 + hdr->error = error;
3455 +diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
3456 +index f37e25b6311c..1705c78ee2d8 100644
3457 +--- a/fs/nfs/pnfs_nfs.c
3458 ++++ b/fs/nfs/pnfs_nfs.c
3459 +@@ -359,26 +359,31 @@ same_sockaddr(struct sockaddr *addr1, struct sockaddr *addr2)
3460 + return false;
3461 + }
3462 +
3463 ++/*
3464 ++ * Checks if 'dsaddrs1' contains a subset of 'dsaddrs2'. If it does,
3465 ++ * declare a match.
3466 ++ */
3467 + static bool
3468 + _same_data_server_addrs_locked(const struct list_head *dsaddrs1,
3469 + const struct list_head *dsaddrs2)
3470 + {
3471 + struct nfs4_pnfs_ds_addr *da1, *da2;
3472 +-
3473 +- /* step through both lists, comparing as we go */
3474 +- for (da1 = list_first_entry(dsaddrs1, typeof(*da1), da_node),
3475 +- da2 = list_first_entry(dsaddrs2, typeof(*da2), da_node);
3476 +- da1 != NULL && da2 != NULL;
3477 +- da1 = list_entry(da1->da_node.next, typeof(*da1), da_node),
3478 +- da2 = list_entry(da2->da_node.next, typeof(*da2), da_node)) {
3479 +- if (!same_sockaddr((struct sockaddr *)&da1->da_addr,
3480 +- (struct sockaddr *)&da2->da_addr))
3481 +- return false;
3482 ++ struct sockaddr *sa1, *sa2;
3483 ++ bool match = false;
3484 ++
3485 ++ list_for_each_entry(da1, dsaddrs1, da_node) {
3486 ++ sa1 = (struct sockaddr *)&da1->da_addr;
3487 ++ match = false;
3488 ++ list_for_each_entry(da2, dsaddrs2, da_node) {
3489 ++ sa2 = (struct sockaddr *)&da2->da_addr;
3490 ++ match = same_sockaddr(sa1, sa2);
3491 ++ if (match)
3492 ++ break;
3493 ++ }
3494 ++ if (!match)
3495 ++ break;
3496 + }
3497 +- if (da1 == NULL && da2 == NULL)
3498 +- return true;
3499 +-
3500 +- return false;
3501 ++ return match;
3502 + }
3503 +
3504 + /*
3505 +diff --git a/fs/nfs/write.c b/fs/nfs/write.c
3506 +index daf355642845..07115b9b1ad2 100644
3507 +--- a/fs/nfs/write.c
3508 ++++ b/fs/nfs/write.c
3509 +@@ -1383,24 +1383,27 @@ static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
3510 + {
3511 + struct nfs_pgio_args *argp = &hdr->args;
3512 + struct nfs_pgio_res *resp = &hdr->res;
3513 ++ u64 size = argp->offset + resp->count;
3514 +
3515 + if (!(fattr->valid & NFS_ATTR_FATTR_SIZE))
3516 ++ fattr->size = size;
3517 ++ if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) {
3518 ++ fattr->valid &= ~NFS_ATTR_FATTR_SIZE;
3519 + return;
3520 +- if (argp->offset + resp->count != fattr->size)
3521 +- return;
3522 +- if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode))
3523 ++ }
3524 ++ if (size != fattr->size)
3525 + return;
3526 + /* Set attribute barrier */
3527 + nfs_fattr_set_barrier(fattr);
3528 ++ /* ...and update size */
3529 ++ fattr->valid |= NFS_ATTR_FATTR_SIZE;
3530 + }
3531 +
3532 + void nfs_writeback_update_inode(struct nfs_pgio_header *hdr)
3533 + {
3534 +- struct nfs_fattr *fattr = hdr->res.fattr;
3535 ++ struct nfs_fattr *fattr = &hdr->fattr;
3536 + struct inode *inode = hdr->inode;
3537 +
3538 +- if (fattr == NULL)
3539 +- return;
3540 + spin_lock(&inode->i_lock);
3541 + nfs_writeback_check_extend(hdr, fattr);
3542 + nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
3543 +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
3544 +index 6e13504f736e..397798368b1a 100644
3545 +--- a/fs/nfsd/nfs4state.c
3546 ++++ b/fs/nfsd/nfs4state.c
3547 +@@ -777,13 +777,16 @@ hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
3548 + list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
3549 + }
3550 +
3551 +-static void
3552 ++static bool
3553 + unhash_delegation_locked(struct nfs4_delegation *dp)
3554 + {
3555 + struct nfs4_file *fp = dp->dl_stid.sc_file;
3556 +
3557 + lockdep_assert_held(&state_lock);
3558 +
3559 ++ if (list_empty(&dp->dl_perfile))
3560 ++ return false;
3561 ++
3562 + dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
3563 + /* Ensure that deleg break won't try to requeue it */
3564 + ++dp->dl_time;
3565 +@@ -792,16 +795,21 @@ unhash_delegation_locked(struct nfs4_delegation *dp)
3566 + list_del_init(&dp->dl_recall_lru);
3567 + list_del_init(&dp->dl_perfile);
3568 + spin_unlock(&fp->fi_lock);
3569 ++ return true;
3570 + }
3571 +
3572 + static void destroy_delegation(struct nfs4_delegation *dp)
3573 + {
3574 ++ bool unhashed;
3575 ++
3576 + spin_lock(&state_lock);
3577 +- unhash_delegation_locked(dp);
3578 ++ unhashed = unhash_delegation_locked(dp);
3579 + spin_unlock(&state_lock);
3580 +- put_clnt_odstate(dp->dl_clnt_odstate);
3581 +- nfs4_put_deleg_lease(dp->dl_stid.sc_file);
3582 +- nfs4_put_stid(&dp->dl_stid);
3583 ++ if (unhashed) {
3584 ++ put_clnt_odstate(dp->dl_clnt_odstate);
3585 ++ nfs4_put_deleg_lease(dp->dl_stid.sc_file);
3586 ++ nfs4_put_stid(&dp->dl_stid);
3587 ++ }
3588 + }
3589 +
3590 + static void revoke_delegation(struct nfs4_delegation *dp)
3591 +@@ -1004,16 +1012,20 @@ static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
3592 + sop->so_ops->so_free(sop);
3593 + }
3594 +
3595 +-static void unhash_ol_stateid(struct nfs4_ol_stateid *stp)
3596 ++static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
3597 + {
3598 + struct nfs4_file *fp = stp->st_stid.sc_file;
3599 +
3600 + lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);
3601 +
3602 ++ if (list_empty(&stp->st_perfile))
3603 ++ return false;
3604 ++
3605 + spin_lock(&fp->fi_lock);
3606 +- list_del(&stp->st_perfile);
3607 ++ list_del_init(&stp->st_perfile);
3608 + spin_unlock(&fp->fi_lock);
3609 + list_del(&stp->st_perstateowner);
3610 ++ return true;
3611 + }
3612 +
3613 + static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
3614 +@@ -1063,25 +1075,27 @@ static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
3615 + list_add(&stp->st_locks, reaplist);
3616 + }
3617 +
3618 +-static void unhash_lock_stateid(struct nfs4_ol_stateid *stp)
3619 ++static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
3620 + {
3621 + struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
3622 +
3623 + lockdep_assert_held(&oo->oo_owner.so_client->cl_lock);
3624 +
3625 + list_del_init(&stp->st_locks);
3626 +- unhash_ol_stateid(stp);
3627 + nfs4_unhash_stid(&stp->st_stid);
3628 ++ return unhash_ol_stateid(stp);
3629 + }
3630 +
3631 + static void release_lock_stateid(struct nfs4_ol_stateid *stp)
3632 + {
3633 + struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
3634 ++ bool unhashed;
3635 +
3636 + spin_lock(&oo->oo_owner.so_client->cl_lock);
3637 +- unhash_lock_stateid(stp);
3638 ++ unhashed = unhash_lock_stateid(stp);
3639 + spin_unlock(&oo->oo_owner.so_client->cl_lock);
3640 +- nfs4_put_stid(&stp->st_stid);
3641 ++ if (unhashed)
3642 ++ nfs4_put_stid(&stp->st_stid);
3643 + }
3644 +
3645 + static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
3646 +@@ -1129,7 +1143,7 @@ static void release_lockowner(struct nfs4_lockowner *lo)
3647 + while (!list_empty(&lo->lo_owner.so_stateids)) {
3648 + stp = list_first_entry(&lo->lo_owner.so_stateids,
3649 + struct nfs4_ol_stateid, st_perstateowner);
3650 +- unhash_lock_stateid(stp);
3651 ++ WARN_ON(!unhash_lock_stateid(stp));
3652 + put_ol_stateid_locked(stp, &reaplist);
3653 + }
3654 + spin_unlock(&clp->cl_lock);
3655 +@@ -1142,21 +1156,26 @@ static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
3656 + {
3657 + struct nfs4_ol_stateid *stp;
3658 +
3659 ++ lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);
3660 ++
3661 + while (!list_empty(&open_stp->st_locks)) {
3662 + stp = list_entry(open_stp->st_locks.next,
3663 + struct nfs4_ol_stateid, st_locks);
3664 +- unhash_lock_stateid(stp);
3665 ++ WARN_ON(!unhash_lock_stateid(stp));
3666 + put_ol_stateid_locked(stp, reaplist);
3667 + }
3668 + }
3669 +
3670 +-static void unhash_open_stateid(struct nfs4_ol_stateid *stp,
3671 ++static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
3672 + struct list_head *reaplist)
3673 + {
3674 ++ bool unhashed;
3675 ++
3676 + lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
3677 +
3678 +- unhash_ol_stateid(stp);
3679 ++ unhashed = unhash_ol_stateid(stp);
3680 + release_open_stateid_locks(stp, reaplist);
3681 ++ return unhashed;
3682 + }
3683 +
3684 + static void release_open_stateid(struct nfs4_ol_stateid *stp)
3685 +@@ -1164,8 +1183,8 @@ static void release_open_stateid(struct nfs4_ol_stateid *stp)
3686 + LIST_HEAD(reaplist);
3687 +
3688 + spin_lock(&stp->st_stid.sc_client->cl_lock);
3689 +- unhash_open_stateid(stp, &reaplist);
3690 +- put_ol_stateid_locked(stp, &reaplist);
3691 ++ if (unhash_open_stateid(stp, &reaplist))
3692 ++ put_ol_stateid_locked(stp, &reaplist);
3693 + spin_unlock(&stp->st_stid.sc_client->cl_lock);
3694 + free_ol_stateid_reaplist(&reaplist);
3695 + }
3696 +@@ -1210,8 +1229,8 @@ static void release_openowner(struct nfs4_openowner *oo)
3697 + while (!list_empty(&oo->oo_owner.so_stateids)) {
3698 + stp = list_first_entry(&oo->oo_owner.so_stateids,
3699 + struct nfs4_ol_stateid, st_perstateowner);
3700 +- unhash_open_stateid(stp, &reaplist);
3701 +- put_ol_stateid_locked(stp, &reaplist);
3702 ++ if (unhash_open_stateid(stp, &reaplist))
3703 ++ put_ol_stateid_locked(stp, &reaplist);
3704 + }
3705 + spin_unlock(&clp->cl_lock);
3706 + free_ol_stateid_reaplist(&reaplist);
3707 +@@ -1714,7 +1733,7 @@ __destroy_client(struct nfs4_client *clp)
3708 + spin_lock(&state_lock);
3709 + while (!list_empty(&clp->cl_delegations)) {
3710 + dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
3711 +- unhash_delegation_locked(dp);
3712 ++ WARN_ON(!unhash_delegation_locked(dp));
3713 + list_add(&dp->dl_recall_lru, &reaplist);
3714 + }
3715 + spin_unlock(&state_lock);
3716 +@@ -4346,7 +4365,7 @@ nfs4_laundromat(struct nfsd_net *nn)
3717 + new_timeo = min(new_timeo, t);
3718 + break;
3719 + }
3720 +- unhash_delegation_locked(dp);
3721 ++ WARN_ON(!unhash_delegation_locked(dp));
3722 + list_add(&dp->dl_recall_lru, &reaplist);
3723 + }
3724 + spin_unlock(&state_lock);
3725 +@@ -4714,7 +4733,7 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3726 + if (check_for_locks(stp->st_stid.sc_file,
3727 + lockowner(stp->st_stateowner)))
3728 + break;
3729 +- unhash_lock_stateid(stp);
3730 ++ WARN_ON(!unhash_lock_stateid(stp));
3731 + spin_unlock(&cl->cl_lock);
3732 + nfs4_put_stid(s);
3733 + ret = nfs_ok;
3734 +@@ -4930,20 +4949,23 @@ out:
3735 + static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
3736 + {
3737 + struct nfs4_client *clp = s->st_stid.sc_client;
3738 ++ bool unhashed;
3739 + LIST_HEAD(reaplist);
3740 +
3741 + s->st_stid.sc_type = NFS4_CLOSED_STID;
3742 + spin_lock(&clp->cl_lock);
3743 +- unhash_open_stateid(s, &reaplist);
3744 ++ unhashed = unhash_open_stateid(s, &reaplist);
3745 +
3746 + if (clp->cl_minorversion) {
3747 +- put_ol_stateid_locked(s, &reaplist);
3748 ++ if (unhashed)
3749 ++ put_ol_stateid_locked(s, &reaplist);
3750 + spin_unlock(&clp->cl_lock);
3751 + free_ol_stateid_reaplist(&reaplist);
3752 + } else {
3753 + spin_unlock(&clp->cl_lock);
3754 + free_ol_stateid_reaplist(&reaplist);
3755 +- move_to_close_lru(s, clp->net);
3756 ++ if (unhashed)
3757 ++ move_to_close_lru(s, clp->net);
3758 + }
3759 + }
3760 +
3761 +@@ -5982,7 +6004,7 @@ nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst,
3762 +
3763 + static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
3764 + struct list_head *collect,
3765 +- void (*func)(struct nfs4_ol_stateid *))
3766 ++ bool (*func)(struct nfs4_ol_stateid *))
3767 + {
3768 + struct nfs4_openowner *oop;
3769 + struct nfs4_ol_stateid *stp, *st_next;
3770 +@@ -5996,9 +6018,9 @@ static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
3771 + list_for_each_entry_safe(lst, lst_next,
3772 + &stp->st_locks, st_locks) {
3773 + if (func) {
3774 +- func(lst);
3775 +- nfsd_inject_add_lock_to_list(lst,
3776 +- collect);
3777 ++ if (func(lst))
3778 ++ nfsd_inject_add_lock_to_list(lst,
3779 ++ collect);
3780 + }
3781 + ++count;
3782 + /*
3783 +@@ -6268,7 +6290,7 @@ static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
3784 + continue;
3785 +
3786 + atomic_inc(&clp->cl_refcount);
3787 +- unhash_delegation_locked(dp);
3788 ++ WARN_ON(!unhash_delegation_locked(dp));
3789 + list_add(&dp->dl_recall_lru, victims);
3790 + }
3791 + ++count;
3792 +@@ -6598,7 +6620,7 @@ nfs4_state_shutdown_net(struct net *net)
3793 + spin_lock(&state_lock);
3794 + list_for_each_safe(pos, next, &nn->del_recall_lru) {
3795 + dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
3796 +- unhash_delegation_locked(dp);
3797 ++ WARN_ON(!unhash_delegation_locked(dp));
3798 + list_add(&dp->dl_recall_lru, &reaplist);
3799 + }
3800 + spin_unlock(&state_lock);
3801 +diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
3802 +index d4d84451e0e6..3dd1b616b92b 100644
3803 +--- a/fs/nfsd/nfs4xdr.c
3804 ++++ b/fs/nfsd/nfs4xdr.c
3805 +@@ -2139,6 +2139,27 @@ nfsd4_encode_aclname(struct xdr_stream *xdr, struct svc_rqst *rqstp,
3806 + return nfsd4_encode_user(xdr, rqstp, ace->who_uid);
3807 + }
3808 +
3809 ++static inline __be32
3810 ++nfsd4_encode_layout_type(struct xdr_stream *xdr, enum pnfs_layouttype layout_type)
3811 ++{
3812 ++ __be32 *p;
3813 ++
3814 ++ if (layout_type) {
3815 ++ p = xdr_reserve_space(xdr, 8);
3816 ++ if (!p)
3817 ++ return nfserr_resource;
3818 ++ *p++ = cpu_to_be32(1);
3819 ++ *p++ = cpu_to_be32(layout_type);
3820 ++ } else {
3821 ++ p = xdr_reserve_space(xdr, 4);
3822 ++ if (!p)
3823 ++ return nfserr_resource;
3824 ++ *p++ = cpu_to_be32(0);
3825 ++ }
3826 ++
3827 ++ return 0;
3828 ++}
3829 ++
3830 + #define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \
3831 + FATTR4_WORD0_RDATTR_ERROR)
3832 + #define WORD1_ABSENT_FS_ATTRS FATTR4_WORD1_MOUNTED_ON_FILEID
3833 +@@ -2692,20 +2713,16 @@ out_acl:
3834 + p = xdr_encode_hyper(p, stat.ino);
3835 + }
3836 + #ifdef CONFIG_NFSD_PNFS
3837 +- if ((bmval1 & FATTR4_WORD1_FS_LAYOUT_TYPES) ||
3838 +- (bmval2 & FATTR4_WORD2_LAYOUT_TYPES)) {
3839 +- if (exp->ex_layout_type) {
3840 +- p = xdr_reserve_space(xdr, 8);
3841 +- if (!p)
3842 +- goto out_resource;
3843 +- *p++ = cpu_to_be32(1);
3844 +- *p++ = cpu_to_be32(exp->ex_layout_type);
3845 +- } else {
3846 +- p = xdr_reserve_space(xdr, 4);
3847 +- if (!p)
3848 +- goto out_resource;
3849 +- *p++ = cpu_to_be32(0);
3850 +- }
3851 ++ if (bmval1 & FATTR4_WORD1_FS_LAYOUT_TYPES) {
3852 ++ status = nfsd4_encode_layout_type(xdr, exp->ex_layout_type);
3853 ++ if (status)
3854 ++ goto out;
3855 ++ }
3856 ++
3857 ++ if (bmval2 & FATTR4_WORD2_LAYOUT_TYPES) {
3858 ++ status = nfsd4_encode_layout_type(xdr, exp->ex_layout_type);
3859 ++ if (status)
3860 ++ goto out;
3861 + }
3862 +
3863 + if (bmval2 & FATTR4_WORD2_LAYOUT_BLKSIZE) {
3864 +diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
3865 +index edb640ae9a94..eb1cebed3f36 100644
3866 +--- a/include/linux/jbd2.h
3867 ++++ b/include/linux/jbd2.h
3868 +@@ -1042,8 +1042,9 @@ void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
3869 + extern void jbd2_journal_commit_transaction(journal_t *);
3870 +
3871 + /* Checkpoint list management */
3872 +-void __jbd2_journal_clean_checkpoint_list(journal_t *journal);
3873 ++void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy);
3874 + int __jbd2_journal_remove_checkpoint(struct journal_head *);
3875 ++void jbd2_journal_destroy_checkpoint(journal_t *journal);
3876 + void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
3877 +
3878 +
3879 +diff --git a/include/linux/mm.h b/include/linux/mm.h
3880 +index 0755b9fd03a7..b2085582d44e 100644
3881 +--- a/include/linux/mm.h
3882 ++++ b/include/linux/mm.h
3883 +@@ -1002,6 +1002,34 @@ static inline int page_mapped(struct page *page)
3884 + }
3885 +
3886 + /*
3887 ++ * Return true only if the page has been allocated with
3888 ++ * ALLOC_NO_WATERMARKS and the low watermark was not
3889 ++ * met implying that the system is under some pressure.
3890 ++ */
3891 ++static inline bool page_is_pfmemalloc(struct page *page)
3892 ++{
3893 ++ /*
3894 ++ * Page index cannot be this large so this must be
3895 ++ * a pfmemalloc page.
3896 ++ */
3897 ++ return page->index == -1UL;
3898 ++}
3899 ++
3900 ++/*
3901 ++ * Only to be called by the page allocator on a freshly allocated
3902 ++ * page.
3903 ++ */
3904 ++static inline void set_page_pfmemalloc(struct page *page)
3905 ++{
3906 ++ page->index = -1UL;
3907 ++}
3908 ++
3909 ++static inline void clear_page_pfmemalloc(struct page *page)
3910 ++{
3911 ++ page->index = 0;
3912 ++}
3913 ++
3914 ++/*
3915 + * Different kinds of faults, as returned by handle_mm_fault().
3916 + * Used to decide whether a process gets delivered SIGBUS or
3917 + * just gets major/minor fault counters bumped up.
3918 +diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
3919 +index 8d37e26a1007..c0c6b33535fb 100644
3920 +--- a/include/linux/mm_types.h
3921 ++++ b/include/linux/mm_types.h
3922 +@@ -63,15 +63,6 @@ struct page {
3923 + union {
3924 + pgoff_t index; /* Our offset within mapping. */
3925 + void *freelist; /* sl[aou]b first free object */
3926 +- bool pfmemalloc; /* If set by the page allocator,
3927 +- * ALLOC_NO_WATERMARKS was set
3928 +- * and the low watermark was not
3929 +- * met implying that the system
3930 +- * is under some pressure. The
3931 +- * caller should try ensure
3932 +- * this page is only used to
3933 +- * free other pages.
3934 +- */
3935 + };
3936 +
3937 + union {
3938 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
3939 +index f15154a879c7..eb1c55b8255a 100644
3940 +--- a/include/linux/skbuff.h
3941 ++++ b/include/linux/skbuff.h
3942 +@@ -1590,20 +1590,16 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
3943 + skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3944 +
3945 + /*
3946 +- * Propagate page->pfmemalloc to the skb if we can. The problem is
3947 +- * that not all callers have unique ownership of the page. If
3948 +- * pfmemalloc is set, we check the mapping as a mapping implies
3949 +- * page->index is set (index and pfmemalloc share space).
3950 +- * If it's a valid mapping, we cannot use page->pfmemalloc but we
3951 +- * do not lose pfmemalloc information as the pages would not be
3952 +- * allocated using __GFP_MEMALLOC.
3953 ++ * Propagate page pfmemalloc to the skb if we can. The problem is
3954 ++ * that not all callers have unique ownership of the page but rely
3955 ++ * on page_is_pfmemalloc doing the right thing(tm).
3956 + */
3957 + frag->page.p = page;
3958 + frag->page_offset = off;
3959 + skb_frag_size_set(frag, size);
3960 +
3961 + page = compound_head(page);
3962 +- if (page->pfmemalloc && !page->mapping)
3963 ++ if (page_is_pfmemalloc(page))
3964 + skb->pfmemalloc = true;
3965 + }
3966 +
3967 +@@ -2250,7 +2246,7 @@ static inline struct page *dev_alloc_page(void)
3968 + static inline void skb_propagate_pfmemalloc(struct page *page,
3969 + struct sk_buff *skb)
3970 + {
3971 +- if (page && page->pfmemalloc)
3972 ++ if (page_is_pfmemalloc(page))
3973 + skb->pfmemalloc = true;
3974 + }
3975 +
3976 +diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h
3977 +index 7591788e9fbf..357e44c1a46b 100644
3978 +--- a/include/linux/sunrpc/xprtsock.h
3979 ++++ b/include/linux/sunrpc/xprtsock.h
3980 +@@ -42,6 +42,7 @@ struct sock_xprt {
3981 + /*
3982 + * Connection of transports
3983 + */
3984 ++ unsigned long sock_state;
3985 + struct delayed_work connect_worker;
3986 + struct sockaddr_storage srcaddr;
3987 + unsigned short srcport;
3988 +@@ -76,6 +77,8 @@ struct sock_xprt {
3989 + */
3990 + #define TCP_RPC_REPLY (1UL << 6)
3991 +
3992 ++#define XPRT_SOCK_CONNECTING 1U
3993 ++
3994 + #endif /* __KERNEL__ */
3995 +
3996 + #endif /* _LINUX_SUNRPC_XPRTSOCK_H */
3997 +diff --git a/include/net/act_api.h b/include/net/act_api.h
3998 +index 3ee4c92afd1b..931738bc5bba 100644
3999 +--- a/include/net/act_api.h
4000 ++++ b/include/net/act_api.h
4001 +@@ -99,7 +99,6 @@ struct tc_action_ops {
4002 +
4003 + int tcf_hash_search(struct tc_action *a, u32 index);
4004 + void tcf_hash_destroy(struct tc_action *a);
4005 +-int tcf_hash_release(struct tc_action *a, int bind);
4006 + u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo);
4007 + int tcf_hash_check(u32 index, struct tc_action *a, int bind);
4008 + int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
4009 +@@ -107,6 +106,13 @@ int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
4010 + void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est);
4011 + void tcf_hash_insert(struct tc_action *a);
4012 +
4013 ++int __tcf_hash_release(struct tc_action *a, bool bind, bool strict);
4014 ++
4015 ++static inline int tcf_hash_release(struct tc_action *a, bool bind)
4016 ++{
4017 ++ return __tcf_hash_release(a, bind, false);
4018 ++}
4019 ++
4020 + int tcf_register_action(struct tc_action_ops *a, unsigned int mask);
4021 + int tcf_unregister_action(struct tc_action_ops *a);
4022 + int tcf_action_destroy(struct list_head *actions, int bind);
4023 +diff --git a/include/net/ip.h b/include/net/ip.h
4024 +index d14af7edd197..f41fc497b21b 100644
4025 +--- a/include/net/ip.h
4026 ++++ b/include/net/ip.h
4027 +@@ -161,6 +161,7 @@ static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk)
4028 + }
4029 +
4030 + /* datagram.c */
4031 ++int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
4032 + int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
4033 +
4034 + void ip4_datagram_release_cb(struct sock *sk);
4035 +diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h
4036 +index 63deb8d9f82a..d298857cd845 100644
4037 +--- a/include/soc/tegra/mc.h
4038 ++++ b/include/soc/tegra/mc.h
4039 +@@ -59,6 +59,7 @@ struct tegra_smmu_soc {
4040 + bool supports_round_robin_arbitration;
4041 + bool supports_request_limit;
4042 +
4043 ++ unsigned int num_tlb_lines;
4044 + unsigned int num_asids;
4045 +
4046 + const struct tegra_smmu_ops *ops;
4047 +diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
4048 +index fd1a02cb3c82..003dca933803 100644
4049 +--- a/include/trace/events/sunrpc.h
4050 ++++ b/include/trace/events/sunrpc.h
4051 +@@ -529,18 +529,21 @@ TRACE_EVENT(svc_xprt_do_enqueue,
4052 +
4053 + TP_STRUCT__entry(
4054 + __field(struct svc_xprt *, xprt)
4055 +- __field(struct svc_rqst *, rqst)
4056 ++ __field_struct(struct sockaddr_storage, ss)
4057 ++ __field(int, pid)
4058 ++ __field(unsigned long, flags)
4059 + ),
4060 +
4061 + TP_fast_assign(
4062 + __entry->xprt = xprt;
4063 +- __entry->rqst = rqst;
4064 ++ xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
4065 ++ __entry->pid = rqst? rqst->rq_task->pid : 0;
4066 ++ __entry->flags = xprt ? xprt->xpt_flags : 0;
4067 + ),
4068 +
4069 + TP_printk("xprt=0x%p addr=%pIScp pid=%d flags=%s", __entry->xprt,
4070 +- (struct sockaddr *)&__entry->xprt->xpt_remote,
4071 +- __entry->rqst ? __entry->rqst->rq_task->pid : 0,
4072 +- show_svc_xprt_flags(__entry->xprt->xpt_flags))
4073 ++ (struct sockaddr *)&__entry->ss,
4074 ++ __entry->pid, show_svc_xprt_flags(__entry->flags))
4075 + );
4076 +
4077 + TRACE_EVENT(svc_xprt_dequeue,
4078 +@@ -589,16 +592,20 @@ TRACE_EVENT(svc_handle_xprt,
4079 + TP_STRUCT__entry(
4080 + __field(struct svc_xprt *, xprt)
4081 + __field(int, len)
4082 ++ __field_struct(struct sockaddr_storage, ss)
4083 ++ __field(unsigned long, flags)
4084 + ),
4085 +
4086 + TP_fast_assign(
4087 + __entry->xprt = xprt;
4088 ++ xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
4089 + __entry->len = len;
4090 ++ __entry->flags = xprt ? xprt->xpt_flags : 0;
4091 + ),
4092 +
4093 + TP_printk("xprt=0x%p addr=%pIScp len=%d flags=%s", __entry->xprt,
4094 +- (struct sockaddr *)&__entry->xprt->xpt_remote, __entry->len,
4095 +- show_svc_xprt_flags(__entry->xprt->xpt_flags))
4096 ++ (struct sockaddr *)&__entry->ss,
4097 ++ __entry->len, show_svc_xprt_flags(__entry->flags))
4098 + );
4099 + #endif /* _TRACE_SUNRPC_H */
4100 +
4101 +diff --git a/kernel/fork.c b/kernel/fork.c
4102 +index 03c1eaaa6ef5..8209fa2d36ef 100644
4103 +--- a/kernel/fork.c
4104 ++++ b/kernel/fork.c
4105 +@@ -1854,13 +1854,21 @@ static int check_unshare_flags(unsigned long unshare_flags)
4106 + CLONE_NEWUSER|CLONE_NEWPID))
4107 + return -EINVAL;
4108 + /*
4109 +- * Not implemented, but pretend it works if there is nothing to
4110 +- * unshare. Note that unsharing CLONE_THREAD or CLONE_SIGHAND
4111 +- * needs to unshare vm.
4112 ++ * Not implemented, but pretend it works if there is nothing
4113 ++ * to unshare. Note that unsharing the address space or the
4114 ++ * signal handlers also need to unshare the signal queues (aka
4115 ++ * CLONE_THREAD).
4116 + */
4117 + if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
4118 +- /* FIXME: get_task_mm() increments ->mm_users */
4119 +- if (atomic_read(&current->mm->mm_users) > 1)
4120 ++ if (!thread_group_empty(current))
4121 ++ return -EINVAL;
4122 ++ }
4123 ++ if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
4124 ++ if (atomic_read(&current->sighand->count) > 1)
4125 ++ return -EINVAL;
4126 ++ }
4127 ++ if (unshare_flags & CLONE_VM) {
4128 ++ if (!current_is_single_threaded())
4129 + return -EINVAL;
4130 + }
4131 +
4132 +@@ -1929,16 +1937,16 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
4133 + if (unshare_flags & CLONE_NEWUSER)
4134 + unshare_flags |= CLONE_THREAD | CLONE_FS;
4135 + /*
4136 +- * If unsharing a thread from a thread group, must also unshare vm.
4137 +- */
4138 +- if (unshare_flags & CLONE_THREAD)
4139 +- unshare_flags |= CLONE_VM;
4140 +- /*
4141 + * If unsharing vm, must also unshare signal handlers.
4142 + */
4143 + if (unshare_flags & CLONE_VM)
4144 + unshare_flags |= CLONE_SIGHAND;
4145 + /*
4146 ++ * If unsharing a signal handlers, must also unshare the signal queues.
4147 ++ */
4148 ++ if (unshare_flags & CLONE_SIGHAND)
4149 ++ unshare_flags |= CLONE_THREAD;
4150 ++ /*
4151 + * If unsharing namespace, must also unshare filesystem information.
4152 + */
4153 + if (unshare_flags & CLONE_NEWNS)
4154 +diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
4155 +index 6dd0335ea61b..0234361b24b8 100644
4156 +--- a/lib/decompress_bunzip2.c
4157 ++++ b/lib/decompress_bunzip2.c
4158 +@@ -743,12 +743,12 @@ exit_0:
4159 + }
4160 +
4161 + #ifdef PREBOOT
4162 +-STATIC int INIT decompress(unsigned char *buf, long len,
4163 ++STATIC int INIT __decompress(unsigned char *buf, long len,
4164 + long (*fill)(void*, unsigned long),
4165 + long (*flush)(void*, unsigned long),
4166 +- unsigned char *outbuf,
4167 ++ unsigned char *outbuf, long olen,
4168 + long *pos,
4169 +- void(*error)(char *x))
4170 ++ void (*error)(char *x))
4171 + {
4172 + return bunzip2(buf, len - 4, fill, flush, outbuf, pos, error);
4173 + }
4174 +diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
4175 +index d4c7891635ec..555c06bf20da 100644
4176 +--- a/lib/decompress_inflate.c
4177 ++++ b/lib/decompress_inflate.c
4178 +@@ -1,4 +1,5 @@
4179 + #ifdef STATIC
4180 ++#define PREBOOT
4181 + /* Pre-boot environment: included */
4182 +
4183 + /* prevent inclusion of _LINUX_KERNEL_H in pre-boot environment: lots
4184 +@@ -33,23 +34,23 @@ static long INIT nofill(void *buffer, unsigned long len)
4185 + }
4186 +
4187 + /* Included from initramfs et al code */
4188 +-STATIC int INIT gunzip(unsigned char *buf, long len,
4189 ++STATIC int INIT __gunzip(unsigned char *buf, long len,
4190 + long (*fill)(void*, unsigned long),
4191 + long (*flush)(void*, unsigned long),
4192 +- unsigned char *out_buf,
4193 ++ unsigned char *out_buf, long out_len,
4194 + long *pos,
4195 + void(*error)(char *x)) {
4196 + u8 *zbuf;
4197 + struct z_stream_s *strm;
4198 + int rc;
4199 +- size_t out_len;
4200 +
4201 + rc = -1;
4202 + if (flush) {
4203 + out_len = 0x8000; /* 32 K */
4204 + out_buf = malloc(out_len);
4205 + } else {
4206 +- out_len = ((size_t)~0) - (size_t)out_buf; /* no limit */
4207 ++ if (!out_len)
4208 ++ out_len = ((size_t)~0) - (size_t)out_buf; /* no limit */
4209 + }
4210 + if (!out_buf) {
4211 + error("Out of memory while allocating output buffer");
4212 +@@ -181,4 +182,24 @@ gunzip_nomem1:
4213 + return rc; /* returns Z_OK (0) if successful */
4214 + }
4215 +
4216 +-#define decompress gunzip
4217 ++#ifndef PREBOOT
4218 ++STATIC int INIT gunzip(unsigned char *buf, long len,
4219 ++ long (*fill)(void*, unsigned long),
4220 ++ long (*flush)(void*, unsigned long),
4221 ++ unsigned char *out_buf,
4222 ++ long *pos,
4223 ++ void (*error)(char *x))
4224 ++{
4225 ++ return __gunzip(buf, len, fill, flush, out_buf, 0, pos, error);
4226 ++}
4227 ++#else
4228 ++STATIC int INIT __decompress(unsigned char *buf, long len,
4229 ++ long (*fill)(void*, unsigned long),
4230 ++ long (*flush)(void*, unsigned long),
4231 ++ unsigned char *out_buf, long out_len,
4232 ++ long *pos,
4233 ++ void (*error)(char *x))
4234 ++{
4235 ++ return __gunzip(buf, len, fill, flush, out_buf, out_len, pos, error);
4236 ++}
4237 ++#endif
4238 +diff --git a/lib/decompress_unlz4.c b/lib/decompress_unlz4.c
4239 +index 40f66ebe57b7..036fc882cd72 100644
4240 +--- a/lib/decompress_unlz4.c
4241 ++++ b/lib/decompress_unlz4.c
4242 +@@ -196,12 +196,12 @@ exit_0:
4243 + }
4244 +
4245 + #ifdef PREBOOT
4246 +-STATIC int INIT decompress(unsigned char *buf, long in_len,
4247 ++STATIC int INIT __decompress(unsigned char *buf, long in_len,
4248 + long (*fill)(void*, unsigned long),
4249 + long (*flush)(void*, unsigned long),
4250 +- unsigned char *output,
4251 ++ unsigned char *output, long out_len,
4252 + long *posp,
4253 +- void(*error)(char *x)
4254 ++ void (*error)(char *x)
4255 + )
4256 + {
4257 + return unlz4(buf, in_len - 4, fill, flush, output, posp, error);
4258 +diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
4259 +index 0be83af62b88..decb64629c14 100644
4260 +--- a/lib/decompress_unlzma.c
4261 ++++ b/lib/decompress_unlzma.c
4262 +@@ -667,13 +667,12 @@ exit_0:
4263 + }
4264 +
4265 + #ifdef PREBOOT
4266 +-STATIC int INIT decompress(unsigned char *buf, long in_len,
4267 ++STATIC int INIT __decompress(unsigned char *buf, long in_len,
4268 + long (*fill)(void*, unsigned long),
4269 + long (*flush)(void*, unsigned long),
4270 +- unsigned char *output,
4271 ++ unsigned char *output, long out_len,
4272 + long *posp,
4273 +- void(*error)(char *x)
4274 +- )
4275 ++ void (*error)(char *x))
4276 + {
4277 + return unlzma(buf, in_len - 4, fill, flush, output, posp, error);
4278 + }
4279 +diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c
4280 +index b94a31bdd87d..f4c158e3a022 100644
4281 +--- a/lib/decompress_unlzo.c
4282 ++++ b/lib/decompress_unlzo.c
4283 +@@ -31,6 +31,7 @@
4284 + */
4285 +
4286 + #ifdef STATIC
4287 ++#define PREBOOT
4288 + #include "lzo/lzo1x_decompress_safe.c"
4289 + #else
4290 + #include <linux/decompress/unlzo.h>
4291 +@@ -287,4 +288,14 @@ exit:
4292 + return ret;
4293 + }
4294 +
4295 +-#define decompress unlzo
4296 ++#ifdef PREBOOT
4297 ++STATIC int INIT __decompress(unsigned char *buf, long len,
4298 ++ long (*fill)(void*, unsigned long),
4299 ++ long (*flush)(void*, unsigned long),
4300 ++ unsigned char *out_buf, long olen,
4301 ++ long *pos,
4302 ++ void (*error)(char *x))
4303 ++{
4304 ++ return unlzo(buf, len, fill, flush, out_buf, pos, error);
4305 ++}
4306 ++#endif
4307 +diff --git a/lib/decompress_unxz.c b/lib/decompress_unxz.c
4308 +index b07a78340e9d..25d59a95bd66 100644
4309 +--- a/lib/decompress_unxz.c
4310 ++++ b/lib/decompress_unxz.c
4311 +@@ -394,4 +394,14 @@ error_alloc_state:
4312 + * This macro is used by architecture-specific files to decompress
4313 + * the kernel image.
4314 + */
4315 +-#define decompress unxz
4316 ++#ifdef XZ_PREBOOT
4317 ++STATIC int INIT __decompress(unsigned char *buf, long len,
4318 ++ long (*fill)(void*, unsigned long),
4319 ++ long (*flush)(void*, unsigned long),
4320 ++ unsigned char *out_buf, long olen,
4321 ++ long *pos,
4322 ++ void (*error)(char *x))
4323 ++{
4324 ++ return unxz(buf, len, fill, flush, out_buf, pos, error);
4325 ++}
4326 ++#endif
4327 +diff --git a/lib/rhashtable.c b/lib/rhashtable.c
4328 +index 8609378e6505..cf910e48f8f2 100644
4329 +--- a/lib/rhashtable.c
4330 ++++ b/lib/rhashtable.c
4331 +@@ -612,6 +612,8 @@ next:
4332 + iter->skip = 0;
4333 + }
4334 +
4335 ++ iter->p = NULL;
4336 ++
4337 + /* Ensure we see any new tables. */
4338 + smp_rmb();
4339 +
4340 +@@ -622,8 +624,6 @@ next:
4341 + return ERR_PTR(-EAGAIN);
4342 + }
4343 +
4344 +- iter->p = NULL;
4345 +-
4346 + out:
4347 +
4348 + return obj;
4349 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
4350 +index ebffa0e4a9c0..18490f3bd7f1 100644
4351 +--- a/mm/page_alloc.c
4352 ++++ b/mm/page_alloc.c
4353 +@@ -983,12 +983,15 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
4354 + set_page_owner(page, order, gfp_flags);
4355 +
4356 + /*
4357 +- * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was necessary to
4358 ++ * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
4359 + * allocate the page. The expectation is that the caller is taking
4360 + * steps that will free more memory. The caller should avoid the page
4361 + * being used for !PFMEMALLOC purposes.
4362 + */
4363 +- page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
4364 ++ if (alloc_flags & ALLOC_NO_WATERMARKS)
4365 ++ set_page_pfmemalloc(page);
4366 ++ else
4367 ++ clear_page_pfmemalloc(page);
4368 +
4369 + return 0;
4370 + }
4371 +diff --git a/mm/slab.c b/mm/slab.c
4372 +index 7eb38dd1cefa..3dd2d1ff9d5d 100644
4373 +--- a/mm/slab.c
4374 ++++ b/mm/slab.c
4375 +@@ -1602,7 +1602,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
4376 + }
4377 +
4378 + /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
4379 +- if (unlikely(page->pfmemalloc))
4380 ++ if (page_is_pfmemalloc(page))
4381 + pfmemalloc_active = true;
4382 +
4383 + nr_pages = (1 << cachep->gfporder);
4384 +@@ -1613,7 +1613,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
4385 + add_zone_page_state(page_zone(page),
4386 + NR_SLAB_UNRECLAIMABLE, nr_pages);
4387 + __SetPageSlab(page);
4388 +- if (page->pfmemalloc)
4389 ++ if (page_is_pfmemalloc(page))
4390 + SetPageSlabPfmemalloc(page);
4391 +
4392 + if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
4393 +diff --git a/mm/slub.c b/mm/slub.c
4394 +index 54c0876b43d5..08342c523a85 100644
4395 +--- a/mm/slub.c
4396 ++++ b/mm/slub.c
4397 +@@ -1427,7 +1427,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
4398 + inc_slabs_node(s, page_to_nid(page), page->objects);
4399 + page->slab_cache = s;
4400 + __SetPageSlab(page);
4401 +- if (page->pfmemalloc)
4402 ++ if (page_is_pfmemalloc(page))
4403 + SetPageSlabPfmemalloc(page);
4404 +
4405 + start = page_address(page);
4406 +diff --git a/mm/vmscan.c b/mm/vmscan.c
4407 +index 0d024fc8aa8e..1a17bd7c0ce5 100644
4408 +--- a/mm/vmscan.c
4409 ++++ b/mm/vmscan.c
4410 +@@ -1153,7 +1153,7 @@ cull_mlocked:
4411 + if (PageSwapCache(page))
4412 + try_to_free_swap(page);
4413 + unlock_page(page);
4414 +- putback_lru_page(page);
4415 ++ list_add(&page->lru, &ret_pages);
4416 + continue;
4417 +
4418 + activate_locked:
4419 +diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
4420 +index e97572b5d2cc..0ff6e1bbca91 100644
4421 +--- a/net/bridge/br_forward.c
4422 ++++ b/net/bridge/br_forward.c
4423 +@@ -42,6 +42,7 @@ int br_dev_queue_push_xmit(struct sock *sk, struct sk_buff *skb)
4424 + } else {
4425 + skb_push(skb, ETH_HLEN);
4426 + br_drop_fake_rtable(skb);
4427 ++ skb_sender_cpu_clear(skb);
4428 + dev_queue_xmit(skb);
4429 + }
4430 +
4431 +diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
4432 +index e29ad70b3000..d1f910c0d586 100644
4433 +--- a/net/bridge/br_mdb.c
4434 ++++ b/net/bridge/br_mdb.c
4435 +@@ -348,7 +348,6 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
4436 + return -ENOMEM;
4437 + rcu_assign_pointer(*pp, p);
4438 +
4439 +- br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
4440 + return 0;
4441 + }
4442 +
4443 +@@ -371,6 +370,7 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
4444 + if (!p || p->br != br || p->state == BR_STATE_DISABLED)
4445 + return -EINVAL;
4446 +
4447 ++ memset(&ip, 0, sizeof(ip));
4448 + ip.proto = entry->addr.proto;
4449 + if (ip.proto == htons(ETH_P_IP))
4450 + ip.u.ip4 = entry->addr.u.ip4;
4451 +@@ -417,6 +417,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
4452 + if (!netif_running(br->dev) || br->multicast_disabled)
4453 + return -EINVAL;
4454 +
4455 ++ memset(&ip, 0, sizeof(ip));
4456 + ip.proto = entry->addr.proto;
4457 + if (ip.proto == htons(ETH_P_IP)) {
4458 + if (timer_pending(&br->ip4_other_query.timer))
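
Both br_mdb hunks above zero a stack-allocated struct br_ip before filling it. The structure carries a union plus padding and is later compared and hashed as a whole, so bytes left uninitialized can make otherwise-equal keys mismatch. A small sketch of the hazard, using a hypothetical key struct in place of br_ip:

    #include <stdio.h>
    #include <string.h>

    struct key {                 /* hypothetical key with a union, like br_ip */
        union { unsigned int v4; unsigned char v6[16]; } u;
        unsigned short proto;    /* trailing padding follows on most ABIs */
    };

    static struct key make_key(unsigned int addr, unsigned short proto)
    {
        struct key k;
        memset(&k, 0, sizeof(k));   /* as the fix does: no stale stack bytes */
        k.u.v4 = addr;
        k.proto = proto;
        return k;
    }

    int main(void)
    {
        struct key a = make_key(0x0a000001, 0x0800);
        struct key b = make_key(0x0a000001, 0x0800);
        /* Whole-struct memcmp/hash is only safe because both were zeroed. */
        printf("equal=%d\n", memcmp(&a, &b, sizeof(a)) == 0);
        return 0;
    }
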
4459 +diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
4460 +index 4b5c236998ff..a7559ef312bd 100644
4461 +--- a/net/bridge/br_netlink.c
4462 ++++ b/net/bridge/br_netlink.c
4463 +@@ -112,6 +112,8 @@ static inline size_t br_port_info_size(void)
4464 + + nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */
4465 + + nla_total_size(1) /* IFLA_BRPORT_LEARNING */
4466 + + nla_total_size(1) /* IFLA_BRPORT_UNICAST_FLOOD */
4467 ++ + nla_total_size(1) /* IFLA_BRPORT_PROXYARP */
4468 ++ + nla_total_size(1) /* IFLA_BRPORT_PROXYARP_WIFI */
4469 + + 0;
4470 + }
4471 +
4472 +@@ -504,6 +506,8 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
4473 + [IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 },
4474 + [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 },
4475 + [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
4476 ++ [IFLA_BRPORT_PROXYARP] = { .type = NLA_U8 },
4477 ++ [IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
4478 + };
4479 +
4480 + /* Change the state of the port and notify spanning tree */
4481 +@@ -711,9 +715,17 @@ static int br_port_slave_changelink(struct net_device *brdev,
4482 + struct nlattr *tb[],
4483 + struct nlattr *data[])
4484 + {
4485 ++ struct net_bridge *br = netdev_priv(brdev);
4486 ++ int ret;
4487 ++
4488 + if (!data)
4489 + return 0;
4490 +- return br_setport(br_port_get_rtnl(dev), data);
4491 ++
4492 ++ spin_lock_bh(&br->lock);
4493 ++ ret = br_setport(br_port_get_rtnl(dev), data);
4494 ++ spin_unlock_bh(&br->lock);
4495 ++
4496 ++ return ret;
4497 + }
4498 +
4499 + static int br_port_fill_slave_info(struct sk_buff *skb,
4500 +diff --git a/net/core/datagram.c b/net/core/datagram.c
4501 +index b80fb91bb3f7..617088aee21d 100644
4502 +--- a/net/core/datagram.c
4503 ++++ b/net/core/datagram.c
4504 +@@ -131,6 +131,35 @@ out_noerr:
4505 + goto out;
4506 + }
4507 +
4508 ++static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
4509 ++{
4510 ++ struct sk_buff *nskb;
4511 ++
4512 ++ if (skb->peeked)
4513 ++ return skb;
4514 ++
4515 ++ /* We have to unshare an skb before modifying it. */
4516 ++ if (!skb_shared(skb))
4517 ++ goto done;
4518 ++
4519 ++ nskb = skb_clone(skb, GFP_ATOMIC);
4520 ++ if (!nskb)
4521 ++ return ERR_PTR(-ENOMEM);
4522 ++
4523 ++ skb->prev->next = nskb;
4524 ++ skb->next->prev = nskb;
4525 ++ nskb->prev = skb->prev;
4526 ++ nskb->next = skb->next;
4527 ++
4528 ++ consume_skb(skb);
4529 ++ skb = nskb;
4530 ++
4531 ++done:
4532 ++ skb->peeked = 1;
4533 ++
4534 ++ return skb;
4535 ++}
4536 ++
4537 + /**
4538 + * __skb_recv_datagram - Receive a datagram skbuff
4539 + * @sk: socket
4540 +@@ -165,7 +194,9 @@ out_noerr:
4541 + struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
4542 + int *peeked, int *off, int *err)
4543 + {
4544 ++ struct sk_buff_head *queue = &sk->sk_receive_queue;
4545 + struct sk_buff *skb, *last;
4546 ++ unsigned long cpu_flags;
4547 + long timeo;
4548 + /*
4549 + * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
4550 +@@ -184,8 +215,6 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
4551 + * Look at current nfs client by the way...
4552 + * However, this function was correct in any case. 8)
4553 + */
4554 +- unsigned long cpu_flags;
4555 +- struct sk_buff_head *queue = &sk->sk_receive_queue;
4556 + int _off = *off;
4557 +
4558 + last = (struct sk_buff *)queue;
4559 +@@ -199,7 +228,12 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
4560 + _off -= skb->len;
4561 + continue;
4562 + }
4563 +- skb->peeked = 1;
4564 ++
4565 ++ skb = skb_set_peeked(skb);
4566 ++ error = PTR_ERR(skb);
4567 ++ if (IS_ERR(skb))
4568 ++ goto unlock_err;
4569 ++
4570 + atomic_inc(&skb->users);
4571 + } else
4572 + __skb_unlink(skb, queue);
4573 +@@ -223,6 +257,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
4574 +
4575 + return NULL;
4576 +
4577 ++unlock_err:
4578 ++ spin_unlock_irqrestore(&queue->lock, cpu_flags);
4579 + no_packet:
4580 + *err = error;
4581 + return NULL;
4582 +@@ -622,7 +658,8 @@ __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
4583 + !skb->csum_complete_sw)
4584 + netdev_rx_csum_fault(skb->dev);
4585 + }
4586 +- skb->csum_valid = !sum;
4587 ++ if (!skb_shared(skb))
4588 ++ skb->csum_valid = !sum;
4589 + return sum;
4590 + }
4591 + EXPORT_SYMBOL(__skb_checksum_complete_head);
4592 +@@ -642,11 +679,13 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb)
4593 + netdev_rx_csum_fault(skb->dev);
4594 + }
4595 +
4596 +- /* Save full packet checksum */
4597 +- skb->csum = csum;
4598 +- skb->ip_summed = CHECKSUM_COMPLETE;
4599 +- skb->csum_complete_sw = 1;
4600 +- skb->csum_valid = !sum;
4601 ++ if (!skb_shared(skb)) {
4602 ++ /* Save full packet checksum */
4603 ++ skb->csum = csum;
4604 ++ skb->ip_summed = CHECKSUM_COMPLETE;
4605 ++ skb->csum_complete_sw = 1;
4606 ++ skb->csum_valid = !sum;
4607 ++ }
4608 +
4609 + return sum;
4610 + }
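
The new skb_set_peeked() helper above encodes a clone-before-modify rule: a peeked skb may still be referenced elsewhere, so it must be unshared (cloned, with the clone spliced into the queue in its place) before the peeked bit is set. A userspace sketch of the same discipline on a refcounted buffer; kernel details such as ERR_PTR() and the queue splice are elided:

    #include <stdlib.h>
    #include <string.h>

    struct buf { int refcnt; int peeked; char data[64]; };

    /* Returns the buffer that now carries the flag: the original if we
     * were the sole owner, otherwise a private clone (the shared original
     * loses one reference, as consume_skb() does in the hunk above). */
    static struct buf *set_peeked(struct buf *b)
    {
        if (b->peeked)
            return b;

        if (b->refcnt > 1) {                /* shared: must not modify in place */
            struct buf *nb = malloc(sizeof(*nb));
            if (!nb)
                return NULL;                /* kernel code returns ERR_PTR(-ENOMEM) */
            memcpy(nb, b, sizeof(*nb));
            nb->refcnt = 1;
            b->refcnt--;                    /* drop our reference to the original */
            b = nb;
        }
        b->peeked = 1;
        return b;
    }

    int main(void)
    {
        struct buf shared = { .refcnt = 2 };
        struct buf *mine = set_peeked(&shared);
        int ok = mine && mine != &shared && !shared.peeked && mine->peeked;
        free(mine != &shared ? mine : NULL);
        return ok ? 0 : 1;
    }
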
4611 +diff --git a/net/core/dev.c b/net/core/dev.c
4612 +index aa82f9ab6a36..a42b232805a5 100644
4613 +--- a/net/core/dev.c
4614 ++++ b/net/core/dev.c
4615 +@@ -672,10 +672,6 @@ int dev_get_iflink(const struct net_device *dev)
4616 + if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
4617 + return dev->netdev_ops->ndo_get_iflink(dev);
4618 +
4619 +- /* If dev->rtnl_link_ops is set, it's a virtual interface. */
4620 +- if (dev->rtnl_link_ops)
4621 +- return 0;
4622 +-
4623 + return dev->ifindex;
4624 + }
4625 + EXPORT_SYMBOL(dev_get_iflink);
4626 +@@ -3341,6 +3337,8 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
4627 + local_irq_save(flags);
4628 +
4629 + rps_lock(sd);
4630 ++ if (!netif_running(skb->dev))
4631 ++ goto drop;
4632 + qlen = skb_queue_len(&sd->input_pkt_queue);
4633 + if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
4634 + if (qlen) {
4635 +@@ -3362,6 +3360,7 @@ enqueue:
4636 + goto enqueue;
4637 + }
4638 +
4639 ++drop:
4640 + sd->dropped++;
4641 + rps_unlock(sd);
4642 +
4643 +@@ -3667,8 +3666,6 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
4644 +
4645 + pt_prev = NULL;
4646 +
4647 +- rcu_read_lock();
4648 +-
4649 + another_round:
4650 + skb->skb_iif = skb->dev->ifindex;
4651 +
4652 +@@ -3678,7 +3675,7 @@ another_round:
4653 + skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
4654 + skb = skb_vlan_untag(skb);
4655 + if (unlikely(!skb))
4656 +- goto unlock;
4657 ++ goto out;
4658 + }
4659 +
4660 + #ifdef CONFIG_NET_CLS_ACT
4661 +@@ -3708,7 +3705,7 @@ skip_taps:
4662 + if (static_key_false(&ingress_needed)) {
4663 + skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
4664 + if (!skb)
4665 +- goto unlock;
4666 ++ goto out;
4667 + }
4668 +
4669 + skb->tc_verd = 0;
4670 +@@ -3725,7 +3722,7 @@ ncls:
4671 + if (vlan_do_receive(&skb))
4672 + goto another_round;
4673 + else if (unlikely(!skb))
4674 +- goto unlock;
4675 ++ goto out;
4676 + }
4677 +
4678 + rx_handler = rcu_dereference(skb->dev->rx_handler);
4679 +@@ -3737,7 +3734,7 @@ ncls:
4680 + switch (rx_handler(&skb)) {
4681 + case RX_HANDLER_CONSUMED:
4682 + ret = NET_RX_SUCCESS;
4683 +- goto unlock;
4684 ++ goto out;
4685 + case RX_HANDLER_ANOTHER:
4686 + goto another_round;
4687 + case RX_HANDLER_EXACT:
4688 +@@ -3791,8 +3788,7 @@ drop:
4689 + ret = NET_RX_DROP;
4690 + }
4691 +
4692 +-unlock:
4693 +- rcu_read_unlock();
4694 ++out:
4695 + return ret;
4696 + }
4697 +
4698 +@@ -3823,29 +3819,30 @@ static int __netif_receive_skb(struct sk_buff *skb)
4699 +
4700 + static int netif_receive_skb_internal(struct sk_buff *skb)
4701 + {
4702 ++ int ret;
4703 ++
4704 + net_timestamp_check(netdev_tstamp_prequeue, skb);
4705 +
4706 + if (skb_defer_rx_timestamp(skb))
4707 + return NET_RX_SUCCESS;
4708 +
4709 ++ rcu_read_lock();
4710 ++
4711 + #ifdef CONFIG_RPS
4712 + if (static_key_false(&rps_needed)) {
4713 + struct rps_dev_flow voidflow, *rflow = &voidflow;
4714 +- int cpu, ret;
4715 +-
4716 +- rcu_read_lock();
4717 +-
4718 +- cpu = get_rps_cpu(skb->dev, skb, &rflow);
4719 ++ int cpu = get_rps_cpu(skb->dev, skb, &rflow);
4720 +
4721 + if (cpu >= 0) {
4722 + ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4723 + rcu_read_unlock();
4724 + return ret;
4725 + }
4726 +- rcu_read_unlock();
4727 + }
4728 + #endif
4729 +- return __netif_receive_skb(skb);
4730 ++ ret = __netif_receive_skb(skb);
4731 ++ rcu_read_unlock();
4732 ++ return ret;
4733 + }
4734 +
4735 + /**
4736 +@@ -4390,8 +4387,10 @@ static int process_backlog(struct napi_struct *napi, int quota)
4737 + struct sk_buff *skb;
4738 +
4739 + while ((skb = __skb_dequeue(&sd->process_queue))) {
4740 ++ rcu_read_lock();
4741 + local_irq_enable();
4742 + __netif_receive_skb(skb);
4743 ++ rcu_read_unlock();
4744 + local_irq_disable();
4745 + input_queue_head_incr(sd);
4746 + if (++work >= quota) {
4747 +@@ -6027,6 +6026,7 @@ static void rollback_registered_many(struct list_head *head)
4748 + unlist_netdevice(dev);
4749 +
4750 + dev->reg_state = NETREG_UNREGISTERING;
4751 ++ on_each_cpu(flush_backlog, dev, 1);
4752 + }
4753 +
4754 + synchronize_net();
4755 +@@ -6297,7 +6297,8 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
4756 + struct netdev_queue *tx;
4757 + size_t sz = count * sizeof(*tx);
4758 +
4759 +- BUG_ON(count < 1 || count > 0xffff);
4760 ++ if (count < 1 || count > 0xffff)
4761 ++ return -EINVAL;
4762 +
4763 + tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
4764 + if (!tx) {
4765 +@@ -6650,8 +6651,6 @@ void netdev_run_todo(void)
4766 +
4767 + dev->reg_state = NETREG_UNREGISTERED;
4768 +
4769 +- on_each_cpu(flush_backlog, dev, 1);
4770 +-
4771 + netdev_wait_allrefs(dev);
4772 +
4773 + /* paranoia */
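
The net/core/dev.c changes above hoist the RCU read-side critical section out of __netif_receive_skb_core() and into its callers, so everything the receive path dereferences stays protected for the whole operation (the flush_backlog move to unregister time relies on this). The general shape of moving the lock scope to the caller, sketched with a pthread rwlock standing in for rcu_read_lock()/rcu_read_unlock(); RCU itself has different semantics, this only illustrates the scope change:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t tbl_lock = PTHREAD_RWLOCK_INITIALIZER;
    static int shared_table[4] = { 10, 20, 30, 40 };

    /* Core helper: assumes the caller already holds the read lock, as
     * __netif_receive_skb_core() now assumes rcu_read_lock() is held. */
    static int receive_core(int idx)
    {
        return shared_table[idx & 3];
    }

    static int receive(int idx)
    {
        int ret;

        pthread_rwlock_rdlock(&tbl_lock);   /* caller brackets the whole path */
        ret = receive_core(idx);
        pthread_rwlock_unlock(&tbl_lock);
        return ret;
    }

    int main(void)
    {
        printf("%d\n", receive(2));
        return 0;
    }
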
4774 +diff --git a/net/core/pktgen.c b/net/core/pktgen.c
4775 +index 508155b283dd..043ea1867d0f 100644
4776 +--- a/net/core/pktgen.c
4777 ++++ b/net/core/pktgen.c
4778 +@@ -3490,8 +3490,10 @@ static int pktgen_thread_worker(void *arg)
4779 + pktgen_rem_thread(t);
4780 +
4781 + /* Wait for kthread_stop */
4782 +- while (!kthread_should_stop()) {
4783 ++ for (;;) {
4784 + set_current_state(TASK_INTERRUPTIBLE);
4785 ++ if (kthread_should_stop())
4786 ++ break;
4787 + schedule();
4788 + }
4789 + __set_current_state(TASK_RUNNING);
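
The pktgen loop above is reordered so the thread first declares it is about to sleep (set_current_state(TASK_INTERRUPTIBLE)) and only then re-checks kthread_should_stop(). Checking before changing state leaves a window where the stop request and wakeup arrive between the check and the sleep, and the thread sleeps forever. The userspace equivalent of closing that window is re-checking the predicate under the lock inside the wait loop:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static bool should_stop;

    static void wait_for_stop(void)
    {
        pthread_mutex_lock(&lock);
        while (!should_stop)                  /* predicate re-checked under the lock: */
            pthread_cond_wait(&cond, &lock);  /* no request can slip in unseen */
        pthread_mutex_unlock(&lock);
    }

    static void request_stop(void)
    {
        pthread_mutex_lock(&lock);
        should_stop = true;
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        request_stop();      /* stop already requested: wait returns at once */
        wait_for_stop();
        return 0;
    }
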
4790 +diff --git a/net/core/request_sock.c b/net/core/request_sock.c
4791 +index 87b22c0bc08c..b42f0e26f89e 100644
4792 +--- a/net/core/request_sock.c
4793 ++++ b/net/core/request_sock.c
4794 +@@ -103,10 +103,16 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
4795 + spin_lock_bh(&queue->syn_wait_lock);
4796 + while ((req = lopt->syn_table[i]) != NULL) {
4797 + lopt->syn_table[i] = req->dl_next;
4798 ++ /* Because of following del_timer_sync(),
4799 ++ * we must release the spinlock here
4800 ++ * or risk a dead lock.
4801 ++ */
4802 ++ spin_unlock_bh(&queue->syn_wait_lock);
4803 + atomic_inc(&lopt->qlen_dec);
4804 +- if (del_timer(&req->rsk_timer))
4805 ++ if (del_timer_sync(&req->rsk_timer))
4806 + reqsk_put(req);
4807 + reqsk_put(req);
4808 ++ spin_lock_bh(&queue->syn_wait_lock);
4809 + }
4810 + spin_unlock_bh(&queue->syn_wait_lock);
4811 + }
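
The reqsk_queue_destroy() hunk above drops syn_wait_lock around del_timer_sync(), because del_timer_sync() waits for a running timer handler to finish and the handler itself takes that lock; waiting while holding it is a deadlock, as the added comment says. The same rule in userspace terms, joining a worker that needs the lock we hold:

    #include <pthread.h>

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

    static void *worker(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&table_lock);    /* like the timer handler taking */
        pthread_mutex_unlock(&table_lock);  /* syn_wait_lock */
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, worker, NULL);

        pthread_mutex_lock(&table_lock);
        /* ... unlink the entry under the lock ... */
        pthread_mutex_unlock(&table_lock);  /* release BEFORE the wait, */

        pthread_join(t, NULL);              /* like del_timer_sync() in the fix */

        pthread_mutex_lock(&table_lock);    /* then retake it for the next entry */
        pthread_mutex_unlock(&table_lock);
        return 0;
    }
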
4812 +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
4813 +index 8de36824018d..fe95cb704aaa 100644
4814 +--- a/net/core/rtnetlink.c
4815 ++++ b/net/core/rtnetlink.c
4816 +@@ -1287,10 +1287,6 @@ static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
4817 + [IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED },
4818 + };
4819 +
4820 +-static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = {
4821 +- [IFLA_VF_INFO] = { .type = NLA_NESTED },
4822 +-};
4823 +-
4824 + static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
4825 + [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) },
4826 + [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) },
4827 +@@ -1437,96 +1433,98 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
4828 + return 0;
4829 + }
4830 +
4831 +-static int do_setvfinfo(struct net_device *dev, struct nlattr *attr)
4832 ++static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
4833 + {
4834 +- int rem, err = -EINVAL;
4835 +- struct nlattr *vf;
4836 + const struct net_device_ops *ops = dev->netdev_ops;
4837 ++ int err = -EINVAL;
4838 +
4839 +- nla_for_each_nested(vf, attr, rem) {
4840 +- switch (nla_type(vf)) {
4841 +- case IFLA_VF_MAC: {
4842 +- struct ifla_vf_mac *ivm;
4843 +- ivm = nla_data(vf);
4844 +- err = -EOPNOTSUPP;
4845 +- if (ops->ndo_set_vf_mac)
4846 +- err = ops->ndo_set_vf_mac(dev, ivm->vf,
4847 +- ivm->mac);
4848 +- break;
4849 +- }
4850 +- case IFLA_VF_VLAN: {
4851 +- struct ifla_vf_vlan *ivv;
4852 +- ivv = nla_data(vf);
4853 +- err = -EOPNOTSUPP;
4854 +- if (ops->ndo_set_vf_vlan)
4855 +- err = ops->ndo_set_vf_vlan(dev, ivv->vf,
4856 +- ivv->vlan,
4857 +- ivv->qos);
4858 +- break;
4859 +- }
4860 +- case IFLA_VF_TX_RATE: {
4861 +- struct ifla_vf_tx_rate *ivt;
4862 +- struct ifla_vf_info ivf;
4863 +- ivt = nla_data(vf);
4864 +- err = -EOPNOTSUPP;
4865 +- if (ops->ndo_get_vf_config)
4866 +- err = ops->ndo_get_vf_config(dev, ivt->vf,
4867 +- &ivf);
4868 +- if (err)
4869 +- break;
4870 +- err = -EOPNOTSUPP;
4871 +- if (ops->ndo_set_vf_rate)
4872 +- err = ops->ndo_set_vf_rate(dev, ivt->vf,
4873 +- ivf.min_tx_rate,
4874 +- ivt->rate);
4875 +- break;
4876 +- }
4877 +- case IFLA_VF_RATE: {
4878 +- struct ifla_vf_rate *ivt;
4879 +- ivt = nla_data(vf);
4880 +- err = -EOPNOTSUPP;
4881 +- if (ops->ndo_set_vf_rate)
4882 +- err = ops->ndo_set_vf_rate(dev, ivt->vf,
4883 +- ivt->min_tx_rate,
4884 +- ivt->max_tx_rate);
4885 +- break;
4886 +- }
4887 +- case IFLA_VF_SPOOFCHK: {
4888 +- struct ifla_vf_spoofchk *ivs;
4889 +- ivs = nla_data(vf);
4890 +- err = -EOPNOTSUPP;
4891 +- if (ops->ndo_set_vf_spoofchk)
4892 +- err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
4893 +- ivs->setting);
4894 +- break;
4895 +- }
4896 +- case IFLA_VF_LINK_STATE: {
4897 +- struct ifla_vf_link_state *ivl;
4898 +- ivl = nla_data(vf);
4899 +- err = -EOPNOTSUPP;
4900 +- if (ops->ndo_set_vf_link_state)
4901 +- err = ops->ndo_set_vf_link_state(dev, ivl->vf,
4902 +- ivl->link_state);
4903 +- break;
4904 +- }
4905 +- case IFLA_VF_RSS_QUERY_EN: {
4906 +- struct ifla_vf_rss_query_en *ivrssq_en;
4907 ++ if (tb[IFLA_VF_MAC]) {
4908 ++ struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
4909 +
4910 +- ivrssq_en = nla_data(vf);
4911 +- err = -EOPNOTSUPP;
4912 +- if (ops->ndo_set_vf_rss_query_en)
4913 +- err = ops->ndo_set_vf_rss_query_en(dev,
4914 +- ivrssq_en->vf,
4915 +- ivrssq_en->setting);
4916 +- break;
4917 +- }
4918 +- default:
4919 +- err = -EINVAL;
4920 +- break;
4921 +- }
4922 +- if (err)
4923 +- break;
4924 ++ err = -EOPNOTSUPP;
4925 ++ if (ops->ndo_set_vf_mac)
4926 ++ err = ops->ndo_set_vf_mac(dev, ivm->vf,
4927 ++ ivm->mac);
4928 ++ if (err < 0)
4929 ++ return err;
4930 ++ }
4931 ++
4932 ++ if (tb[IFLA_VF_VLAN]) {
4933 ++ struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
4934 ++
4935 ++ err = -EOPNOTSUPP;
4936 ++ if (ops->ndo_set_vf_vlan)
4937 ++ err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
4938 ++ ivv->qos);
4939 ++ if (err < 0)
4940 ++ return err;
4941 ++ }
4942 ++
4943 ++ if (tb[IFLA_VF_TX_RATE]) {
4944 ++ struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
4945 ++ struct ifla_vf_info ivf;
4946 ++
4947 ++ err = -EOPNOTSUPP;
4948 ++ if (ops->ndo_get_vf_config)
4949 ++ err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
4950 ++ if (err < 0)
4951 ++ return err;
4952 ++
4953 ++ err = -EOPNOTSUPP;
4954 ++ if (ops->ndo_set_vf_rate)
4955 ++ err = ops->ndo_set_vf_rate(dev, ivt->vf,
4956 ++ ivf.min_tx_rate,
4957 ++ ivt->rate);
4958 ++ if (err < 0)
4959 ++ return err;
4960 ++ }
4961 ++
4962 ++ if (tb[IFLA_VF_RATE]) {
4963 ++ struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);
4964 ++
4965 ++ err = -EOPNOTSUPP;
4966 ++ if (ops->ndo_set_vf_rate)
4967 ++ err = ops->ndo_set_vf_rate(dev, ivt->vf,
4968 ++ ivt->min_tx_rate,
4969 ++ ivt->max_tx_rate);
4970 ++ if (err < 0)
4971 ++ return err;
4972 + }
4973 ++
4974 ++ if (tb[IFLA_VF_SPOOFCHK]) {
4975 ++ struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
4976 ++
4977 ++ err = -EOPNOTSUPP;
4978 ++ if (ops->ndo_set_vf_spoofchk)
4979 ++ err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
4980 ++ ivs->setting);
4981 ++ if (err < 0)
4982 ++ return err;
4983 ++ }
4984 ++
4985 ++ if (tb[IFLA_VF_LINK_STATE]) {
4986 ++ struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
4987 ++
4988 ++ err = -EOPNOTSUPP;
4989 ++ if (ops->ndo_set_vf_link_state)
4990 ++ err = ops->ndo_set_vf_link_state(dev, ivl->vf,
4991 ++ ivl->link_state);
4992 ++ if (err < 0)
4993 ++ return err;
4994 ++ }
4995 ++
4996 ++ if (tb[IFLA_VF_RSS_QUERY_EN]) {
4997 ++ struct ifla_vf_rss_query_en *ivrssq_en;
4998 ++
4999 ++ err = -EOPNOTSUPP;
5000 ++ ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
5001 ++ if (ops->ndo_set_vf_rss_query_en)
5002 ++ err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
5003 ++ ivrssq_en->setting);
5004 ++ if (err < 0)
5005 ++ return err;
5006 ++ }
5007 ++
5008 + return err;
5009 + }
5010 +
5011 +@@ -1722,14 +1720,21 @@ static int do_setlink(const struct sk_buff *skb,
5012 + }
5013 +
5014 + if (tb[IFLA_VFINFO_LIST]) {
5015 ++ struct nlattr *vfinfo[IFLA_VF_MAX + 1];
5016 + struct nlattr *attr;
5017 + int rem;
5018 ++
5019 + nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
5020 +- if (nla_type(attr) != IFLA_VF_INFO) {
5021 ++ if (nla_type(attr) != IFLA_VF_INFO ||
5022 ++ nla_len(attr) < NLA_HDRLEN) {
5023 + err = -EINVAL;
5024 + goto errout;
5025 + }
5026 +- err = do_setvfinfo(dev, attr);
5027 ++ err = nla_parse_nested(vfinfo, IFLA_VF_MAX, attr,
5028 ++ ifla_vf_policy);
5029 ++ if (err < 0)
5030 ++ goto errout;
5031 ++ err = do_setvfinfo(dev, vfinfo);
5032 + if (err < 0)
5033 + goto errout;
5034 + status |= DO_SETLINK_NOTIFY;
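
The rewritten do_setvfinfo() above consumes a table indexed by attribute type, filled by nla_parse_nested() against ifla_vf_policy, instead of switch()ing over raw nested attributes; that validates lengths centrally and fails on the first error. A toy version of parse-into-table for a flat TLV stream, where the types, lengths, and policy values are invented for illustration:

    #include <stddef.h>
    #include <string.h>

    enum { VF_UNSPEC, VF_MAC, VF_VLAN, VF_MAX = VF_VLAN };

    struct tlv { unsigned char type; unsigned char len; const void *data; };

    static const unsigned char min_len[VF_MAX + 1] = {
        [VF_MAC] = 6, [VF_VLAN] = 2,        /* toy per-type policy */
    };

    /* Validate every attribute once, then expose them by type. */
    static int parse_nested(const struct tlv *attrs, size_t n,
                            const struct tlv *tb[VF_MAX + 1])
    {
        memset(tb, 0, (VF_MAX + 1) * sizeof(tb[0]));
        for (size_t i = 0; i < n; i++) {
            if (attrs[i].type > VF_MAX || attrs[i].len < min_len[attrs[i].type])
                return -1;                  /* policy violation: reject early */
            tb[attrs[i].type] = &attrs[i];  /* later duplicates win, as in nla_parse */
        }
        return 0;
    }

    int main(void)
    {
        static const unsigned char mac[6] = { 0 };
        struct tlv in[] = { { VF_MAC, 6, mac } };
        const struct tlv *tb[VF_MAX + 1];
        if (parse_nested(in, 1, tb) == 0 && tb[VF_MAC]) {
            /* handle MAC, as the tb[IFLA_VF_MAC] branch does above */
        }
        return 0;
    }
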
5035 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
5036 +index 41ec02242ea7..a2e4e47b2839 100644
5037 +--- a/net/core/skbuff.c
5038 ++++ b/net/core/skbuff.c
5039 +@@ -340,7 +340,7 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
5040 +
5041 + if (skb && frag_size) {
5042 + skb->head_frag = 1;
5043 +- if (virt_to_head_page(data)->pfmemalloc)
5044 ++ if (page_is_pfmemalloc(virt_to_head_page(data)))
5045 + skb->pfmemalloc = 1;
5046 + }
5047 + return skb;
5048 +diff --git a/net/dsa/slave.c b/net/dsa/slave.c
5049 +index 827cda560a55..57978c5b2c91 100644
5050 +--- a/net/dsa/slave.c
5051 ++++ b/net/dsa/slave.c
5052 +@@ -732,7 +732,8 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p,
5053 + return -ENODEV;
5054 +
5055 + /* Use already configured phy mode */
5056 +- p->phy_interface = p->phy->interface;
5057 ++ if (p->phy_interface == PHY_INTERFACE_MODE_NA)
5058 ++ p->phy_interface = p->phy->interface;
5059 + phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
5060 + p->phy_interface);
5061 +
5062 +diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
5063 +index 90c0e8386116..574fad9cca05 100644
5064 +--- a/net/ipv4/datagram.c
5065 ++++ b/net/ipv4/datagram.c
5066 +@@ -20,7 +20,7 @@
5067 + #include <net/route.h>
5068 + #include <net/tcp_states.h>
5069 +
5070 +-int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
5071 ++int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
5072 + {
5073 + struct inet_sock *inet = inet_sk(sk);
5074 + struct sockaddr_in *usin = (struct sockaddr_in *) uaddr;
5075 +@@ -39,8 +39,6 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
5076 +
5077 + sk_dst_reset(sk);
5078 +
5079 +- lock_sock(sk);
5080 +-
5081 + oif = sk->sk_bound_dev_if;
5082 + saddr = inet->inet_saddr;
5083 + if (ipv4_is_multicast(usin->sin_addr.s_addr)) {
5084 +@@ -82,9 +80,19 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
5085 + sk_dst_set(sk, &rt->dst);
5086 + err = 0;
5087 + out:
5088 +- release_sock(sk);
5089 + return err;
5090 + }
5091 ++EXPORT_SYMBOL(__ip4_datagram_connect);
5092 ++
5093 ++int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
5094 ++{
5095 ++ int res;
5096 ++
5097 ++ lock_sock(sk);
5098 ++ res = __ip4_datagram_connect(sk, uaddr, addr_len);
5099 ++ release_sock(sk);
5100 ++ return res;
5101 ++}
5102 + EXPORT_SYMBOL(ip4_datagram_connect);
5103 +
5104 + /* Because UDP xmit path can manipulate sk_dst_cache without holding
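
The ip4_datagram_connect() split above is the standard locked-wrapper pattern: the __-prefixed variant assumes the socket lock is held, and the public entry point is just lock, call, unlock. The point of the split, visible in the ipv6/datagram.c hunk further down, is that ip6_datagram_connect() can call __ip4_datagram_connect() for v4-mapped addresses while already holding the lock, without a recursive lock_sock(). The minimal shape, with a pthread mutex standing in for the socket lock:

    #include <pthread.h>

    static pthread_mutex_t sock_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Does the work; caller must hold sock_lock. */
    static int __datagram_connect(int addr)
    {
        return addr ? 0 : -1;
    }

    /* Public entry point: takes the lock around the unlocked worker. */
    int datagram_connect(int addr)
    {
        pthread_mutex_lock(&sock_lock);
        int res = __datagram_connect(addr);
        pthread_mutex_unlock(&sock_lock);
        return res;
    }

    /* A second locked path reuses the worker without re-locking. */
    int datagram_connect_v6(int addr)
    {
        pthread_mutex_lock(&sock_lock);
        int res = __datagram_connect(addr);   /* already under the lock */
        pthread_mutex_unlock(&sock_lock);
        return res;
    }

    int main(void)
    {
        return datagram_connect(1);
    }
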
5105 +diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
5106 +index 09b62e17dd8c..0ca933db1b41 100644
5107 +--- a/net/ipv4/fib_trie.c
5108 ++++ b/net/ipv4/fib_trie.c
5109 +@@ -1780,8 +1780,6 @@ void fib_table_flush_external(struct fib_table *tb)
5110 + if (hlist_empty(&n->leaf)) {
5111 + put_child_root(pn, n->key, NULL);
5112 + node_free(n);
5113 +- } else {
5114 +- leaf_pull_suffix(pn, n);
5115 + }
5116 + }
5117 + }
5118 +@@ -1852,8 +1850,6 @@ int fib_table_flush(struct fib_table *tb)
5119 + if (hlist_empty(&n->leaf)) {
5120 + put_child_root(pn, n->key, NULL);
5121 + node_free(n);
5122 +- } else {
5123 +- leaf_pull_suffix(pn, n);
5124 + }
5125 + }
5126 +
5127 +@@ -2457,7 +2453,7 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
5128 + key = l->key + 1;
5129 + iter->pos++;
5130 +
5131 +- if (pos-- <= 0)
5132 ++ if (--pos <= 0)
5133 + break;
5134 +
5135 + l = NULL;
5136 +diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
5137 +index 8976ca423a07..b27fc401c6a9 100644
5138 +--- a/net/ipv4/inet_connection_sock.c
5139 ++++ b/net/ipv4/inet_connection_sock.c
5140 +@@ -584,7 +584,7 @@ static bool reqsk_queue_unlink(struct request_sock_queue *queue,
5141 + }
5142 +
5143 + spin_unlock(&queue->syn_wait_lock);
5144 +- if (del_timer(&req->rsk_timer))
5145 ++ if (del_timer_sync(&req->rsk_timer))
5146 + reqsk_put(req);
5147 + return found;
5148 + }
5149 +diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
5150 +index cc1da6d9cb35..cae22a1a8777 100644
5151 +--- a/net/ipv4/ip_fragment.c
5152 ++++ b/net/ipv4/ip_fragment.c
5153 +@@ -342,7 +342,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
5154 + ihl = ip_hdrlen(skb);
5155 +
5156 + /* Determine the position of this fragment. */
5157 +- end = offset + skb->len - ihl;
5158 ++ end = offset + skb->len - skb_network_offset(skb) - ihl;
5159 + err = -EINVAL;
5160 +
5161 + /* Is this the final fragment? */
5162 +@@ -372,7 +372,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
5163 + goto err;
5164 +
5165 + err = -ENOMEM;
5166 +- if (!pskb_pull(skb, ihl))
5167 ++ if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
5168 + goto err;
5169 +
5170 + err = pskb_trim_rcsum(skb, end - offset);
5171 +@@ -613,6 +613,9 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
5172 + iph->frag_off = qp->q.max_size ? htons(IP_DF) : 0;
5173 + iph->tot_len = htons(len);
5174 + iph->tos |= ecn;
5175 ++
5176 ++ ip_send_check(iph);
5177 ++
5178 + IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
5179 + qp->q.fragments = NULL;
5180 + qp->q.fragments_tail = NULL;
5181 +diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
5182 +index 4c2c3ba4ba65..626d9e56a6bd 100644
5183 +--- a/net/ipv4/ip_tunnel.c
5184 ++++ b/net/ipv4/ip_tunnel.c
5185 +@@ -586,7 +586,8 @@ int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
5186 + EXPORT_SYMBOL(ip_tunnel_encap);
5187 +
5188 + static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
5189 +- struct rtable *rt, __be16 df)
5190 ++ struct rtable *rt, __be16 df,
5191 ++ const struct iphdr *inner_iph)
5192 + {
5193 + struct ip_tunnel *tunnel = netdev_priv(dev);
5194 + int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
5195 +@@ -603,7 +604,8 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
5196 +
5197 + if (skb->protocol == htons(ETH_P_IP)) {
5198 + if (!skb_is_gso(skb) &&
5199 +- (df & htons(IP_DF)) && mtu < pkt_size) {
5200 ++ (inner_iph->frag_off & htons(IP_DF)) &&
5201 ++ mtu < pkt_size) {
5202 + memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
5203 + icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
5204 + return -E2BIG;
5205 +@@ -737,7 +739,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
5206 + goto tx_error;
5207 + }
5208 +
5209 +- if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off)) {
5210 ++ if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph)) {
5211 + ip_rt_put(rt);
5212 + goto tx_error;
5213 + }
5214 +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
5215 +index fc1c658ec6c1..441ca6f38981 100644
5216 +--- a/net/ipv4/tcp_ipv4.c
5217 ++++ b/net/ipv4/tcp_ipv4.c
5218 +@@ -1348,7 +1348,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
5219 + req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr);
5220 + if (req) {
5221 + nsk = tcp_check_req(sk, skb, req, false);
5222 +- if (!nsk)
5223 ++ if (!nsk || nsk == sk)
5224 + reqsk_put(req);
5225 + return nsk;
5226 + }
5227 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
5228 +index 83aa604f9273..1b8c5ba7d5f7 100644
5229 +--- a/net/ipv4/udp.c
5230 ++++ b/net/ipv4/udp.c
5231 +@@ -1995,12 +1995,19 @@ void udp_v4_early_demux(struct sk_buff *skb)
5232 +
5233 + skb->sk = sk;
5234 + skb->destructor = sock_efree;
5235 +- dst = sk->sk_rx_dst;
5236 ++ dst = READ_ONCE(sk->sk_rx_dst);
5237 +
5238 + if (dst)
5239 + dst = dst_check(dst, 0);
5240 +- if (dst)
5241 +- skb_dst_set_noref(skb, dst);
5242 ++ if (dst) {
5243 ++ /* DST_NOCACHE can not be used without taking a reference */
5244 ++ if (dst->flags & DST_NOCACHE) {
5245 ++ if (likely(atomic_inc_not_zero(&dst->__refcnt)))
5246 ++ skb_dst_set(skb, dst);
5247 ++ } else {
5248 ++ skb_dst_set_noref(skb, dst);
5249 ++ }
5250 ++ }
5251 + }
5252 +
5253 + int udp_rcv(struct sk_buff *skb)
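
The udp_v4_early_demux() hunk above refuses to stash a DST_NOCACHE route without a reference, and takes that reference with atomic_inc_not_zero(): increment the refcount only if it has not already dropped to zero, i.e. only if the object is not already being torn down. The same primitive sketched in C11 atomics:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Take a reference iff the object is still live (refcnt != 0). */
    static bool ref_get_not_zero(atomic_int *refcnt)
    {
        int old = atomic_load(refcnt);
        while (old != 0) {
            /* On failure, 'old' is reloaded with the current value. */
            if (atomic_compare_exchange_weak(refcnt, &old, old + 1))
                return true;
        }
        return false;
    }

    int main(void)
    {
        atomic_int live = 1, dying = 0;
        printf("live: %d, dying: %d\n",
               ref_get_not_zero(&live), ref_get_not_zero(&dying));
        return 0;
    }
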
5254 +diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
5255 +index 62d908e64eeb..b10a88986a98 100644
5256 +--- a/net/ipv6/datagram.c
5257 ++++ b/net/ipv6/datagram.c
5258 +@@ -40,7 +40,7 @@ static bool ipv6_mapped_addr_any(const struct in6_addr *a)
5259 + return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0);
5260 + }
5261 +
5262 +-int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
5263 ++static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
5264 + {
5265 + struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
5266 + struct inet_sock *inet = inet_sk(sk);
5267 +@@ -56,7 +56,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
5268 + if (usin->sin6_family == AF_INET) {
5269 + if (__ipv6_only_sock(sk))
5270 + return -EAFNOSUPPORT;
5271 +- err = ip4_datagram_connect(sk, uaddr, addr_len);
5272 ++ err = __ip4_datagram_connect(sk, uaddr, addr_len);
5273 + goto ipv4_connected;
5274 + }
5275 +
5276 +@@ -98,9 +98,9 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
5277 + sin.sin_addr.s_addr = daddr->s6_addr32[3];
5278 + sin.sin_port = usin->sin6_port;
5279 +
5280 +- err = ip4_datagram_connect(sk,
5281 +- (struct sockaddr *) &sin,
5282 +- sizeof(sin));
5283 ++ err = __ip4_datagram_connect(sk,
5284 ++ (struct sockaddr *) &sin,
5285 ++ sizeof(sin));
5286 +
5287 + ipv4_connected:
5288 + if (err)
5289 +@@ -204,6 +204,16 @@ out:
5290 + fl6_sock_release(flowlabel);
5291 + return err;
5292 + }
5293 ++
5294 ++int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
5295 ++{
5296 ++ int res;
5297 ++
5298 ++ lock_sock(sk);
5299 ++ res = __ip6_datagram_connect(sk, uaddr, addr_len);
5300 ++ release_sock(sk);
5301 ++ return res;
5302 ++}
5303 + EXPORT_SYMBOL_GPL(ip6_datagram_connect);
5304 +
5305 + int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *uaddr,
5306 +diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
5307 +index f2e464eba5ef..57990c929cd8 100644
5308 +--- a/net/ipv6/ip6_input.c
5309 ++++ b/net/ipv6/ip6_input.c
5310 +@@ -331,10 +331,10 @@ int ip6_mc_input(struct sk_buff *skb)
5311 + if (offset < 0)
5312 + goto out;
5313 +
5314 +- if (!ipv6_is_mld(skb, nexthdr, offset))
5315 +- goto out;
5316 ++ if (ipv6_is_mld(skb, nexthdr, offset))
5317 ++ deliver = true;
5318 +
5319 +- deliver = true;
5320 ++ goto out;
5321 + }
5322 + /* unknown RA - process it normally */
5323 + }
5324 +diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
5325 +index e893cd18612f..08b62047c67f 100644
5326 +--- a/net/ipv6/ip6_offload.c
5327 ++++ b/net/ipv6/ip6_offload.c
5328 +@@ -292,8 +292,6 @@ static struct packet_offload ipv6_packet_offload __read_mostly = {
5329 + static const struct net_offload sit_offload = {
5330 + .callbacks = {
5331 + .gso_segment = ipv6_gso_segment,
5332 +- .gro_receive = ipv6_gro_receive,
5333 +- .gro_complete = ipv6_gro_complete,
5334 + },
5335 + };
5336 +
5337 +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
5338 +index 3adffb300238..e541d68dba8b 100644
5339 +--- a/net/ipv6/tcp_ipv6.c
5340 ++++ b/net/ipv6/tcp_ipv6.c
5341 +@@ -946,7 +946,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
5342 + &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
5343 + if (req) {
5344 + nsk = tcp_check_req(sk, skb, req, false);
5345 +- if (!nsk)
5346 ++ if (!nsk || nsk == sk)
5347 + reqsk_put(req);
5348 + return nsk;
5349 + }
5350 +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
5351 +index 667111ee6a20..5787f15a3a12 100644
5352 +--- a/net/mac80211/tx.c
5353 ++++ b/net/mac80211/tx.c
5354 +@@ -301,9 +301,6 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
5355 + if (tx->sdata->vif.type == NL80211_IFTYPE_WDS)
5356 + return TX_CONTINUE;
5357 +
5358 +- if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
5359 +- return TX_CONTINUE;
5360 +-
5361 + if (tx->flags & IEEE80211_TX_PS_BUFFERED)
5362 + return TX_CONTINUE;
5363 +
5364 +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
5365 +index bf6e76643f78..4856d975492d 100644
5366 +--- a/net/netlink/af_netlink.c
5367 ++++ b/net/netlink/af_netlink.c
5368 +@@ -355,25 +355,52 @@ err1:
5369 + return NULL;
5370 + }
5371 +
5372 ++
5373 ++static void
5374 ++__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec,
5375 ++ unsigned int order)
5376 ++{
5377 ++ struct netlink_sock *nlk = nlk_sk(sk);
5378 ++ struct sk_buff_head *queue;
5379 ++ struct netlink_ring *ring;
5380 ++
5381 ++ queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
5382 ++ ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
5383 ++
5384 ++ spin_lock_bh(&queue->lock);
5385 ++
5386 ++ ring->frame_max = req->nm_frame_nr - 1;
5387 ++ ring->head = 0;
5388 ++ ring->frame_size = req->nm_frame_size;
5389 ++ ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE;
5390 ++
5391 ++ swap(ring->pg_vec_len, req->nm_block_nr);
5392 ++ swap(ring->pg_vec_order, order);
5393 ++ swap(ring->pg_vec, pg_vec);
5394 ++
5395 ++ __skb_queue_purge(queue);
5396 ++ spin_unlock_bh(&queue->lock);
5397 ++
5398 ++ WARN_ON(atomic_read(&nlk->mapped));
5399 ++
5400 ++ if (pg_vec)
5401 ++ free_pg_vec(pg_vec, order, req->nm_block_nr);
5402 ++}
5403 ++
5404 + static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
5405 +- bool closing, bool tx_ring)
5406 ++ bool tx_ring)
5407 + {
5408 + struct netlink_sock *nlk = nlk_sk(sk);
5409 + struct netlink_ring *ring;
5410 +- struct sk_buff_head *queue;
5411 + void **pg_vec = NULL;
5412 + unsigned int order = 0;
5413 +- int err;
5414 +
5415 + ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
5416 +- queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
5417 +
5418 +- if (!closing) {
5419 +- if (atomic_read(&nlk->mapped))
5420 +- return -EBUSY;
5421 +- if (atomic_read(&ring->pending))
5422 +- return -EBUSY;
5423 +- }
5424 ++ if (atomic_read(&nlk->mapped))
5425 ++ return -EBUSY;
5426 ++ if (atomic_read(&ring->pending))
5427 ++ return -EBUSY;
5428 +
5429 + if (req->nm_block_nr) {
5430 + if (ring->pg_vec != NULL)
5431 +@@ -405,31 +432,19 @@ static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
5432 + return -EINVAL;
5433 + }
5434 +
5435 +- err = -EBUSY;
5436 + mutex_lock(&nlk->pg_vec_lock);
5437 +- if (closing || atomic_read(&nlk->mapped) == 0) {
5438 +- err = 0;
5439 +- spin_lock_bh(&queue->lock);
5440 +-
5441 +- ring->frame_max = req->nm_frame_nr - 1;
5442 +- ring->head = 0;
5443 +- ring->frame_size = req->nm_frame_size;
5444 +- ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE;
5445 +-
5446 +- swap(ring->pg_vec_len, req->nm_block_nr);
5447 +- swap(ring->pg_vec_order, order);
5448 +- swap(ring->pg_vec, pg_vec);
5449 +-
5450 +- __skb_queue_purge(queue);
5451 +- spin_unlock_bh(&queue->lock);
5452 +-
5453 +- WARN_ON(atomic_read(&nlk->mapped));
5454 ++ if (atomic_read(&nlk->mapped) == 0) {
5455 ++ __netlink_set_ring(sk, req, tx_ring, pg_vec, order);
5456 ++ mutex_unlock(&nlk->pg_vec_lock);
5457 ++ return 0;
5458 + }
5459 ++
5460 + mutex_unlock(&nlk->pg_vec_lock);
5461 +
5462 + if (pg_vec)
5463 + free_pg_vec(pg_vec, order, req->nm_block_nr);
5464 +- return err;
5465 ++
5466 ++ return -EBUSY;
5467 + }
5468 +
5469 + static void netlink_mm_open(struct vm_area_struct *vma)
5470 +@@ -898,10 +913,10 @@ static void netlink_sock_destruct(struct sock *sk)
5471 +
5472 + memset(&req, 0, sizeof(req));
5473 + if (nlk->rx_ring.pg_vec)
5474 +- netlink_set_ring(sk, &req, true, false);
5475 ++ __netlink_set_ring(sk, &req, false, NULL, 0);
5476 + memset(&req, 0, sizeof(req));
5477 + if (nlk->tx_ring.pg_vec)
5478 +- netlink_set_ring(sk, &req, true, true);
5479 ++ __netlink_set_ring(sk, &req, true, NULL, 0);
5480 + }
5481 + #endif /* CONFIG_NETLINK_MMAP */
5482 +
5483 +@@ -1079,6 +1094,11 @@ static int netlink_insert(struct sock *sk, u32 portid)
5484 +
5485 + err = __netlink_insert(table, sk);
5486 + if (err) {
5487 ++ /* In case the hashtable backend returns with -EBUSY
5488 ++ * from here, it must not escape to the caller.
5489 ++ */
5490 ++ if (unlikely(err == -EBUSY))
5491 ++ err = -EOVERFLOW;
5492 + if (err == -EEXIST)
5493 + err = -EADDRINUSE;
5494 + nlk_sk(sk)->portid = 0;
5495 +@@ -2197,7 +2217,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
5496 + return -EINVAL;
5497 + if (copy_from_user(&req, optval, sizeof(req)))
5498 + return -EFAULT;
5499 +- err = netlink_set_ring(sk, &req, false,
5500 ++ err = netlink_set_ring(sk, &req,
5501 + optname == NETLINK_TX_RING);
5502 + break;
5503 + }
5504 +diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c
5505 +index ed54ec533836..b33fed6d1584 100644
5506 +--- a/net/nfc/nci/hci.c
5507 ++++ b/net/nfc/nci/hci.c
5508 +@@ -233,7 +233,7 @@ int nci_hci_send_cmd(struct nci_dev *ndev, u8 gate, u8 cmd,
5509 + r = nci_request(ndev, nci_hci_send_data_req, (unsigned long)&data,
5510 + msecs_to_jiffies(NCI_DATA_TIMEOUT));
5511 +
5512 +- if (r == NCI_STATUS_OK)
5513 ++ if (r == NCI_STATUS_OK && skb)
5514 + *skb = conn_info->rx_skb;
5515 +
5516 + return r;
5517 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
5518 +index fe1610ddeacf..e1ea5d43b01e 100644
5519 +--- a/net/packet/af_packet.c
5520 ++++ b/net/packet/af_packet.c
5521 +@@ -2307,7 +2307,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
5522 + }
5523 + tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
5524 + addr, hlen);
5525 +- if (tp_len > dev->mtu + dev->hard_header_len) {
5526 ++ if (likely(tp_len >= 0) &&
5527 ++ tp_len > dev->mtu + dev->hard_header_len) {
5528 + struct ethhdr *ehdr;
5529 + /* Earlier code assumed this would be a VLAN pkt,
5530 + * double-check this now that we have the actual
5531 +@@ -2688,7 +2689,7 @@ static int packet_release(struct socket *sock)
5532 + static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
5533 + {
5534 + struct packet_sock *po = pkt_sk(sk);
5535 +- const struct net_device *dev_curr;
5536 ++ struct net_device *dev_curr;
5537 + __be16 proto_curr;
5538 + bool need_rehook;
5539 +
5540 +@@ -2712,15 +2713,13 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
5541 +
5542 + po->num = proto;
5543 + po->prot_hook.type = proto;
5544 +-
5545 +- if (po->prot_hook.dev)
5546 +- dev_put(po->prot_hook.dev);
5547 +-
5548 + po->prot_hook.dev = dev;
5549 +
5550 + po->ifindex = dev ? dev->ifindex : 0;
5551 + packet_cached_dev_assign(po, dev);
5552 + }
5553 ++ if (dev_curr)
5554 ++ dev_put(dev_curr);
5555 +
5556 + if (proto == 0 || !need_rehook)
5557 + goto out_unlock;
5558 +diff --git a/net/rds/info.c b/net/rds/info.c
5559 +index 9a6b4f66187c..140a44a5f7b7 100644
5560 +--- a/net/rds/info.c
5561 ++++ b/net/rds/info.c
5562 +@@ -176,7 +176,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval,
5563 +
5564 + /* check for all kinds of wrapping and the like */
5565 + start = (unsigned long)optval;
5566 +- if (len < 0 || len + PAGE_SIZE - 1 < len || start + len < start) {
5567 ++ if (len < 0 || len > INT_MAX - PAGE_SIZE + 1 || start + len < start) {
5568 + ret = -EINVAL;
5569 + goto out;
5570 + }
5571 +diff --git a/net/sched/act_api.c b/net/sched/act_api.c
5572 +index 3d43e4979f27..f8d9c2a2c451 100644
5573 +--- a/net/sched/act_api.c
5574 ++++ b/net/sched/act_api.c
5575 +@@ -45,7 +45,7 @@ void tcf_hash_destroy(struct tc_action *a)
5576 + }
5577 + EXPORT_SYMBOL(tcf_hash_destroy);
5578 +
5579 +-int tcf_hash_release(struct tc_action *a, int bind)
5580 ++int __tcf_hash_release(struct tc_action *a, bool bind, bool strict)
5581 + {
5582 + struct tcf_common *p = a->priv;
5583 + int ret = 0;
5584 +@@ -53,7 +53,7 @@ int tcf_hash_release(struct tc_action *a, int bind)
5585 + if (p) {
5586 + if (bind)
5587 + p->tcfc_bindcnt--;
5588 +- else if (p->tcfc_bindcnt > 0)
5589 ++ else if (strict && p->tcfc_bindcnt > 0)
5590 + return -EPERM;
5591 +
5592 + p->tcfc_refcnt--;
5593 +@@ -64,9 +64,10 @@ int tcf_hash_release(struct tc_action *a, int bind)
5594 + ret = 1;
5595 + }
5596 + }
5597 ++
5598 + return ret;
5599 + }
5600 +-EXPORT_SYMBOL(tcf_hash_release);
5601 ++EXPORT_SYMBOL(__tcf_hash_release);
5602 +
5603 + static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
5604 + struct tc_action *a)
5605 +@@ -136,7 +137,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a)
5606 + head = &hinfo->htab[tcf_hash(i, hinfo->hmask)];
5607 + hlist_for_each_entry_safe(p, n, head, tcfc_head) {
5608 + a->priv = p;
5609 +- ret = tcf_hash_release(a, 0);
5610 ++ ret = __tcf_hash_release(a, false, true);
5611 + if (ret == ACT_P_DELETED) {
5612 + module_put(a->ops->owner);
5613 + n_i++;
5614 +@@ -413,7 +414,7 @@ int tcf_action_destroy(struct list_head *actions, int bind)
5615 + int ret = 0;
5616 +
5617 + list_for_each_entry_safe(a, tmp, actions, list) {
5618 +- ret = tcf_hash_release(a, bind);
5619 ++ ret = __tcf_hash_release(a, bind, true);
5620 + if (ret == ACT_P_DELETED)
5621 + module_put(a->ops->owner);
5622 + else if (ret < 0)
5623 +diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
5624 +index dc6a2d324bd8..521ffca91228 100644
5625 +--- a/net/sched/act_bpf.c
5626 ++++ b/net/sched/act_bpf.c
5627 +@@ -27,9 +27,10 @@
5628 + struct tcf_bpf_cfg {
5629 + struct bpf_prog *filter;
5630 + struct sock_filter *bpf_ops;
5631 +- char *bpf_name;
5632 ++ const char *bpf_name;
5633 + u32 bpf_fd;
5634 + u16 bpf_num_ops;
5635 ++ bool is_ebpf;
5636 + };
5637 +
5638 + static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
5639 +@@ -200,6 +201,7 @@ static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
5640 + cfg->bpf_ops = bpf_ops;
5641 + cfg->bpf_num_ops = bpf_num_ops;
5642 + cfg->filter = fp;
5643 ++ cfg->is_ebpf = false;
5644 +
5645 + return 0;
5646 + }
5647 +@@ -234,18 +236,40 @@ static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
5648 + cfg->bpf_fd = bpf_fd;
5649 + cfg->bpf_name = name;
5650 + cfg->filter = fp;
5651 ++ cfg->is_ebpf = true;
5652 +
5653 + return 0;
5654 + }
5655 +
5656 ++static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg)
5657 ++{
5658 ++ if (cfg->is_ebpf)
5659 ++ bpf_prog_put(cfg->filter);
5660 ++ else
5661 ++ bpf_prog_destroy(cfg->filter);
5662 ++
5663 ++ kfree(cfg->bpf_ops);
5664 ++ kfree(cfg->bpf_name);
5665 ++}
5666 ++
5667 ++static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
5668 ++ struct tcf_bpf_cfg *cfg)
5669 ++{
5670 ++ cfg->is_ebpf = tcf_bpf_is_ebpf(prog);
5671 ++ cfg->filter = prog->filter;
5672 ++
5673 ++ cfg->bpf_ops = prog->bpf_ops;
5674 ++ cfg->bpf_name = prog->bpf_name;
5675 ++}
5676 ++
5677 + static int tcf_bpf_init(struct net *net, struct nlattr *nla,
5678 + struct nlattr *est, struct tc_action *act,
5679 + int replace, int bind)
5680 + {
5681 + struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
5682 ++ struct tcf_bpf_cfg cfg, old;
5683 + struct tc_act_bpf *parm;
5684 + struct tcf_bpf *prog;
5685 +- struct tcf_bpf_cfg cfg;
5686 + bool is_bpf, is_ebpf;
5687 + int ret;
5688 +
5689 +@@ -294,6 +318,9 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
5690 + prog = to_bpf(act);
5691 + spin_lock_bh(&prog->tcf_lock);
5692 +
5693 ++ if (ret != ACT_P_CREATED)
5694 ++ tcf_bpf_prog_fill_cfg(prog, &old);
5695 ++
5696 + prog->bpf_ops = cfg.bpf_ops;
5697 + prog->bpf_name = cfg.bpf_name;
5698 +
5699 +@@ -309,29 +336,22 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
5700 +
5701 + if (ret == ACT_P_CREATED)
5702 + tcf_hash_insert(act);
5703 ++ else
5704 ++ tcf_bpf_cfg_cleanup(&old);
5705 +
5706 + return ret;
5707 +
5708 + destroy_fp:
5709 +- if (is_ebpf)
5710 +- bpf_prog_put(cfg.filter);
5711 +- else
5712 +- bpf_prog_destroy(cfg.filter);
5713 +-
5714 +- kfree(cfg.bpf_ops);
5715 +- kfree(cfg.bpf_name);
5716 +-
5717 ++ tcf_bpf_cfg_cleanup(&cfg);
5718 + return ret;
5719 + }
5720 +
5721 + static void tcf_bpf_cleanup(struct tc_action *act, int bind)
5722 + {
5723 +- const struct tcf_bpf *prog = act->priv;
5724 ++ struct tcf_bpf_cfg tmp;
5725 +
5726 +- if (tcf_bpf_is_ebpf(prog))
5727 +- bpf_prog_put(prog->filter);
5728 +- else
5729 +- bpf_prog_destroy(prog->filter);
5730 ++ tcf_bpf_prog_fill_cfg(act->priv, &tmp);
5731 ++ tcf_bpf_cfg_cleanup(&tmp);
5732 + }
5733 +
5734 + static struct tc_action_ops act_bpf_ops __read_mostly = {
5735 +diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
5736 +index 91bd9c19471d..c0b86f2bfe22 100644
5737 +--- a/net/sched/cls_bpf.c
5738 ++++ b/net/sched/cls_bpf.c
5739 +@@ -364,7 +364,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
5740 + goto errout;
5741 +
5742 + if (oldprog) {
5743 +- list_replace_rcu(&prog->link, &oldprog->link);
5744 ++ list_replace_rcu(&oldprog->link, &prog->link);
5745 + tcf_unbind_filter(tp, &oldprog->res);
5746 + call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);
5747 + } else {
5748 +diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
5749 +index a620c4e288a5..75df923f5c03 100644
5750 +--- a/net/sched/cls_flow.c
5751 ++++ b/net/sched/cls_flow.c
5752 +@@ -419,6 +419,8 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
5753 + if (!fnew)
5754 + goto err2;
5755 +
5756 ++ tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
5757 ++
5758 + fold = (struct flow_filter *)*arg;
5759 + if (fold) {
5760 + err = -EINVAL;
5761 +@@ -480,7 +482,6 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
5762 + fnew->mask = ~0U;
5763 + fnew->tp = tp;
5764 + get_random_bytes(&fnew->hashrnd, 4);
5765 +- tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
5766 + }
5767 +
5768 + fnew->perturb_timer.function = flow_perturbation;
5769 +@@ -520,7 +521,7 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
5770 + if (*arg == 0)
5771 + list_add_tail_rcu(&fnew->list, &head->filters);
5772 + else
5773 +- list_replace_rcu(&fnew->list, &fold->list);
5774 ++ list_replace_rcu(&fold->list, &fnew->list);
5775 +
5776 + *arg = (unsigned long)fnew;
5777 +
5778 +diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
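
The one-line cls_bpf and cls_flow fixes above are the same bug: list_replace_rcu() takes (old, new), and both call sites had the arguments swapped, splicing the new node's uninitialized links into the list instead of replacing the old entry. The non-RCU version of the operation makes the asymmetry obvious:

    struct list_node { struct list_node *prev, *next; };

    /* Replace 'old' with 'new' in place; mirrors list_replace(old, new).
     * Only 'old' must currently be linked; 'new' is written, never read,
     * so swapping the arguments corrupts the list. */
    static void list_replace(struct list_node *old, struct list_node *new)
    {
        new->next = old->next;
        new->next->prev = new;
        new->prev = old->prev;
        new->prev->next = new;
    }

    int main(void)
    {
        struct list_node head, a, b;
        head.next = &a; head.prev = &a;
        a.next = &head; a.prev = &head;     /* circular list: head <-> a */
        list_replace(&a, &b);               /* now head <-> b */
        return (head.next == &b && b.next == &head) ? 0 : 1;
    }
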
5779 +index c244c45b78d7..9291598b5aad 100644
5780 +--- a/net/sched/sch_fq_codel.c
5781 ++++ b/net/sched/sch_fq_codel.c
5782 +@@ -162,10 +162,10 @@ static unsigned int fq_codel_drop(struct Qdisc *sch)
5783 + skb = dequeue_head(flow);
5784 + len = qdisc_pkt_len(skb);
5785 + q->backlogs[idx] -= len;
5786 +- kfree_skb(skb);
5787 + sch->q.qlen--;
5788 + qdisc_qstats_drop(sch);
5789 + qdisc_qstats_backlog_dec(sch, skb);
5790 ++ kfree_skb(skb);
5791 + flow->dropped++;
5792 + return idx;
5793 + }
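
The fq_codel_drop() reorder above is a use-after-free fix: qdisc_qstats_backlog_dec() still reads the skb's length, so kfree_skb() has to move after the last access. The rule in its smallest form:

    #include <stdlib.h>

    struct pkt { unsigned int len; };

    static unsigned long backlog_bytes;

    static void drop_pkt(struct pkt *p)
    {
        backlog_bytes -= p->len;   /* every read of *p ... */
        free(p);                   /* ... strictly before the object is freed */
    }

    int main(void)
    {
        struct pkt *p = malloc(sizeof(*p));
        if (!p)
            return 1;
        p->len = 1500;
        backlog_bytes = 1500;
        drop_pkt(p);
        return backlog_bytes == 0 ? 0 : 1;
    }
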
5794 +diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
5795 +index 1d4fe24af06a..d109d308ec3a 100644
5796 +--- a/net/sunrpc/xprt.c
5797 ++++ b/net/sunrpc/xprt.c
5798 +@@ -611,6 +611,7 @@ static void xprt_autoclose(struct work_struct *work)
5799 + xprt->ops->close(xprt);
5800 + clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
5801 + xprt_release_write(xprt, NULL);
5802 ++ wake_up_bit(&xprt->state, XPRT_LOCKED);
5803 + }
5804 +
5805 + /**
5806 +@@ -720,6 +721,7 @@ void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
5807 + xprt->ops->release_xprt(xprt, NULL);
5808 + out:
5809 + spin_unlock_bh(&xprt->transport_lock);
5810 ++ wake_up_bit(&xprt->state, XPRT_LOCKED);
5811 + }
5812 +
5813 + /**
5814 +@@ -1389,6 +1391,10 @@ out:
5815 + static void xprt_destroy(struct rpc_xprt *xprt)
5816 + {
5817 + dprintk("RPC: destroying transport %p\n", xprt);
5818 ++
5819 ++ /* Exclude transport connect/disconnect handlers */
5820 ++ wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);
5821 ++
5822 + del_timer_sync(&xprt->timer);
5823 +
5824 + rpc_xprt_debugfs_unregister(xprt);
5825 +diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
5826 +index 66891e32c5e3..5e3ad598d3f5 100644
5827 +--- a/net/sunrpc/xprtsock.c
5828 ++++ b/net/sunrpc/xprtsock.c
5829 +@@ -834,6 +834,7 @@ static void xs_reset_transport(struct sock_xprt *transport)
5830 + sk->sk_user_data = NULL;
5831 +
5832 + xs_restore_old_callbacks(transport, sk);
5833 ++ xprt_clear_connected(xprt);
5834 + write_unlock_bh(&sk->sk_callback_lock);
5835 + xs_sock_reset_connection_flags(xprt);
5836 +
5837 +@@ -1433,6 +1434,7 @@ out:
5838 + static void xs_tcp_state_change(struct sock *sk)
5839 + {
5840 + struct rpc_xprt *xprt;
5841 ++ struct sock_xprt *transport;
5842 +
5843 + read_lock_bh(&sk->sk_callback_lock);
5844 + if (!(xprt = xprt_from_sock(sk)))
5845 +@@ -1444,13 +1446,12 @@ static void xs_tcp_state_change(struct sock *sk)
5846 + sock_flag(sk, SOCK_ZAPPED),
5847 + sk->sk_shutdown);
5848 +
5849 ++ transport = container_of(xprt, struct sock_xprt, xprt);
5850 + trace_rpc_socket_state_change(xprt, sk->sk_socket);
5851 + switch (sk->sk_state) {
5852 + case TCP_ESTABLISHED:
5853 + spin_lock(&xprt->transport_lock);
5854 + if (!xprt_test_and_set_connected(xprt)) {
5855 +- struct sock_xprt *transport = container_of(xprt,
5856 +- struct sock_xprt, xprt);
5857 +
5858 + /* Reset TCP record info */
5859 + transport->tcp_offset = 0;
5860 +@@ -1459,6 +1460,8 @@ static void xs_tcp_state_change(struct sock *sk)
5861 + transport->tcp_flags =
5862 + TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID;
5863 + xprt->connect_cookie++;
5864 ++ clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
5865 ++ xprt_clear_connecting(xprt);
5866 +
5867 + xprt_wake_pending_tasks(xprt, -EAGAIN);
5868 + }
5869 +@@ -1494,6 +1497,9 @@ static void xs_tcp_state_change(struct sock *sk)
5870 + smp_mb__after_atomic();
5871 + break;
5872 + case TCP_CLOSE:
5873 ++ if (test_and_clear_bit(XPRT_SOCK_CONNECTING,
5874 ++ &transport->sock_state))
5875 ++ xprt_clear_connecting(xprt);
5876 + xs_sock_mark_closed(xprt);
5877 + }
5878 + out:
5879 +@@ -2110,6 +2116,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
5880 + /* Tell the socket layer to start connecting... */
5881 + xprt->stat.connect_count++;
5882 + xprt->stat.connect_start = jiffies;
5883 ++ set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
5884 + ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
5885 + switch (ret) {
5886 + case 0:
5887 +@@ -2174,7 +2181,6 @@ static void xs_tcp_setup_socket(struct work_struct *work)
5888 + case -EINPROGRESS:
5889 + case -EALREADY:
5890 + xprt_unlock_connect(xprt, transport);
5891 +- xprt_clear_connecting(xprt);
5892 + return;
5893 + case -EINVAL:
5894 + /* Happens, for instance, if the user specified a link
5895 +@@ -2216,13 +2222,14 @@ static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
5896 +
5897 + WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport));
5898 +
5899 +- /* Start by resetting any existing state */
5900 +- xs_reset_transport(transport);
5901 +-
5902 +- if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) {
5903 ++ if (transport->sock != NULL) {
5904 + dprintk("RPC: xs_connect delayed xprt %p for %lu "
5905 + "seconds\n",
5906 + xprt, xprt->reestablish_timeout / HZ);
5907 ++
5908 ++ /* Start by resetting any existing state */
5909 ++ xs_reset_transport(transport);
5910 ++
5911 + queue_delayed_work(rpciod_workqueue,
5912 + &transport->connect_worker,
5913 + xprt->reestablish_timeout);
5914 +diff --git a/net/tipc/socket.c b/net/tipc/socket.c
5915 +index f485600c4507..20cc6df07157 100644
5916 +--- a/net/tipc/socket.c
5917 ++++ b/net/tipc/socket.c
5918 +@@ -2009,6 +2009,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
5919 + res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1);
5920 + if (res)
5921 + goto exit;
5922 ++ security_sk_clone(sock->sk, new_sock->sk);
5923 +
5924 + new_sk = new_sock->sk;
5925 + new_tsock = tipc_sk(new_sk);
5926 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5927 +index 91f6928560e1..6fe862594e9b 100644
5928 +--- a/sound/pci/hda/patch_realtek.c
5929 ++++ b/sound/pci/hda/patch_realtek.c
5930 +@@ -1134,7 +1134,7 @@ static const struct hda_fixup alc880_fixups[] = {
5931 + /* override all pins as BIOS on old Amilo is broken */
5932 + .type = HDA_FIXUP_PINS,
5933 + .v.pins = (const struct hda_pintbl[]) {
5934 +- { 0x14, 0x0121411f }, /* HP */
5935 ++ { 0x14, 0x0121401f }, /* HP */
5936 + { 0x15, 0x99030120 }, /* speaker */
5937 + { 0x16, 0x99030130 }, /* bass speaker */
5938 + { 0x17, 0x411111f0 }, /* N/A */
5939 +@@ -1154,7 +1154,7 @@ static const struct hda_fixup alc880_fixups[] = {
5940 + /* almost compatible with FUJITSU, but no bass and SPDIF */
5941 + .type = HDA_FIXUP_PINS,
5942 + .v.pins = (const struct hda_pintbl[]) {
5943 +- { 0x14, 0x0121411f }, /* HP */
5944 ++ { 0x14, 0x0121401f }, /* HP */
5945 + { 0x15, 0x99030120 }, /* speaker */
5946 + { 0x16, 0x411111f0 }, /* N/A */
5947 + { 0x17, 0x411111f0 }, /* N/A */
5948 +@@ -1363,7 +1363,7 @@ static const struct snd_pci_quirk alc880_fixup_tbl[] = {
5949 + SND_PCI_QUIRK(0x161f, 0x203d, "W810", ALC880_FIXUP_W810),
5950 + SND_PCI_QUIRK(0x161f, 0x205d, "Medion Rim 2150", ALC880_FIXUP_MEDION_RIM),
5951 + SND_PCI_QUIRK(0x1631, 0xe011, "PB 13201056", ALC880_FIXUP_6ST_AUTOMUTE),
5952 +- SND_PCI_QUIRK(0x1734, 0x107c, "FSC F1734", ALC880_FIXUP_F1734),
5953 ++ SND_PCI_QUIRK(0x1734, 0x107c, "FSC Amilo M1437", ALC880_FIXUP_FUJITSU),
5954 + SND_PCI_QUIRK(0x1734, 0x1094, "FSC Amilo M1451G", ALC880_FIXUP_FUJITSU),
5955 + SND_PCI_QUIRK(0x1734, 0x10ac, "FSC AMILO Xi 1526", ALC880_FIXUP_F1734),
5956 + SND_PCI_QUIRK(0x1734, 0x10b0, "FSC Amilo Pi1556", ALC880_FIXUP_FUJITSU),
5957 +@@ -5118,8 +5118,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5958 + SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
5959 + SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5960 + SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5961 +- SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
5962 + SND_PCI_QUIRK(0x1028, 0x06db, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
5963 ++ SND_PCI_QUIRK(0x1028, 0x06dd, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
5964 ++ SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
5965 ++ SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
5966 ++ SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
5967 + SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5968 + SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5969 + SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
5970 +@@ -6454,6 +6457,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
5971 + SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
5972 + SND_PCI_QUIRK(0x1028, 0x05fe, "Dell XPS 15", ALC668_FIXUP_DELL_XPS13),
5973 + SND_PCI_QUIRK(0x1028, 0x060a, "Dell XPS 13", ALC668_FIXUP_DELL_XPS13),
5974 ++ SND_PCI_QUIRK(0x1028, 0x060d, "Dell M3800", ALC668_FIXUP_DELL_XPS13),
5975 + SND_PCI_QUIRK(0x1028, 0x0625, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
5976 + SND_PCI_QUIRK(0x1028, 0x0626, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
5977 + SND_PCI_QUIRK(0x1028, 0x0696, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
5978 +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
5979 +index 8b7e391dd0b8..cd8ed2e393a2 100644
5980 +--- a/sound/usb/mixer.c
5981 ++++ b/sound/usb/mixer.c
5982 +@@ -2522,7 +2522,7 @@ static int restore_mixer_value(struct usb_mixer_elem_list *list)
5983 + for (c = 0; c < MAX_CHANNELS; c++) {
5984 + if (!(cval->cmask & (1 << c)))
5985 + continue;
5986 +- if (cval->cached & (1 << c)) {
5987 ++ if (cval->cached & (1 << (c + 1))) {
5988 + err = snd_usb_set_cur_mix_value(cval, c + 1, idx,
5989 + cval->cache_val[idx]);
5990 + if (err < 0)