Gentoo Archives: gentoo-commits

From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Thu, 30 Nov 2017 12:19:28
Message-Id: 1512043986.c76265ce51a4f08cd3ac3598b0896bb2adf153d5.alicef@gentoo
1 commit: c76265ce51a4f08cd3ac3598b0896bb2adf153d5
2 Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
3 AuthorDate: Thu Nov 30 12:13:06 2017 +0000
4 Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
5 CommitDate: Thu Nov 30 12:13:06 2017 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c76265ce
7
8 linux kernel 4.9.66
9
10 0000_README | 4 +
11 1065_linux-4.9.66.patch | 4726 +++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 4730 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index 98ea34c..045a3d8 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -303,6 +303,10 @@ Patch: 1064_linux-4.9.65.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.9.65
21
22 +Patch: 1065_linux-4.9.66.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.9.66
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1065_linux-4.9.66.patch b/1065_linux-4.9.66.patch
31 new file mode 100644
32 index 0000000..f277f12
33 --- /dev/null
34 +++ b/1065_linux-4.9.66.patch
35 @@ -0,0 +1,4726 @@
36 +diff --git a/Makefile b/Makefile
37 +index 87a641515e9c..8e62f9e2a08c 100644
38 +--- a/Makefile
39 ++++ b/Makefile
40 +@@ -1,6 +1,6 @@
41 + VERSION = 4
42 + PATCHLEVEL = 9
43 +-SUBLEVEL = 65
44 ++SUBLEVEL = 66
45 + EXTRAVERSION =
46 + NAME = Roaring Lionus
47 +
48 +diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
49 +index 9fe8e241335c..e1f6f0daa847 100644
50 +--- a/arch/arm/mm/dump.c
51 ++++ b/arch/arm/mm/dump.c
52 +@@ -126,8 +126,8 @@ static const struct prot_bits section_bits[] = {
53 + .val = PMD_SECT_USER,
54 + .set = "USR",
55 + }, {
56 +- .mask = L_PMD_SECT_RDONLY,
57 +- .val = L_PMD_SECT_RDONLY,
58 ++ .mask = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
59 ++ .val = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
60 + .set = "ro",
61 + .clear = "RW",
62 + #elif __LINUX_ARM_ARCH__ >= 6
63 +diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
64 +index 370581aeb871..4c587ad8bfe3 100644
65 +--- a/arch/arm/mm/init.c
66 ++++ b/arch/arm/mm/init.c
67 +@@ -619,8 +619,8 @@ static struct section_perm ro_perms[] = {
68 + .start = (unsigned long)_stext,
69 + .end = (unsigned long)__init_begin,
70 + #ifdef CONFIG_ARM_LPAE
71 +- .mask = ~L_PMD_SECT_RDONLY,
72 +- .prot = L_PMD_SECT_RDONLY,
73 ++ .mask = ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
74 ++ .prot = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
75 + #else
76 + .mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
77 + .prot = PMD_SECT_APX | PMD_SECT_AP_WRITE,
78 +diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
79 +index 61e214015b38..7acd3c5c7643 100644
80 +--- a/arch/arm64/include/asm/pgtable.h
81 ++++ b/arch/arm64/include/asm/pgtable.h
82 +@@ -91,6 +91,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
83 + ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
84 + #define pte_valid_young(pte) \
85 + ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
86 ++#define pte_valid_user(pte) \
87 ++ ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
88 +
89 + /*
90 + * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
91 +@@ -100,6 +102,18 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
92 + #define pte_accessible(mm, pte) \
93 + (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))
94 +
95 ++/*
96 ++ * p??_access_permitted() is true for valid user mappings (subject to the
97 ++ * write permission check) other than user execute-only which do not have the
98 ++ * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set.
99 ++ */
100 ++#define pte_access_permitted(pte, write) \
101 ++ (pte_valid_user(pte) && (!(write) || pte_write(pte)))
102 ++#define pmd_access_permitted(pmd, write) \
103 ++ (pte_access_permitted(pmd_pte(pmd), (write)))
104 ++#define pud_access_permitted(pud, write) \
105 ++ (pte_access_permitted(pud_pte(pud), (write)))
106 ++
107 + static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
108 + {
109 + pte_val(pte) &= ~pgprot_val(prot);
110 +diff --git a/arch/mips/bcm47xx/leds.c b/arch/mips/bcm47xx/leds.c
111 +index d20ae63eb3c2..46abe9e4e0e0 100644
112 +--- a/arch/mips/bcm47xx/leds.c
113 ++++ b/arch/mips/bcm47xx/leds.c
114 +@@ -330,7 +330,7 @@ bcm47xx_leds_linksys_wrt54g3gv2[] __initconst = {
115 + /* Verified on: WRT54GS V1.0 */
116 + static const struct gpio_led
117 + bcm47xx_leds_linksys_wrt54g_type_0101[] __initconst = {
118 +- BCM47XX_GPIO_LED(0, "green", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF),
119 ++ BCM47XX_GPIO_LED(0, "green", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
120 + BCM47XX_GPIO_LED(1, "green", "power", 0, LEDS_GPIO_DEFSTATE_ON),
121 + BCM47XX_GPIO_LED(7, "green", "dmz", 1, LEDS_GPIO_DEFSTATE_OFF),
122 + };
123 +diff --git a/arch/mips/boot/dts/brcm/Makefile b/arch/mips/boot/dts/brcm/Makefile
124 +index d61bc2aebf69..7d90a8710425 100644
125 +--- a/arch/mips/boot/dts/brcm/Makefile
126 ++++ b/arch/mips/boot/dts/brcm/Makefile
127 +@@ -22,7 +22,6 @@ dtb-$(CONFIG_DT_NONE) += \
128 + bcm63268-comtrend-vr-3032u.dtb \
129 + bcm93384wvg.dtb \
130 + bcm93384wvg_viper.dtb \
131 +- bcm96358nb4ser.dtb \
132 + bcm96368mvwg.dtb \
133 + bcm9ejtagprb.dtb \
134 + bcm97125cbmb.dtb \
135 +diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
136 +index 83054f79f72a..8333ce90b172 100644
137 +--- a/arch/mips/include/asm/asmmacro.h
138 ++++ b/arch/mips/include/asm/asmmacro.h
139 +@@ -19,6 +19,9 @@
140 + #include <asm/asmmacro-64.h>
141 + #endif
142 +
143 ++/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
144 ++#undef fp
145 ++
146 + /*
147 + * Helper macros for generating raw instruction encodings.
148 + */
149 +@@ -105,6 +108,7 @@
150 + .macro fpu_save_16odd thread
151 + .set push
152 + .set mips64r2
153 ++ .set fp=64
154 + SET_HARDFLOAT
155 + sdc1 $f1, THREAD_FPR1(\thread)
156 + sdc1 $f3, THREAD_FPR3(\thread)
157 +@@ -163,6 +167,7 @@
158 + .macro fpu_restore_16odd thread
159 + .set push
160 + .set mips64r2
161 ++ .set fp=64
162 + SET_HARDFLOAT
163 + ldc1 $f1, THREAD_FPR1(\thread)
164 + ldc1 $f3, THREAD_FPR3(\thread)
165 +@@ -234,9 +239,6 @@
166 + .endm
167 +
168 + #ifdef TOOLCHAIN_SUPPORTS_MSA
169 +-/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
170 +-#undef fp
171 +-
172 + .macro _cfcmsa rd, cs
173 + .set push
174 + .set mips32r2
175 +diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
176 +index 3de026034c35..11890e6e4093 100644
177 +--- a/arch/mips/kernel/ptrace.c
178 ++++ b/arch/mips/kernel/ptrace.c
179 +@@ -647,6 +647,19 @@ static const struct user_regset_view user_mips64_view = {
180 + .n = ARRAY_SIZE(mips64_regsets),
181 + };
182 +
183 ++#ifdef CONFIG_MIPS32_N32
184 ++
185 ++static const struct user_regset_view user_mipsn32_view = {
186 ++ .name = "mipsn32",
187 ++ .e_flags = EF_MIPS_ABI2,
188 ++ .e_machine = ELF_ARCH,
189 ++ .ei_osabi = ELF_OSABI,
190 ++ .regsets = mips64_regsets,
191 ++ .n = ARRAY_SIZE(mips64_regsets),
192 ++};
193 ++
194 ++#endif /* CONFIG_MIPS32_N32 */
195 ++
196 + #endif /* CONFIG_64BIT */
197 +
198 + const struct user_regset_view *task_user_regset_view(struct task_struct *task)
199 +@@ -657,6 +670,10 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
200 + #ifdef CONFIG_MIPS32_O32
201 + if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
202 + return &user_mips_view;
203 ++#endif
204 ++#ifdef CONFIG_MIPS32_N32
205 ++ if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
206 ++ return &user_mipsn32_view;
207 + #endif
208 + return &user_mips64_view;
209 + #endif
210 +diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c
211 +index 628c5132b3d8..a7962f79c4fe 100644
212 +--- a/arch/mips/pci/pci-mt7620.c
213 ++++ b/arch/mips/pci/pci-mt7620.c
214 +@@ -121,7 +121,7 @@ static int wait_pciephy_busy(void)
215 + else
216 + break;
217 + if (retry++ > WAITRETRY_MAX) {
218 +- printk(KERN_WARN "PCIE-PHY retry failed.\n");
219 ++ pr_warn("PCIE-PHY retry failed.\n");
220 + return -1;
221 + }
222 + }
223 +diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
224 +index 6f892c1f3ad7..0696142048d5 100644
225 +--- a/arch/mips/ralink/mt7620.c
226 ++++ b/arch/mips/ralink/mt7620.c
227 +@@ -141,8 +141,8 @@ static struct rt2880_pmx_func i2c_grp_mt7628[] = {
228 + FUNC("i2c", 0, 4, 2),
229 + };
230 +
231 +-static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("reclk", 0, 36, 1) };
232 +-static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 37, 1) };
233 ++static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("refclk", 0, 37, 1) };
234 ++static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 36, 1) };
235 + static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 38, 1) };
236 + static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) };
237 +
238 +diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
239 +index 41e60a9c7db2..e775f80ae28c 100644
240 +--- a/arch/parisc/kernel/syscall.S
241 ++++ b/arch/parisc/kernel/syscall.S
242 +@@ -690,15 +690,15 @@ cas_action:
243 + /* ELF32 Process entry path */
244 + lws_compare_and_swap_2:
245 + #ifdef CONFIG_64BIT
246 +- /* Clip the input registers */
247 ++ /* Clip the input registers. We don't need to clip %r23 as we
248 ++ only use it for word operations */
249 + depdi 0, 31, 32, %r26
250 + depdi 0, 31, 32, %r25
251 + depdi 0, 31, 32, %r24
252 +- depdi 0, 31, 32, %r23
253 + #endif
254 +
255 + /* Check the validity of the size pointer */
256 +- subi,>>= 4, %r23, %r0
257 ++ subi,>>= 3, %r23, %r0
258 + b,n lws_exit_nosys
259 +
260 + /* Jump to the functions which will load the old and new values into
261 +diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
262 +index bbe77aed198d..3600c0d99ae9 100644
263 +--- a/arch/powerpc/kernel/signal.c
264 ++++ b/arch/powerpc/kernel/signal.c
265 +@@ -102,7 +102,7 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
266 + static void do_signal(struct task_struct *tsk)
267 + {
268 + sigset_t *oldset = sigmask_to_save();
269 +- struct ksignal ksig;
270 ++ struct ksignal ksig = { .sig = 0 };
271 + int ret;
272 + int is32 = is_32bit_task();
273 +
274 +diff --git a/arch/s390/include/asm/asm-prototypes.h b/arch/s390/include/asm/asm-prototypes.h
275 +new file mode 100644
276 +index 000000000000..2c3413b0ca52
277 +--- /dev/null
278 ++++ b/arch/s390/include/asm/asm-prototypes.h
279 +@@ -0,0 +1,8 @@
280 ++#ifndef _ASM_S390_PROTOTYPES_H
281 ++
282 ++#include <linux/kvm_host.h>
283 ++#include <linux/ftrace.h>
284 ++#include <asm/fpu/api.h>
285 ++#include <asm-generic/asm-prototypes.h>
286 ++
287 ++#endif /* _ASM_S390_PROTOTYPES_H */
288 +diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
289 +index 12d45f0cfdd9..dde6b52359c5 100644
290 +--- a/arch/s390/include/asm/switch_to.h
291 ++++ b/arch/s390/include/asm/switch_to.h
292 +@@ -34,8 +34,8 @@ static inline void restore_access_regs(unsigned int *acrs)
293 + save_access_regs(&prev->thread.acrs[0]); \
294 + save_ri_cb(prev->thread.ri_cb); \
295 + } \
296 ++ update_cr_regs(next); \
297 + if (next->mm) { \
298 +- update_cr_regs(next); \
299 + set_cpu_flag(CIF_FPU); \
300 + restore_access_regs(&next->thread.acrs[0]); \
301 + restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
302 +diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
303 +index c74c59236f44..aaf9dab3c193 100644
304 +--- a/arch/s390/kernel/dis.c
305 ++++ b/arch/s390/kernel/dis.c
306 +@@ -1548,6 +1548,7 @@ static struct s390_insn opcode_e7[] = {
307 + { "vfsq", 0xce, INSTR_VRR_VV000MM },
308 + { "vfs", 0xe2, INSTR_VRR_VVV00MM },
309 + { "vftci", 0x4a, INSTR_VRI_VVIMM },
310 ++ { "", 0, INSTR_INVALID }
311 + };
312 +
313 + static struct s390_insn opcode_eb[] = {
314 +@@ -1953,7 +1954,7 @@ void show_code(struct pt_regs *regs)
315 + {
316 + char *mode = user_mode(regs) ? "User" : "Krnl";
317 + unsigned char code[64];
318 +- char buffer[64], *ptr;
319 ++ char buffer[128], *ptr;
320 + mm_segment_t old_fs;
321 + unsigned long addr;
322 + int start, end, opsize, hops, i;
323 +@@ -2016,7 +2017,7 @@ void show_code(struct pt_regs *regs)
324 + start += opsize;
325 + pr_cont("%s", buffer);
326 + ptr = buffer;
327 +- ptr += sprintf(ptr, "\n ");
328 ++ ptr += sprintf(ptr, "\n\t ");
329 + hops++;
330 + }
331 + pr_cont("\n");
332 +diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
333 +index 0c196861bc38..29d87444a655 100644
334 +--- a/arch/s390/kernel/early.c
335 ++++ b/arch/s390/kernel/early.c
336 +@@ -345,8 +345,10 @@ static __init void detect_machine_facilities(void)
337 + S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
338 + if (test_facility(40))
339 + S390_lowcore.machine_flags |= MACHINE_FLAG_LPP;
340 +- if (test_facility(50) && test_facility(73))
341 ++ if (test_facility(50) && test_facility(73)) {
342 + S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
343 ++ __ctl_set_bit(0, 55);
344 ++ }
345 + if (test_facility(51))
346 + S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
347 + if (test_facility(129)) {
348 +diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
349 +index bba4fa74b321..172fe1121d99 100644
350 +--- a/arch/s390/kernel/process.c
351 ++++ b/arch/s390/kernel/process.c
352 +@@ -120,6 +120,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
353 + memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
354 + memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
355 + clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
356 ++ p->thread.per_flags = 0;
357 + /* Initialize per thread user and system timer values */
358 + ti = task_thread_info(p);
359 + ti->user_timer = 0;
360 +diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
361 +index fffa0e5462af..70cdb03d4acd 100644
362 +--- a/arch/s390/kernel/runtime_instr.c
363 ++++ b/arch/s390/kernel/runtime_instr.c
364 +@@ -47,11 +47,13 @@ void exit_thread_runtime_instr(void)
365 + {
366 + struct task_struct *task = current;
367 +
368 ++ preempt_disable();
369 + if (!task->thread.ri_cb)
370 + return;
371 + disable_runtime_instr();
372 + kfree(task->thread.ri_cb);
373 + task->thread.ri_cb = NULL;
374 ++ preempt_enable();
375 + }
376 +
377 + SYSCALL_DEFINE1(s390_runtime_instr, int, command)
378 +@@ -62,9 +64,7 @@ SYSCALL_DEFINE1(s390_runtime_instr, int, command)
379 + return -EOPNOTSUPP;
380 +
381 + if (command == S390_RUNTIME_INSTR_STOP) {
382 +- preempt_disable();
383 + exit_thread_runtime_instr();
384 +- preempt_enable();
385 + return 0;
386 + }
387 +
388 +diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
389 +index e7b0e7ff4c58..be9df513141e 100644
390 +--- a/arch/x86/entry/entry_64.S
391 ++++ b/arch/x86/entry/entry_64.S
392 +@@ -54,15 +54,19 @@ ENTRY(native_usergs_sysret64)
393 + ENDPROC(native_usergs_sysret64)
394 + #endif /* CONFIG_PARAVIRT */
395 +
396 +-.macro TRACE_IRQS_IRETQ
397 ++.macro TRACE_IRQS_FLAGS flags:req
398 + #ifdef CONFIG_TRACE_IRQFLAGS
399 +- bt $9, EFLAGS(%rsp) /* interrupts off? */
400 ++ bt $9, \flags /* interrupts off? */
401 + jnc 1f
402 + TRACE_IRQS_ON
403 + 1:
404 + #endif
405 + .endm
406 +
407 ++.macro TRACE_IRQS_IRETQ
408 ++ TRACE_IRQS_FLAGS EFLAGS(%rsp)
409 ++.endm
410 ++
411 + /*
412 + * When dynamic function tracer is enabled it will add a breakpoint
413 + * to all locations that it is about to modify, sync CPUs, update
414 +@@ -868,11 +872,13 @@ idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0
415 + ENTRY(native_load_gs_index)
416 + pushfq
417 + DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
418 ++ TRACE_IRQS_OFF
419 + SWAPGS
420 + .Lgs_change:
421 + movl %edi, %gs
422 + 2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
423 + SWAPGS
424 ++ TRACE_IRQS_FLAGS (%rsp)
425 + popfq
426 + ret
427 + END(native_load_gs_index)
428 +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
429 +index 8ca1eca5038d..4fbf0c94f2d1 100644
430 +--- a/arch/x86/kvm/svm.c
431 ++++ b/arch/x86/kvm/svm.c
432 +@@ -3583,6 +3583,13 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
433 + u32 ecx = msr->index;
434 + u64 data = msr->data;
435 + switch (ecx) {
436 ++ case MSR_IA32_CR_PAT:
437 ++ if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
438 ++ return 1;
439 ++ vcpu->arch.pat = data;
440 ++ svm->vmcb->save.g_pat = data;
441 ++ mark_dirty(svm->vmcb, VMCB_NPT);
442 ++ break;
443 + case MSR_IA32_TSC:
444 + kvm_write_tsc(vcpu, msr);
445 + break;
446 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
447 +index a8ae57acb6f6..0f0b27d96f27 100644
448 +--- a/arch/x86/kvm/vmx.c
449 ++++ b/arch/x86/kvm/vmx.c
450 +@@ -10715,6 +10715,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
451 + vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
452 + vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
453 + vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
454 ++ vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
455 ++ vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);
456 +
457 + /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */
458 + if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
459 +diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
460 +index 767be7c76034..1754e094bc28 100644
461 +--- a/arch/x86/lib/x86-opcode-map.txt
462 ++++ b/arch/x86/lib/x86-opcode-map.txt
463 +@@ -896,7 +896,7 @@ EndTable
464 +
465 + GrpTable: Grp3_1
466 + 0: TEST Eb,Ib
467 +-1:
468 ++1: TEST Eb,Ib
469 + 2: NOT Eb
470 + 3: NEG Eb
471 + 4: MUL AL,Eb
472 +diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
473 +index 1dd796025472..8b5ff88aa4f8 100644
474 +--- a/arch/x86/mm/fault.c
475 ++++ b/arch/x86/mm/fault.c
476 +@@ -1393,7 +1393,17 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
477 + * make sure we exit gracefully rather than endlessly redo
478 + * the fault. Since we never set FAULT_FLAG_RETRY_NOWAIT, if
479 + * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
480 ++ *
481 ++ * Note that handle_userfault() may also release and reacquire mmap_sem
482 ++ * (and not return with VM_FAULT_RETRY), when returning to userland to
483 ++ * repeat the page fault later with a VM_FAULT_NOPAGE retval
484 ++ * (potentially after handling any pending signal during the return to
485 ++ * userland). The return to userland is identified whenever
486 ++ * FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags.
487 ++ * Thus we have to be careful about not touching vma after handling the
488 ++ * fault, so we read the pkey beforehand.
489 + */
490 ++ pkey = vma_pkey(vma);
491 + fault = handle_mm_fault(vma, address, flags);
492 + major |= fault & VM_FAULT_MAJOR;
493 +
494 +@@ -1420,7 +1430,6 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
495 + return;
496 + }
497 +
498 +- pkey = vma_pkey(vma);
499 + up_read(&mm->mmap_sem);
500 + if (unlikely(fault & VM_FAULT_ERROR)) {
501 + mm_fault_error(regs, error_code, address, &pkey, fault);
502 +diff --git a/block/blk-core.c b/block/blk-core.c
503 +index 95379fc83805..b1c76aa73492 100644
504 +--- a/block/blk-core.c
505 ++++ b/block/blk-core.c
506 +@@ -282,6 +282,7 @@ EXPORT_SYMBOL(blk_stop_queue);
507 + void blk_sync_queue(struct request_queue *q)
508 + {
509 + del_timer_sync(&q->timeout);
510 ++ cancel_work_sync(&q->timeout_work);
511 +
512 + if (q->mq_ops) {
513 + struct blk_mq_hw_ctx *hctx;
514 +@@ -720,6 +721,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
515 + setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
516 + laptop_mode_timer_fn, (unsigned long) q);
517 + setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
518 ++ INIT_WORK(&q->timeout_work, NULL);
519 + INIT_LIST_HEAD(&q->queue_head);
520 + INIT_LIST_HEAD(&q->timeout_list);
521 + INIT_LIST_HEAD(&q->icq_list);
522 +diff --git a/block/blk-timeout.c b/block/blk-timeout.c
523 +index a30441a200c0..220661a50f58 100644
524 +--- a/block/blk-timeout.c
525 ++++ b/block/blk-timeout.c
526 +@@ -135,8 +135,6 @@ void blk_timeout_work(struct work_struct *work)
527 + struct request *rq, *tmp;
528 + int next_set = 0;
529 +
530 +- if (blk_queue_enter(q, true))
531 +- return;
532 + spin_lock_irqsave(q->queue_lock, flags);
533 +
534 + list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
535 +@@ -146,7 +144,6 @@ void blk_timeout_work(struct work_struct *work)
536 + mod_timer(&q->timeout, round_jiffies_up(next));
537 +
538 + spin_unlock_irqrestore(q->queue_lock, flags);
539 +- blk_queue_exit(q);
540 + }
541 +
542 + /**
543 +diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
544 +index 51874695a730..c3bcb7f5986e 100644
545 +--- a/drivers/acpi/ec.c
546 ++++ b/drivers/acpi/ec.c
547 +@@ -482,8 +482,11 @@ static inline void __acpi_ec_enable_event(struct acpi_ec *ec)
548 + {
549 + if (!test_and_set_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
550 + ec_log_drv("event unblocked");
551 +- if (!test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
552 +- advance_transaction(ec);
553 ++ /*
554 ++ * Unconditionally invoke this once after enabling the event
555 ++ * handling mechanism to detect the pending events.
556 ++ */
557 ++ advance_transaction(ec);
558 + }
559 +
560 + static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
561 +@@ -1458,11 +1461,10 @@ static int ec_install_handlers(struct acpi_ec *ec, bool handle_events)
562 + if (test_bit(EC_FLAGS_STARTED, &ec->flags) &&
563 + ec->reference_count >= 1)
564 + acpi_ec_enable_gpe(ec, true);
565 +-
566 +- /* EC is fully operational, allow queries */
567 +- acpi_ec_enable_event(ec);
568 + }
569 + }
570 ++ /* EC is fully operational, allow queries */
571 ++ acpi_ec_enable_event(ec);
572 +
573 + return 0;
574 + }
575 +diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
576 +index 0e1ec37070d1..6475a1343483 100644
577 +--- a/drivers/ata/libata-eh.c
578 ++++ b/drivers/ata/libata-eh.c
579 +@@ -2329,8 +2329,8 @@ static void ata_eh_link_autopsy(struct ata_link *link)
580 + if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
581 + eflags |= ATA_EFLAG_DUBIOUS_XFER;
582 + ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
583 ++ trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
584 + }
585 +- trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
586 + DPRINTK("EXIT\n");
587 + }
588 +
589 +diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c
590 +index b52c617947ad..69379443e5eb 100644
591 +--- a/drivers/base/power/opp/of.c
592 ++++ b/drivers/base/power/opp/of.c
593 +@@ -348,6 +348,7 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
594 + if (ret) {
595 + dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
596 + ret);
597 ++ of_node_put(np);
598 + goto free_table;
599 + }
600 + }
601 +diff --git a/drivers/clk/qcom/gcc-ipq4019.c b/drivers/clk/qcom/gcc-ipq4019.c
602 +index b593065de8db..8ab6ce4d976f 100644
603 +--- a/drivers/clk/qcom/gcc-ipq4019.c
604 ++++ b/drivers/clk/qcom/gcc-ipq4019.c
605 +@@ -525,10 +525,20 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
606 + };
607 +
608 + static const struct freq_tbl ftbl_gcc_apps_clk[] = {
609 +- F(48000000, P_XO, 1, 0, 0),
610 ++ F(48000000, P_XO, 1, 0, 0),
611 + F(200000000, P_FEPLL200, 1, 0, 0),
612 ++ F(384000000, P_DDRPLLAPSS, 1, 0, 0),
613 ++ F(413000000, P_DDRPLLAPSS, 1, 0, 0),
614 ++ F(448000000, P_DDRPLLAPSS, 1, 0, 0),
615 ++ F(488000000, P_DDRPLLAPSS, 1, 0, 0),
616 + F(500000000, P_FEPLL500, 1, 0, 0),
617 +- F(626000000, P_DDRPLLAPSS, 1, 0, 0),
618 ++ F(512000000, P_DDRPLLAPSS, 1, 0, 0),
619 ++ F(537000000, P_DDRPLLAPSS, 1, 0, 0),
620 ++ F(565000000, P_DDRPLLAPSS, 1, 0, 0),
621 ++ F(597000000, P_DDRPLLAPSS, 1, 0, 0),
622 ++ F(632000000, P_DDRPLLAPSS, 1, 0, 0),
623 ++ F(672000000, P_DDRPLLAPSS, 1, 0, 0),
624 ++ F(716000000, P_DDRPLLAPSS, 1, 0, 0),
625 + { }
626 + };
627 +
628 +diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
629 +index 0cca3601d99e..df97e25aec76 100644
630 +--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
631 ++++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
632 +@@ -468,8 +468,8 @@ static SUNXI_CCU_MUX_WITH_GATE(daudio0_clk, "daudio0", daudio_parents,
633 + static SUNXI_CCU_MUX_WITH_GATE(daudio1_clk, "daudio1", daudio_parents,
634 + 0x0b4, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
635 +
636 +-static SUNXI_CCU_M_WITH_GATE(spdif_clk, "spdif", "pll-audio",
637 +- 0x0c0, 0, 4, BIT(31), CLK_SET_RATE_PARENT);
638 ++static SUNXI_CCU_MUX_WITH_GATE(spdif_clk, "spdif", daudio_parents,
639 ++ 0x0c0, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
640 +
641 + static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M",
642 + 0x0cc, BIT(8), 0);
643 +diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
644 +index 9bd1f78a0547..e1dc4e5b34e1 100644
645 +--- a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
646 ++++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
647 +@@ -752,6 +752,13 @@ static const struct sunxi_ccu_desc sun8i_a33_ccu_desc = {
648 + .num_resets = ARRAY_SIZE(sun8i_a33_ccu_resets),
649 + };
650 +
651 ++static struct ccu_mux_nb sun8i_a33_cpu_nb = {
652 ++ .common = &cpux_clk.common,
653 ++ .cm = &cpux_clk.mux,
654 ++ .delay_us = 1, /* > 8 clock cycles at 24 MHz */
655 ++ .bypass_index = 1, /* index of 24 MHz oscillator */
656 ++};
657 ++
658 + static void __init sun8i_a33_ccu_setup(struct device_node *node)
659 + {
660 + void __iomem *reg;
661 +@@ -775,6 +782,9 @@ static void __init sun8i_a33_ccu_setup(struct device_node *node)
662 + writel(val, reg + SUN8I_A33_PLL_MIPI_REG);
663 +
664 + sunxi_ccu_probe(node, reg, &sun8i_a33_ccu_desc);
665 ++
666 ++ ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk,
667 ++ &sun8i_a33_cpu_nb);
668 + }
669 + CLK_OF_DECLARE(sun8i_a33_ccu, "allwinner,sun8i-a33-ccu",
670 + sun8i_a33_ccu_setup);
671 +diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
672 +index c77333230bdf..7d060ffe8975 100644
673 +--- a/drivers/clk/ti/clk-dra7-atl.c
674 ++++ b/drivers/clk/ti/clk-dra7-atl.c
675 +@@ -265,8 +265,7 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
676 +
677 + /* Get configuration for the ATL instances */
678 + snprintf(prop, sizeof(prop), "atl%u", i);
679 +- of_node_get(node);
680 +- cfg_node = of_find_node_by_name(node, prop);
681 ++ cfg_node = of_get_child_by_name(node, prop);
682 + if (cfg_node) {
683 + ret = of_property_read_u32(cfg_node, "bws",
684 + &cdesc->bws);
685 +diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
686 +index e423d33decd4..36291840a12c 100644
687 +--- a/drivers/crypto/marvell/cesa.h
688 ++++ b/drivers/crypto/marvell/cesa.h
689 +@@ -273,7 +273,8 @@ struct mv_cesa_op_ctx {
690 + #define CESA_TDMA_SRC_IN_SRAM BIT(30)
691 + #define CESA_TDMA_END_OF_REQ BIT(29)
692 + #define CESA_TDMA_BREAK_CHAIN BIT(28)
693 +-#define CESA_TDMA_TYPE_MSK GENMASK(27, 0)
694 ++#define CESA_TDMA_SET_STATE BIT(27)
695 ++#define CESA_TDMA_TYPE_MSK GENMASK(26, 0)
696 + #define CESA_TDMA_DUMMY 0
697 + #define CESA_TDMA_DATA 1
698 + #define CESA_TDMA_OP 2
699 +diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
700 +index 77712b375b84..662cf4ddb04b 100644
701 +--- a/drivers/crypto/marvell/hash.c
702 ++++ b/drivers/crypto/marvell/hash.c
703 +@@ -280,13 +280,32 @@ static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
704 + sreq->offset = 0;
705 + }
706 +
707 ++static void mv_cesa_ahash_dma_step(struct ahash_request *req)
708 ++{
709 ++ struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
710 ++ struct mv_cesa_req *base = &creq->base;
711 ++
712 ++ /* We must explicitly set the digest state. */
713 ++ if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
714 ++ struct mv_cesa_engine *engine = base->engine;
715 ++ int i;
716 ++
717 ++ /* Set the hash state in the IVDIG regs. */
718 ++ for (i = 0; i < ARRAY_SIZE(creq->state); i++)
719 ++ writel_relaxed(creq->state[i], engine->regs +
720 ++ CESA_IVDIG(i));
721 ++ }
722 ++
723 ++ mv_cesa_dma_step(base);
724 ++}
725 ++
726 + static void mv_cesa_ahash_step(struct crypto_async_request *req)
727 + {
728 + struct ahash_request *ahashreq = ahash_request_cast(req);
729 + struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
730 +
731 + if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
732 +- mv_cesa_dma_step(&creq->base);
733 ++ mv_cesa_ahash_dma_step(ahashreq);
734 + else
735 + mv_cesa_ahash_std_step(ahashreq);
736 + }
737 +@@ -562,11 +581,15 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
738 + struct mv_cesa_ahash_dma_iter iter;
739 + struct mv_cesa_op_ctx *op = NULL;
740 + unsigned int frag_len;
741 ++ bool set_state = false;
742 + int ret;
743 +
744 + basereq->chain.first = NULL;
745 + basereq->chain.last = NULL;
746 +
747 ++ if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
748 ++ set_state = true;
749 ++
750 + if (creq->src_nents) {
751 + ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
752 + DMA_TO_DEVICE);
753 +@@ -650,6 +673,15 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
754 + basereq->chain.last->flags |= (CESA_TDMA_END_OF_REQ |
755 + CESA_TDMA_BREAK_CHAIN);
756 +
757 ++ if (set_state) {
758 ++ /*
759 ++ * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
760 ++ * let the step logic know that the IVDIG registers should be
761 ++ * explicitly set before launching a TDMA chain.
762 ++ */
763 ++ basereq->chain.first->flags |= CESA_TDMA_SET_STATE;
764 ++ }
765 ++
766 + return 0;
767 +
768 + err_free_tdma:
769 +diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c
770 +index 9fd7a5fbaa1b..0cda6e3f2b4b 100644
771 +--- a/drivers/crypto/marvell/tdma.c
772 ++++ b/drivers/crypto/marvell/tdma.c
773 +@@ -112,7 +112,14 @@ void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
774 + last->next = dreq->chain.first;
775 + engine->chain.last = dreq->chain.last;
776 +
777 +- if (!(last->flags & CESA_TDMA_BREAK_CHAIN))
778 ++ /*
779 ++ * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on
780 ++ * the last element of the current chain, or if the request
781 ++ * being queued needs the IV regs to be set before lauching
782 ++ * the request.
783 ++ */
784 ++ if (!(last->flags & CESA_TDMA_BREAK_CHAIN) &&
785 ++ !(dreq->chain.first->flags & CESA_TDMA_SET_STATE))
786 + last->next_dma = dreq->chain.first->cur_dma;
787 + }
788 + }
789 +diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c
790 +index 245d759d5ffc..6059d81e701a 100644
791 +--- a/drivers/dma/zx296702_dma.c
792 ++++ b/drivers/dma/zx296702_dma.c
793 +@@ -813,6 +813,7 @@ static int zx_dma_probe(struct platform_device *op)
794 + INIT_LIST_HEAD(&d->slave.channels);
795 + dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
796 + dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
797 ++ dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
798 + dma_cap_set(DMA_PRIVATE, d->slave.cap_mask);
799 + d->slave.dev = &op->dev;
800 + d->slave.device_free_chan_resources = zx_dma_free_chan_resources;
801 +diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
802 +index 1ef85b0c2b1f..d27e9361e236 100644
803 +--- a/drivers/gpio/gpio-mockup.c
804 ++++ b/drivers/gpio/gpio-mockup.c
805 +@@ -126,7 +126,7 @@ static int mockup_gpio_probe(struct platform_device *pdev)
806 + int i;
807 + int base;
808 + int ngpio;
809 +- char chip_name[sizeof(GPIO_NAME) + 3];
810 ++ char *chip_name;
811 +
812 + if (gpio_mockup_params_nr < 2)
813 + return -EINVAL;
814 +@@ -146,8 +146,12 @@ static int mockup_gpio_probe(struct platform_device *pdev)
815 + ngpio = gpio_mockup_ranges[i * 2 + 1] - base;
816 +
817 + if (ngpio >= 0) {
818 +- sprintf(chip_name, "%s-%c", GPIO_NAME,
819 +- pins_name_start + i);
820 ++ chip_name = devm_kasprintf(dev, GFP_KERNEL,
821 ++ "%s-%c", GPIO_NAME,
822 ++ pins_name_start + i);
823 ++ if (!chip_name)
824 ++ return -ENOMEM;
825 ++
826 + ret = mockup_gpio_add(dev, &cntr[i],
827 + chip_name, base, ngpio);
828 + } else {
829 +diff --git a/drivers/gpu/drm/armada/Makefile b/drivers/gpu/drm/armada/Makefile
830 +index ffd673615772..26412d2f8c98 100644
831 +--- a/drivers/gpu/drm/armada/Makefile
832 ++++ b/drivers/gpu/drm/armada/Makefile
833 +@@ -4,3 +4,5 @@ armada-y += armada_510.o
834 + armada-$(CONFIG_DEBUG_FS) += armada_debugfs.o
835 +
836 + obj-$(CONFIG_DRM_ARMADA) := armada.o
837 ++
838 ++CFLAGS_armada_trace.o := -I$(src)
839 +diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
840 +index ee07bb4a57b7..11f54df0c19b 100644
841 +--- a/drivers/gpu/drm/drm_mm.c
842 ++++ b/drivers/gpu/drm/drm_mm.c
843 +@@ -348,14 +348,12 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
844 +
845 + BUG_ON(!hole_node->hole_follows || node->allocated);
846 +
847 +- if (adj_start < start)
848 +- adj_start = start;
849 +- if (adj_end > end)
850 +- adj_end = end;
851 +-
852 + if (mm->color_adjust)
853 + mm->color_adjust(hole_node, color, &adj_start, &adj_end);
854 +
855 ++ adj_start = max(adj_start, start);
856 ++ adj_end = min(adj_end, end);
857 ++
858 + if (flags & DRM_MM_CREATE_TOP)
859 + adj_start = adj_end - size;
860 +
861 +@@ -566,17 +564,15 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
862 + flags & DRM_MM_SEARCH_BELOW) {
863 + u64 hole_size = adj_end - adj_start;
864 +
865 +- if (adj_start < start)
866 +- adj_start = start;
867 +- if (adj_end > end)
868 +- adj_end = end;
869 +-
870 + if (mm->color_adjust) {
871 + mm->color_adjust(entry, color, &adj_start, &adj_end);
872 + if (adj_end <= adj_start)
873 + continue;
874 + }
875 +
876 ++ adj_start = max(adj_start, start);
877 ++ adj_end = min(adj_end, end);
878 ++
879 + if (!check_free_hole(adj_start, adj_end, size, alignment))
880 + continue;
881 +
882 +diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
883 +index 3ce9ba30d827..a19ec06f9e42 100644
884 +--- a/drivers/gpu/drm/i915/intel_drv.h
885 ++++ b/drivers/gpu/drm/i915/intel_drv.h
886 +@@ -457,6 +457,7 @@ struct intel_crtc_scaler_state {
887 +
888 + struct intel_pipe_wm {
889 + struct intel_wm_level wm[5];
890 ++ struct intel_wm_level raw_wm[5];
891 + uint32_t linetime;
892 + bool fbc_wm_enabled;
893 + bool pipe_enabled;
894 +diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
895 +index 277a8026460b..49de4760cc16 100644
896 +--- a/drivers/gpu/drm/i915/intel_pm.c
897 ++++ b/drivers/gpu/drm/i915/intel_pm.c
898 +@@ -27,7 +27,6 @@
899 +
900 + #include <linux/cpufreq.h>
901 + #include <drm/drm_plane_helper.h>
902 +-#include <drm/drm_atomic_helper.h>
903 + #include "i915_drv.h"
904 + #include "intel_drv.h"
905 + #include "../../../platform/x86/intel_ips.h"
906 +@@ -2018,9 +2017,9 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
907 + const struct intel_crtc *intel_crtc,
908 + int level,
909 + struct intel_crtc_state *cstate,
910 +- const struct intel_plane_state *pristate,
911 +- const struct intel_plane_state *sprstate,
912 +- const struct intel_plane_state *curstate,
913 ++ struct intel_plane_state *pristate,
914 ++ struct intel_plane_state *sprstate,
915 ++ struct intel_plane_state *curstate,
916 + struct intel_wm_level *result)
917 + {
918 + uint16_t pri_latency = dev_priv->wm.pri_latency[level];
919 +@@ -2342,24 +2341,28 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
920 + struct intel_pipe_wm *pipe_wm;
921 + struct drm_device *dev = state->dev;
922 + const struct drm_i915_private *dev_priv = to_i915(dev);
923 +- struct drm_plane *plane;
924 +- const struct drm_plane_state *plane_state;
925 +- const struct intel_plane_state *pristate = NULL;
926 +- const struct intel_plane_state *sprstate = NULL;
927 +- const struct intel_plane_state *curstate = NULL;
928 ++ struct intel_plane *intel_plane;
929 ++ struct intel_plane_state *pristate = NULL;
930 ++ struct intel_plane_state *sprstate = NULL;
931 ++ struct intel_plane_state *curstate = NULL;
932 + int level, max_level = ilk_wm_max_level(dev), usable_level;
933 + struct ilk_wm_maximums max;
934 +
935 + pipe_wm = &cstate->wm.ilk.optimal;
936 +
937 +- drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, &cstate->base) {
938 +- const struct intel_plane_state *ps = to_intel_plane_state(plane_state);
939 ++ for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
940 ++ struct intel_plane_state *ps;
941 +
942 +- if (plane->type == DRM_PLANE_TYPE_PRIMARY)
943 ++ ps = intel_atomic_get_existing_plane_state(state,
944 ++ intel_plane);
945 ++ if (!ps)
946 ++ continue;
947 ++
948 ++ if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
949 + pristate = ps;
950 +- else if (plane->type == DRM_PLANE_TYPE_OVERLAY)
951 ++ else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
952 + sprstate = ps;
953 +- else if (plane->type == DRM_PLANE_TYPE_CURSOR)
954 ++ else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
955 + curstate = ps;
956 + }
957 +
958 +@@ -2381,9 +2384,11 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
959 + if (pipe_wm->sprites_scaled)
960 + usable_level = 0;
961 +
962 +- memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
963 + ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
964 +- pristate, sprstate, curstate, &pipe_wm->wm[0]);
965 ++ pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);
966 ++
967 ++ memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
968 ++ pipe_wm->wm[0] = pipe_wm->raw_wm[0];
969 +
970 + if (IS_HASWELL(dev) || IS_BROADWELL(dev))
971 + pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
972 +@@ -2393,8 +2398,8 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
973 +
974 + ilk_compute_wm_reg_maximums(dev, 1, &max);
975 +
976 +- for (level = 1; level <= usable_level; level++) {
977 +- struct intel_wm_level *wm = &pipe_wm->wm[level];
978 ++ for (level = 1; level <= max_level; level++) {
979 ++ struct intel_wm_level *wm = &pipe_wm->raw_wm[level];
980 +
981 + ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
982 + pristate, sprstate, curstate, wm);
983 +@@ -2404,10 +2409,13 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
984 + * register maximums since such watermarks are
985 + * always invalid.
986 + */
987 +- if (!ilk_validate_wm_level(level, &max, wm)) {
988 +- memset(wm, 0, sizeof(*wm));
989 +- break;
990 +- }
991 ++ if (level > usable_level)
992 ++ continue;
993 ++
994 ++ if (ilk_validate_wm_level(level, &max, wm))
995 ++ pipe_wm->wm[level] = *wm;
996 ++ else
997 ++ usable_level = level;
998 + }
999 +
1000 + return 0;
1001 +diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
1002 +index cf83f6507ec8..48dfc163233e 100644
1003 +--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
1004 ++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
1005 +@@ -321,7 +321,8 @@ static void mtk_drm_unbind(struct device *dev)
1006 + {
1007 + struct mtk_drm_private *private = dev_get_drvdata(dev);
1008 +
1009 +- drm_put_dev(private->drm);
1010 ++ drm_dev_unregister(private->drm);
1011 ++ drm_dev_unref(private->drm);
1012 + private->drm = NULL;
1013 + }
1014 +
1015 +diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
1016 +index 6e6c59a661b6..223944a3ba18 100644
1017 +--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
1018 ++++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
1019 +@@ -172,7 +172,7 @@ int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
1020 + ret = sun4i_backend_drm_format_to_layer(plane, fb->pixel_format, &val);
1021 + if (ret) {
1022 + DRM_DEBUG_DRIVER("Invalid format\n");
1023 +- return val;
1024 ++ return ret;
1025 + }
1026 +
1027 + regmap_update_bits(backend->regs, SUN4I_BACKEND_ATTCTL_REG1(layer),
1028 +diff --git a/drivers/iio/light/cm3232.c b/drivers/iio/light/cm3232.c
1029 +index fe89b6823217..263e97235ea0 100644
1030 +--- a/drivers/iio/light/cm3232.c
1031 ++++ b/drivers/iio/light/cm3232.c
1032 +@@ -119,7 +119,7 @@ static int cm3232_reg_init(struct cm3232_chip *chip)
1033 + if (ret < 0)
1034 + dev_err(&chip->client->dev, "Error writing reg_cmd\n");
1035 +
1036 +- return 0;
1037 ++ return ret;
1038 + }
1039 +
1040 + /**
1041 +diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
1042 +index 1eee8f7e75ca..84f91858b5e6 100644
1043 +--- a/drivers/infiniband/ulp/srp/ib_srp.c
1044 ++++ b/drivers/infiniband/ulp/srp/ib_srp.c
1045 +@@ -648,12 +648,19 @@ static void srp_path_rec_completion(int status,
1046 + static int srp_lookup_path(struct srp_rdma_ch *ch)
1047 + {
1048 + struct srp_target_port *target = ch->target;
1049 +- int ret;
1050 ++ int ret = -ENODEV;
1051 +
1052 + ch->path.numb_path = 1;
1053 +
1054 + init_completion(&ch->done);
1055 +
1056 ++ /*
1057 ++ * Avoid that the SCSI host can be removed by srp_remove_target()
1058 ++ * before srp_path_rec_completion() is called.
1059 ++ */
1060 ++ if (!scsi_host_get(target->scsi_host))
1061 ++ goto out;
1062 ++
1063 + ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
1064 + target->srp_host->srp_dev->dev,
1065 + target->srp_host->port,
1066 +@@ -667,18 +674,24 @@ static int srp_lookup_path(struct srp_rdma_ch *ch)
1067 + GFP_KERNEL,
1068 + srp_path_rec_completion,
1069 + ch, &ch->path_query);
1070 +- if (ch->path_query_id < 0)
1071 +- return ch->path_query_id;
1072 ++ ret = ch->path_query_id;
1073 ++ if (ret < 0)
1074 ++ goto put;
1075 +
1076 + ret = wait_for_completion_interruptible(&ch->done);
1077 + if (ret < 0)
1078 +- return ret;
1079 ++ goto put;
1080 +
1081 +- if (ch->status < 0)
1082 ++ ret = ch->status;
1083 ++ if (ret < 0)
1084 + shost_printk(KERN_WARNING, target->scsi_host,
1085 + PFX "Path record query failed\n");
1086 +
1087 +- return ch->status;
1088 ++put:
1089 ++ scsi_host_put(target->scsi_host);
1090 ++
1091 ++out:
1092 ++ return ret;
1093 + }
1094 +
1095 + static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
1096 +diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
1097 +index 0b1f69ed2e92..b9748970df4a 100644
1098 +--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
1099 ++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
1100 +@@ -2750,7 +2750,7 @@ static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
1101 + {
1102 + const char *p;
1103 + unsigned len, count, leading_zero_bytes;
1104 +- int ret, rc;
1105 ++ int ret;
1106 +
1107 + p = name;
1108 + if (strncasecmp(p, "0x", 2) == 0)
1109 +@@ -2762,10 +2762,9 @@ static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
1110 + count = min(len / 2, 16U);
1111 + leading_zero_bytes = 16 - count;
1112 + memset(i_port_id, 0, leading_zero_bytes);
1113 +- rc = hex2bin(i_port_id + leading_zero_bytes, p, count);
1114 +- if (rc < 0)
1115 +- pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", rc);
1116 +- ret = 0;
1117 ++ ret = hex2bin(i_port_id + leading_zero_bytes, p, count);
1118 ++ if (ret < 0)
1119 ++ pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", ret);
1120 + out:
1121 + return ret;
1122 + }
1123 +diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
1124 +index 24d388d74011..a37576a1798d 100644
1125 +--- a/drivers/irqchip/irq-gic-v3.c
1126 ++++ b/drivers/irqchip/irq-gic-v3.c
1127 +@@ -1022,18 +1022,18 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
1128 + int nr_parts;
1129 + struct partition_affinity *parts;
1130 +
1131 +- parts_node = of_find_node_by_name(gic_node, "ppi-partitions");
1132 ++ parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
1133 + if (!parts_node)
1134 + return;
1135 +
1136 + nr_parts = of_get_child_count(parts_node);
1137 +
1138 + if (!nr_parts)
1139 +- return;
1140 ++ goto out_put_node;
1141 +
1142 + parts = kzalloc(sizeof(*parts) * nr_parts, GFP_KERNEL);
1143 + if (WARN_ON(!parts))
1144 +- return;
1145 ++ goto out_put_node;
1146 +
1147 + for_each_child_of_node(parts_node, child_part) {
1148 + struct partition_affinity *part;
1149 +@@ -1100,6 +1100,9 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
1150 +
1151 + gic_data.ppi_descs[i] = desc;
1152 + }
1153 ++
1154 ++out_put_node:
1155 ++ of_node_put(parts_node);
1156 + }
1157 +
1158 + static void __init gic_of_setup_kvm_info(struct device_node *node)
1159 +diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
1160 +index ca4abe1ccd8d..3fba31cea66e 100644
1161 +--- a/drivers/md/bcache/alloc.c
1162 ++++ b/drivers/md/bcache/alloc.c
1163 +@@ -404,7 +404,8 @@ long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
1164 +
1165 + finish_wait(&ca->set->bucket_wait, &w);
1166 + out:
1167 +- wake_up_process(ca->alloc_thread);
1168 ++ if (ca->alloc_thread)
1169 ++ wake_up_process(ca->alloc_thread);
1170 +
1171 + trace_bcache_alloc(ca, reserve);
1172 +
1173 +diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
1174 +index 8bf9667ff46b..7643f72adb1c 100644
1175 +--- a/drivers/md/dm-bufio.c
1176 ++++ b/drivers/md/dm-bufio.c
1177 +@@ -937,7 +937,8 @@ static void __get_memory_limit(struct dm_bufio_client *c,
1178 + buffers = c->minimum_buffers;
1179 +
1180 + *limit_buffers = buffers;
1181 +- *threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
1182 ++ *threshold_buffers = mult_frac(buffers,
1183 ++ DM_BUFIO_WRITEBACK_PERCENT, 100);
1184 + }
1185 +
1186 + /*
1187 +@@ -1856,19 +1857,15 @@ static int __init dm_bufio_init(void)
1188 + memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
1189 + memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
1190 +
1191 +- mem = (__u64)((totalram_pages - totalhigh_pages) *
1192 +- DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;
1193 ++ mem = (__u64)mult_frac(totalram_pages - totalhigh_pages,
1194 ++ DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
1195 +
1196 + if (mem > ULONG_MAX)
1197 + mem = ULONG_MAX;
1198 +
1199 + #ifdef CONFIG_MMU
1200 +- /*
1201 +- * Get the size of vmalloc space the same way as VMALLOC_TOTAL
1202 +- * in fs/proc/internal.h
1203 +- */
1204 +- if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
1205 +- mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
1206 ++ if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
1207 ++ mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
1208 + #endif
1209 +
1210 + dm_bufio_default_cache_size = mem;
1211 +diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
1212 +index 40ceba1fe8be..1609d4971104 100644
1213 +--- a/drivers/md/dm-core.h
1214 ++++ b/drivers/md/dm-core.h
1215 +@@ -29,7 +29,6 @@ struct dm_kobject_holder {
1216 + * DM targets must _not_ deference a mapped_device to directly access its members!
1217 + */
1218 + struct mapped_device {
1219 +- struct srcu_struct io_barrier;
1220 + struct mutex suspend_lock;
1221 +
1222 + /*
1223 +@@ -127,6 +126,8 @@ struct mapped_device {
1224 + struct blk_mq_tag_set *tag_set;
1225 + bool use_blk_mq:1;
1226 + bool init_tio_pdu:1;
1227 ++
1228 ++ struct srcu_struct io_barrier;
1229 + };
1230 +
1231 + void dm_init_md_queue(struct mapped_device *md);
1232 +diff --git a/drivers/md/dm.c b/drivers/md/dm.c
1233 +index e66f4040d84b..c5522551122f 100644
1234 +--- a/drivers/md/dm.c
1235 ++++ b/drivers/md/dm.c
1236 +@@ -21,6 +21,7 @@
1237 + #include <linux/delay.h>
1238 + #include <linux/wait.h>
1239 + #include <linux/pr.h>
1240 ++#include <linux/vmalloc.h>
1241 +
1242 + #define DM_MSG_PREFIX "core"
1243 +
1244 +@@ -1511,7 +1512,7 @@ static struct mapped_device *alloc_dev(int minor)
1245 + struct mapped_device *md;
1246 + void *old_md;
1247 +
1248 +- md = kzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
1249 ++ md = vzalloc_node(sizeof(*md), numa_node_id);
1250 + if (!md) {
1251 + DMWARN("unable to allocate device, out of memory.");
1252 + return NULL;
1253 +@@ -1605,7 +1606,7 @@ static struct mapped_device *alloc_dev(int minor)
1254 + bad_minor:
1255 + module_put(THIS_MODULE);
1256 + bad_module_get:
1257 +- kfree(md);
1258 ++ kvfree(md);
1259 + return NULL;
1260 + }
1261 +
1262 +@@ -1624,7 +1625,7 @@ static void free_dev(struct mapped_device *md)
1263 + free_minor(minor);
1264 +
1265 + module_put(THIS_MODULE);
1266 +- kfree(md);
1267 ++ kvfree(md);
1268 + }
1269 +
1270 + static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
1271 +@@ -2514,11 +2515,15 @@ struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
1272 +
1273 + md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
1274 +
1275 +- if (test_bit(DMF_FREEING, &md->flags) ||
1276 +- dm_deleting_md(md))
1277 +- return NULL;
1278 +-
1279 ++ spin_lock(&_minor_lock);
1280 ++ if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
1281 ++ md = NULL;
1282 ++ goto out;
1283 ++ }
1284 + dm_get(md);
1285 ++out:
1286 ++ spin_unlock(&_minor_lock);
1287 ++
1288 + return md;
1289 + }
1290 +
1291 +diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
1292 +index b49f80cb49c9..d9a5710532f4 100644
1293 +--- a/drivers/media/rc/ir-lirc-codec.c
1294 ++++ b/drivers/media/rc/ir-lirc-codec.c
1295 +@@ -286,11 +286,14 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
1296 + if (!dev->max_timeout)
1297 + return -ENOSYS;
1298 +
1299 ++ /* Check for multiply overflow */
1300 ++ if (val > U32_MAX / 1000)
1301 ++ return -EINVAL;
1302 ++
1303 + tmp = val * 1000;
1304 +
1305 +- if (tmp < dev->min_timeout ||
1306 +- tmp > dev->max_timeout)
1307 +- return -EINVAL;
1308 ++ if (tmp < dev->min_timeout || tmp > dev->max_timeout)
1309 ++ return -EINVAL;
1310 +
1311 + if (dev->s_timeout)
1312 + ret = dev->s_timeout(dev, tmp);
1313 +diff --git a/drivers/media/usb/as102/as102_fw.c b/drivers/media/usb/as102/as102_fw.c
1314 +index 5a28ce3a1d49..38dbc128340d 100644
1315 +--- a/drivers/media/usb/as102/as102_fw.c
1316 ++++ b/drivers/media/usb/as102/as102_fw.c
1317 +@@ -101,18 +101,23 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
1318 + unsigned char *cmd,
1319 + const struct firmware *firmware) {
1320 +
1321 +- struct as10x_fw_pkt_t fw_pkt;
1322 ++ struct as10x_fw_pkt_t *fw_pkt;
1323 + int total_read_bytes = 0, errno = 0;
1324 + unsigned char addr_has_changed = 0;
1325 +
1326 ++ fw_pkt = kmalloc(sizeof(*fw_pkt), GFP_KERNEL);
1327 ++ if (!fw_pkt)
1328 ++ return -ENOMEM;
1329 ++
1330 ++
1331 + for (total_read_bytes = 0; total_read_bytes < firmware->size; ) {
1332 + int read_bytes = 0, data_len = 0;
1333 +
1334 + /* parse intel hex line */
1335 + read_bytes = parse_hex_line(
1336 + (u8 *) (firmware->data + total_read_bytes),
1337 +- fw_pkt.raw.address,
1338 +- fw_pkt.raw.data,
1339 ++ fw_pkt->raw.address,
1340 ++ fw_pkt->raw.data,
1341 + &data_len,
1342 + &addr_has_changed);
1343 +
1344 +@@ -122,28 +127,28 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
1345 + /* detect the end of file */
1346 + total_read_bytes += read_bytes;
1347 + if (total_read_bytes == firmware->size) {
1348 +- fw_pkt.u.request[0] = 0x00;
1349 +- fw_pkt.u.request[1] = 0x03;
1350 ++ fw_pkt->u.request[0] = 0x00;
1351 ++ fw_pkt->u.request[1] = 0x03;
1352 +
1353 + /* send EOF command */
1354 + errno = bus_adap->ops->upload_fw_pkt(bus_adap,
1355 + (uint8_t *)
1356 +- &fw_pkt, 2, 0);
1357 ++ fw_pkt, 2, 0);
1358 + if (errno < 0)
1359 + goto error;
1360 + } else {
1361 + if (!addr_has_changed) {
1362 + /* prepare command to send */
1363 +- fw_pkt.u.request[0] = 0x00;
1364 +- fw_pkt.u.request[1] = 0x01;
1365 ++ fw_pkt->u.request[0] = 0x00;
1366 ++ fw_pkt->u.request[1] = 0x01;
1367 +
1368 +- data_len += sizeof(fw_pkt.u.request);
1369 +- data_len += sizeof(fw_pkt.raw.address);
1370 ++ data_len += sizeof(fw_pkt->u.request);
1371 ++ data_len += sizeof(fw_pkt->raw.address);
1372 +
1373 + /* send cmd to device */
1374 + errno = bus_adap->ops->upload_fw_pkt(bus_adap,
1375 + (uint8_t *)
1376 +- &fw_pkt,
1377 ++ fw_pkt,
1378 + data_len,
1379 + 0);
1380 + if (errno < 0)
1381 +@@ -152,6 +157,7 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
1382 + }
1383 + }
1384 + error:
1385 ++ kfree(fw_pkt);
1386 + return (errno == 0) ? total_read_bytes : errno;
1387 + }
1388 +
1389 +diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
1390 +index be9e3335dcb7..921cf1edb3b1 100644
1391 +--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
1392 ++++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
1393 +@@ -1622,7 +1622,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
1394 + nr = dev->devno;
1395 +
1396 + assoc_desc = udev->actconfig->intf_assoc[0];
1397 +- if (assoc_desc->bFirstInterface != ifnum) {
1398 ++ if (!assoc_desc || assoc_desc->bFirstInterface != ifnum) {
1399 + dev_err(d, "Not found matching IAD interface\n");
1400 + retval = -ENODEV;
1401 + goto err_if;
1402 +diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
1403 +index adc2147fcff7..bd6884223a0d 100644
1404 +--- a/drivers/media/v4l2-core/v4l2-ctrls.c
1405 ++++ b/drivers/media/v4l2-core/v4l2-ctrls.c
1406 +@@ -1219,6 +1219,16 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
1407 + }
1408 + EXPORT_SYMBOL(v4l2_ctrl_fill);
1409 +
1410 ++static u32 user_flags(const struct v4l2_ctrl *ctrl)
1411 ++{
1412 ++ u32 flags = ctrl->flags;
1413 ++
1414 ++ if (ctrl->is_ptr)
1415 ++ flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD;
1416 ++
1417 ++ return flags;
1418 ++}
1419 ++
1420 + static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 changes)
1421 + {
1422 + memset(ev->reserved, 0, sizeof(ev->reserved));
1423 +@@ -1226,7 +1236,7 @@ static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 change
1424 + ev->id = ctrl->id;
1425 + ev->u.ctrl.changes = changes;
1426 + ev->u.ctrl.type = ctrl->type;
1427 +- ev->u.ctrl.flags = ctrl->flags;
1428 ++ ev->u.ctrl.flags = user_flags(ctrl);
1429 + if (ctrl->is_ptr)
1430 + ev->u.ctrl.value64 = 0;
1431 + else
1432 +@@ -2550,10 +2560,8 @@ int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctr
1433 + else
1434 + qc->id = ctrl->id;
1435 + strlcpy(qc->name, ctrl->name, sizeof(qc->name));
1436 +- qc->flags = ctrl->flags;
1437 ++ qc->flags = user_flags(ctrl);
1438 + qc->type = ctrl->type;
1439 +- if (ctrl->is_ptr)
1440 +- qc->flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD;
1441 + qc->elem_size = ctrl->elem_size;
1442 + qc->elems = ctrl->elems;
1443 + qc->nr_of_dims = ctrl->nr_of_dims;
1444 +diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c
1445 +index dbf256217b3e..ada2d88fd4c7 100644
1446 +--- a/drivers/mtd/nand/mtk_ecc.c
1447 ++++ b/drivers/mtd/nand/mtk_ecc.c
1448 +@@ -116,6 +116,11 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id)
1449 + op = ECC_DECODE;
1450 + dec = readw(ecc->regs + ECC_DECDONE);
1451 + if (dec & ecc->sectors) {
1452 ++ /*
1453 ++ * Clear decode IRQ status once again to ensure that
1454 ++ * there will be no extra IRQ.
1455 ++ */
1456 ++ readw(ecc->regs + ECC_DECIRQ_STA);
1457 + ecc->sectors = 0;
1458 + complete(&ecc->done);
1459 + } else {
1460 +@@ -131,8 +136,6 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id)
1461 + }
1462 + }
1463 +
1464 +- writel(0, ecc->regs + ECC_IRQ_REG(op));
1465 +-
1466 + return IRQ_HANDLED;
1467 + }
1468 +
1469 +@@ -342,6 +345,12 @@ void mtk_ecc_disable(struct mtk_ecc *ecc)
1470 +
1471 + /* disable it */
1472 + mtk_ecc_wait_idle(ecc, op);
1473 ++ if (op == ECC_DECODE)
1474 ++ /*
1475 ++ * Clear decode IRQ status in case there is a timeout to wait
1476 ++ * decode IRQ.
1477 ++ */
1478 ++ readw(ecc->regs + ECC_DECIRQ_STA);
1479 + writew(0, ecc->regs + ECC_IRQ_REG(op));
1480 + writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op));
1481 +
1482 +diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
1483 +index 31a6ee307d80..a77cfd74a92e 100644
1484 +--- a/drivers/mtd/nand/nand_base.c
1485 ++++ b/drivers/mtd/nand/nand_base.c
1486 +@@ -2935,15 +2935,18 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
1487 + size_t *retlen, const uint8_t *buf)
1488 + {
1489 + struct nand_chip *chip = mtd_to_nand(mtd);
1490 ++ int chipnr = (int)(to >> chip->chip_shift);
1491 + struct mtd_oob_ops ops;
1492 + int ret;
1493 +
1494 +- /* Wait for the device to get ready */
1495 +- panic_nand_wait(mtd, chip, 400);
1496 +-
1497 + /* Grab the device */
1498 + panic_nand_get_device(chip, mtd, FL_WRITING);
1499 +
1500 ++ chip->select_chip(mtd, chipnr);
1501 ++
1502 ++ /* Wait for the device to get ready */
1503 ++ panic_nand_wait(mtd, chip, 400);
1504 ++
1505 + memset(&ops, 0, sizeof(ops));
1506 + ops.len = len;
1507 + ops.datbuf = (uint8_t *)buf;
1508 +diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
1509 +index c178cb0dd219..f3a516b3f108 100644
1510 +--- a/drivers/mtd/nand/omap2.c
1511 ++++ b/drivers/mtd/nand/omap2.c
1512 +@@ -1133,129 +1133,172 @@ static u8 bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2,
1513 + 0x97, 0x79, 0xe5, 0x24, 0xb5};
1514 +
1515 + /**
1516 +- * omap_calculate_ecc_bch - Generate bytes of ECC bytes
1517 ++ * _omap_calculate_ecc_bch - Generate ECC bytes for one sector
1518 + * @mtd: MTD device structure
1519 + * @dat: The pointer to data on which ecc is computed
1520 + * @ecc_code: The ecc_code buffer
1521 ++ * @i: The sector number (for a multi sector page)
1522 + *
1523 +- * Support calculating of BCH4/8 ecc vectors for the page
1524 ++ * Support calculating of BCH4/8/16 ECC vectors for one sector
1525 ++ * within a page. Sector number is in @i.
1526 + */
1527 +-static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd,
1528 +- const u_char *dat, u_char *ecc_calc)
1529 ++static int _omap_calculate_ecc_bch(struct mtd_info *mtd,
1530 ++ const u_char *dat, u_char *ecc_calc, int i)
1531 + {
1532 + struct omap_nand_info *info = mtd_to_omap(mtd);
1533 + int eccbytes = info->nand.ecc.bytes;
1534 + struct gpmc_nand_regs *gpmc_regs = &info->reg;
1535 + u8 *ecc_code;
1536 +- unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4;
1537 ++ unsigned long bch_val1, bch_val2, bch_val3, bch_val4;
1538 + u32 val;
1539 +- int i, j;
1540 ++ int j;
1541 ++
1542 ++ ecc_code = ecc_calc;
1543 ++ switch (info->ecc_opt) {
1544 ++ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
1545 ++ case OMAP_ECC_BCH8_CODE_HW:
1546 ++ bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
1547 ++ bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
1548 ++ bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
1549 ++ bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
1550 ++ *ecc_code++ = (bch_val4 & 0xFF);
1551 ++ *ecc_code++ = ((bch_val3 >> 24) & 0xFF);
1552 ++ *ecc_code++ = ((bch_val3 >> 16) & 0xFF);
1553 ++ *ecc_code++ = ((bch_val3 >> 8) & 0xFF);
1554 ++ *ecc_code++ = (bch_val3 & 0xFF);
1555 ++ *ecc_code++ = ((bch_val2 >> 24) & 0xFF);
1556 ++ *ecc_code++ = ((bch_val2 >> 16) & 0xFF);
1557 ++ *ecc_code++ = ((bch_val2 >> 8) & 0xFF);
1558 ++ *ecc_code++ = (bch_val2 & 0xFF);
1559 ++ *ecc_code++ = ((bch_val1 >> 24) & 0xFF);
1560 ++ *ecc_code++ = ((bch_val1 >> 16) & 0xFF);
1561 ++ *ecc_code++ = ((bch_val1 >> 8) & 0xFF);
1562 ++ *ecc_code++ = (bch_val1 & 0xFF);
1563 ++ break;
1564 ++ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
1565 ++ case OMAP_ECC_BCH4_CODE_HW:
1566 ++ bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
1567 ++ bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
1568 ++ *ecc_code++ = ((bch_val2 >> 12) & 0xFF);
1569 ++ *ecc_code++ = ((bch_val2 >> 4) & 0xFF);
1570 ++ *ecc_code++ = ((bch_val2 & 0xF) << 4) |
1571 ++ ((bch_val1 >> 28) & 0xF);
1572 ++ *ecc_code++ = ((bch_val1 >> 20) & 0xFF);
1573 ++ *ecc_code++ = ((bch_val1 >> 12) & 0xFF);
1574 ++ *ecc_code++ = ((bch_val1 >> 4) & 0xFF);
1575 ++ *ecc_code++ = ((bch_val1 & 0xF) << 4);
1576 ++ break;
1577 ++ case OMAP_ECC_BCH16_CODE_HW:
1578 ++ val = readl(gpmc_regs->gpmc_bch_result6[i]);
1579 ++ ecc_code[0] = ((val >> 8) & 0xFF);
1580 ++ ecc_code[1] = ((val >> 0) & 0xFF);
1581 ++ val = readl(gpmc_regs->gpmc_bch_result5[i]);
1582 ++ ecc_code[2] = ((val >> 24) & 0xFF);
1583 ++ ecc_code[3] = ((val >> 16) & 0xFF);
1584 ++ ecc_code[4] = ((val >> 8) & 0xFF);
1585 ++ ecc_code[5] = ((val >> 0) & 0xFF);
1586 ++ val = readl(gpmc_regs->gpmc_bch_result4[i]);
1587 ++ ecc_code[6] = ((val >> 24) & 0xFF);
1588 ++ ecc_code[7] = ((val >> 16) & 0xFF);
1589 ++ ecc_code[8] = ((val >> 8) & 0xFF);
1590 ++ ecc_code[9] = ((val >> 0) & 0xFF);
1591 ++ val = readl(gpmc_regs->gpmc_bch_result3[i]);
1592 ++ ecc_code[10] = ((val >> 24) & 0xFF);
1593 ++ ecc_code[11] = ((val >> 16) & 0xFF);
1594 ++ ecc_code[12] = ((val >> 8) & 0xFF);
1595 ++ ecc_code[13] = ((val >> 0) & 0xFF);
1596 ++ val = readl(gpmc_regs->gpmc_bch_result2[i]);
1597 ++ ecc_code[14] = ((val >> 24) & 0xFF);
1598 ++ ecc_code[15] = ((val >> 16) & 0xFF);
1599 ++ ecc_code[16] = ((val >> 8) & 0xFF);
1600 ++ ecc_code[17] = ((val >> 0) & 0xFF);
1601 ++ val = readl(gpmc_regs->gpmc_bch_result1[i]);
1602 ++ ecc_code[18] = ((val >> 24) & 0xFF);
1603 ++ ecc_code[19] = ((val >> 16) & 0xFF);
1604 ++ ecc_code[20] = ((val >> 8) & 0xFF);
1605 ++ ecc_code[21] = ((val >> 0) & 0xFF);
1606 ++ val = readl(gpmc_regs->gpmc_bch_result0[i]);
1607 ++ ecc_code[22] = ((val >> 24) & 0xFF);
1608 ++ ecc_code[23] = ((val >> 16) & 0xFF);
1609 ++ ecc_code[24] = ((val >> 8) & 0xFF);
1610 ++ ecc_code[25] = ((val >> 0) & 0xFF);
1611 ++ break;
1612 ++ default:
1613 ++ return -EINVAL;
1614 ++ }
1615 ++
1616 ++ /* ECC scheme specific syndrome customizations */
1617 ++ switch (info->ecc_opt) {
1618 ++ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
1619 ++ /* Add constant polynomial to remainder, so that
1620 ++ * ECC of blank pages results in 0x0 on reading back
1621 ++ */
1622 ++ for (j = 0; j < eccbytes; j++)
1623 ++ ecc_calc[j] ^= bch4_polynomial[j];
1624 ++ break;
1625 ++ case OMAP_ECC_BCH4_CODE_HW:
1626 ++ /* Set 8th ECC byte as 0x0 for ROM compatibility */
1627 ++ ecc_calc[eccbytes - 1] = 0x0;
1628 ++ break;
1629 ++ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
1630 ++ /* Add constant polynomial to remainder, so that
1631 ++ * ECC of blank pages results in 0x0 on reading back
1632 ++ */
1633 ++ for (j = 0; j < eccbytes; j++)
1634 ++ ecc_calc[j] ^= bch8_polynomial[j];
1635 ++ break;
1636 ++ case OMAP_ECC_BCH8_CODE_HW:
1637 ++ /* Set 14th ECC byte as 0x0 for ROM compatibility */
1638 ++ ecc_calc[eccbytes - 1] = 0x0;
1639 ++ break;
1640 ++ case OMAP_ECC_BCH16_CODE_HW:
1641 ++ break;
1642 ++ default:
1643 ++ return -EINVAL;
1644 ++ }
1645 ++
1646 ++ return 0;
1647 ++}
1648 ++
1649 ++/**
1650 ++ * omap_calculate_ecc_bch_sw - ECC generator for sector for SW based correction
1651 ++ * @mtd: MTD device structure
1652 ++ * @dat: The pointer to data on which ecc is computed
1653 ++ * @ecc_code: The ecc_code buffer
1654 ++ *
1655 ++ * Support calculating of BCH4/8/16 ECC vectors for one sector. This is used
1656 ++ * when SW based correction is required as ECC is required for one sector
1657 ++ * at a time.
1658 ++ */
1659 ++static int omap_calculate_ecc_bch_sw(struct mtd_info *mtd,
1660 ++ const u_char *dat, u_char *ecc_calc)
1661 ++{
1662 ++ return _omap_calculate_ecc_bch(mtd, dat, ecc_calc, 0);
1663 ++}
1664 ++
1665 ++/**
1666 ++ * omap_calculate_ecc_bch_multi - Generate ECC for multiple sectors
1667 ++ * @mtd: MTD device structure
1668 ++ * @dat: The pointer to data on which ecc is computed
1669 ++ * @ecc_code: The ecc_code buffer
1670 ++ *
1671 ++ * Support calculating of BCH4/8/16 ecc vectors for the entire page in one go.
1672 ++ */
1673 ++static int omap_calculate_ecc_bch_multi(struct mtd_info *mtd,
1674 ++ const u_char *dat, u_char *ecc_calc)
1675 ++{
1676 ++ struct omap_nand_info *info = mtd_to_omap(mtd);
1677 ++ int eccbytes = info->nand.ecc.bytes;
1678 ++ unsigned long nsectors;
1679 ++ int i, ret;
1680 +
1681 + nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
1682 + for (i = 0; i < nsectors; i++) {
1683 +- ecc_code = ecc_calc;
1684 +- switch (info->ecc_opt) {
1685 +- case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
1686 +- case OMAP_ECC_BCH8_CODE_HW:
1687 +- bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
1688 +- bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
1689 +- bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
1690 +- bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
1691 +- *ecc_code++ = (bch_val4 & 0xFF);
1692 +- *ecc_code++ = ((bch_val3 >> 24) & 0xFF);
1693 +- *ecc_code++ = ((bch_val3 >> 16) & 0xFF);
1694 +- *ecc_code++ = ((bch_val3 >> 8) & 0xFF);
1695 +- *ecc_code++ = (bch_val3 & 0xFF);
1696 +- *ecc_code++ = ((bch_val2 >> 24) & 0xFF);
1697 +- *ecc_code++ = ((bch_val2 >> 16) & 0xFF);
1698 +- *ecc_code++ = ((bch_val2 >> 8) & 0xFF);
1699 +- *ecc_code++ = (bch_val2 & 0xFF);
1700 +- *ecc_code++ = ((bch_val1 >> 24) & 0xFF);
1701 +- *ecc_code++ = ((bch_val1 >> 16) & 0xFF);
1702 +- *ecc_code++ = ((bch_val1 >> 8) & 0xFF);
1703 +- *ecc_code++ = (bch_val1 & 0xFF);
1704 +- break;
1705 +- case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
1706 +- case OMAP_ECC_BCH4_CODE_HW:
1707 +- bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
1708 +- bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
1709 +- *ecc_code++ = ((bch_val2 >> 12) & 0xFF);
1710 +- *ecc_code++ = ((bch_val2 >> 4) & 0xFF);
1711 +- *ecc_code++ = ((bch_val2 & 0xF) << 4) |
1712 +- ((bch_val1 >> 28) & 0xF);
1713 +- *ecc_code++ = ((bch_val1 >> 20) & 0xFF);
1714 +- *ecc_code++ = ((bch_val1 >> 12) & 0xFF);
1715 +- *ecc_code++ = ((bch_val1 >> 4) & 0xFF);
1716 +- *ecc_code++ = ((bch_val1 & 0xF) << 4);
1717 +- break;
1718 +- case OMAP_ECC_BCH16_CODE_HW:
1719 +- val = readl(gpmc_regs->gpmc_bch_result6[i]);
1720 +- ecc_code[0] = ((val >> 8) & 0xFF);
1721 +- ecc_code[1] = ((val >> 0) & 0xFF);
1722 +- val = readl(gpmc_regs->gpmc_bch_result5[i]);
1723 +- ecc_code[2] = ((val >> 24) & 0xFF);
1724 +- ecc_code[3] = ((val >> 16) & 0xFF);
1725 +- ecc_code[4] = ((val >> 8) & 0xFF);
1726 +- ecc_code[5] = ((val >> 0) & 0xFF);
1727 +- val = readl(gpmc_regs->gpmc_bch_result4[i]);
1728 +- ecc_code[6] = ((val >> 24) & 0xFF);
1729 +- ecc_code[7] = ((val >> 16) & 0xFF);
1730 +- ecc_code[8] = ((val >> 8) & 0xFF);
1731 +- ecc_code[9] = ((val >> 0) & 0xFF);
1732 +- val = readl(gpmc_regs->gpmc_bch_result3[i]);
1733 +- ecc_code[10] = ((val >> 24) & 0xFF);
1734 +- ecc_code[11] = ((val >> 16) & 0xFF);
1735 +- ecc_code[12] = ((val >> 8) & 0xFF);
1736 +- ecc_code[13] = ((val >> 0) & 0xFF);
1737 +- val = readl(gpmc_regs->gpmc_bch_result2[i]);
1738 +- ecc_code[14] = ((val >> 24) & 0xFF);
1739 +- ecc_code[15] = ((val >> 16) & 0xFF);
1740 +- ecc_code[16] = ((val >> 8) & 0xFF);
1741 +- ecc_code[17] = ((val >> 0) & 0xFF);
1742 +- val = readl(gpmc_regs->gpmc_bch_result1[i]);
1743 +- ecc_code[18] = ((val >> 24) & 0xFF);
1744 +- ecc_code[19] = ((val >> 16) & 0xFF);
1745 +- ecc_code[20] = ((val >> 8) & 0xFF);
1746 +- ecc_code[21] = ((val >> 0) & 0xFF);
1747 +- val = readl(gpmc_regs->gpmc_bch_result0[i]);
1748 +- ecc_code[22] = ((val >> 24) & 0xFF);
1749 +- ecc_code[23] = ((val >> 16) & 0xFF);
1750 +- ecc_code[24] = ((val >> 8) & 0xFF);
1751 +- ecc_code[25] = ((val >> 0) & 0xFF);
1752 +- break;
1753 +- default:
1754 +- return -EINVAL;
1755 +- }
1756 +-
1757 +- /* ECC scheme specific syndrome customizations */
1758 +- switch (info->ecc_opt) {
1759 +- case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
1760 +- /* Add constant polynomial to remainder, so that
1761 +- * ECC of blank pages results in 0x0 on reading back */
1762 +- for (j = 0; j < eccbytes; j++)
1763 +- ecc_calc[j] ^= bch4_polynomial[j];
1764 +- break;
1765 +- case OMAP_ECC_BCH4_CODE_HW:
1766 +- /* Set 8th ECC byte as 0x0 for ROM compatibility */
1767 +- ecc_calc[eccbytes - 1] = 0x0;
1768 +- break;
1769 +- case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
1770 +- /* Add constant polynomial to remainder, so that
1771 +- * ECC of blank pages results in 0x0 on reading back */
1772 +- for (j = 0; j < eccbytes; j++)
1773 +- ecc_calc[j] ^= bch8_polynomial[j];
1774 +- break;
1775 +- case OMAP_ECC_BCH8_CODE_HW:
1776 +- /* Set 14th ECC byte as 0x0 for ROM compatibility */
1777 +- ecc_calc[eccbytes - 1] = 0x0;
1778 +- break;
1779 +- case OMAP_ECC_BCH16_CODE_HW:
1780 +- break;
1781 +- default:
1782 +- return -EINVAL;
1783 +- }
1784 ++ ret = _omap_calculate_ecc_bch(mtd, dat, ecc_calc, i);
1785 ++ if (ret)
1786 ++ return ret;
1787 +
1788 +- ecc_calc += eccbytes;
1789 ++ ecc_calc += eccbytes;
1790 + }
1791 +
1792 + return 0;
1793 +@@ -1496,7 +1539,7 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
1794 + chip->write_buf(mtd, buf, mtd->writesize);
1795 +
1796 + /* Update ecc vector from GPMC result registers */
1797 +- chip->ecc.calculate(mtd, buf, &ecc_calc[0]);
1798 ++ omap_calculate_ecc_bch_multi(mtd, buf, &ecc_calc[0]);
1799 +
1800 + ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
1801 + chip->ecc.total);
1802 +@@ -1508,6 +1551,72 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
1803 + return 0;
1804 + }
1805 +
1806 ++/**
1807 ++ * omap_write_subpage_bch - BCH hardware ECC based subpage write
1808 ++ * @mtd: mtd info structure
1809 ++ * @chip: nand chip info structure
1810 ++ * @offset: column address of subpage within the page
1811 ++ * @data_len: data length
1812 ++ * @buf: data buffer
1813 ++ * @oob_required: must write chip->oob_poi to OOB
1814 ++ * @page: page number to write
1815 ++ *
1816 ++ * OMAP optimized subpage write method.
1817 ++ */
1818 ++static int omap_write_subpage_bch(struct mtd_info *mtd,
1819 ++ struct nand_chip *chip, u32 offset,
1820 ++ u32 data_len, const u8 *buf,
1821 ++ int oob_required, int page)
1822 ++{
1823 ++ u8 *ecc_calc = chip->buffers->ecccalc;
1824 ++ int ecc_size = chip->ecc.size;
1825 ++ int ecc_bytes = chip->ecc.bytes;
1826 ++ int ecc_steps = chip->ecc.steps;
1827 ++ u32 start_step = offset / ecc_size;
1828 ++ u32 end_step = (offset + data_len - 1) / ecc_size;
1829 ++ int step, ret = 0;
1830 ++
1831 ++ /*
1832 ++ * Write the entire page in one go, which is optimal since
1833 ++ * ECC is calculated by hardware.
1834 ++ * ECC is calculated for all subpages but we keep
1835 ++ * only the ones we want.
1836 ++ */
1837 ++
1838 ++ /* Enable GPMC ECC engine */
1839 ++ chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
1840 ++
1841 ++ /* Write data */
1842 ++ chip->write_buf(mtd, buf, mtd->writesize);
1843 ++
1844 ++ for (step = 0; step < ecc_steps; step++) {
1845 ++ /* mask ECC of un-touched subpages by padding 0xFF */
1846 ++ if (step < start_step || step > end_step)
1847 ++ memset(ecc_calc, 0xff, ecc_bytes);
1848 ++ else
1849 ++ ret = _omap_calculate_ecc_bch(mtd, buf, ecc_calc, step);
1850 ++
1851 ++ if (ret)
1852 ++ return ret;
1853 ++
1854 ++ buf += ecc_size;
1855 ++ ecc_calc += ecc_bytes;
1856 ++ }
1857 ++
1858 ++ /* copy calculated ECC for whole page to chip->buffer->oob */
1859 ++ /* this includes the masked value (0xFF) for unwritten subpages */
1860 ++ ecc_calc = chip->buffers->ecccalc;
1861 ++ ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
1862 ++ chip->ecc.total);
1863 ++ if (ret)
1864 ++ return ret;
1865 ++
1866 ++ /* write OOB buffer to NAND device */
1867 ++ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1868 ++
1869 ++ return 0;
1870 ++}
1871 ++
1872 + /**
1873 + * omap_read_page_bch - BCH ecc based page read function for entire page
1874 + * @mtd: mtd info structure
1875 +@@ -1544,7 +1653,7 @@ static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
1876 + chip->ecc.total);
1877 +
1878 + /* Calculate ecc bytes */
1879 +- chip->ecc.calculate(mtd, buf, ecc_calc);
1880 ++ omap_calculate_ecc_bch_multi(mtd, buf, ecc_calc);
1881 +
1882 + ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1883 + chip->ecc.total);
1884 +@@ -2044,7 +2153,7 @@ static int omap_nand_probe(struct platform_device *pdev)
1885 + nand_chip->ecc.strength = 4;
1886 + nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
1887 + nand_chip->ecc.correct = nand_bch_correct_data;
1888 +- nand_chip->ecc.calculate = omap_calculate_ecc_bch;
1889 ++ nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw;
1890 + mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
1891 + /* Reserve one byte for the OMAP marker */
1892 + oobbytes_per_step = nand_chip->ecc.bytes + 1;
1893 +@@ -2066,9 +2175,9 @@ static int omap_nand_probe(struct platform_device *pdev)
1894 + nand_chip->ecc.strength = 4;
1895 + nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
1896 + nand_chip->ecc.correct = omap_elm_correct_data;
1897 +- nand_chip->ecc.calculate = omap_calculate_ecc_bch;
1898 + nand_chip->ecc.read_page = omap_read_page_bch;
1899 + nand_chip->ecc.write_page = omap_write_page_bch;
1900 ++ nand_chip->ecc.write_subpage = omap_write_subpage_bch;
1901 + mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
1902 + oobbytes_per_step = nand_chip->ecc.bytes;
1903 +
1904 +@@ -2087,7 +2196,7 @@ static int omap_nand_probe(struct platform_device *pdev)
1905 + nand_chip->ecc.strength = 8;
1906 + nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
1907 + nand_chip->ecc.correct = nand_bch_correct_data;
1908 +- nand_chip->ecc.calculate = omap_calculate_ecc_bch;
1909 ++ nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw;
1910 + mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
1911 + /* Reserve one byte for the OMAP marker */
1912 + oobbytes_per_step = nand_chip->ecc.bytes + 1;
1913 +@@ -2109,9 +2218,9 @@ static int omap_nand_probe(struct platform_device *pdev)
1914 + nand_chip->ecc.strength = 8;
1915 + nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
1916 + nand_chip->ecc.correct = omap_elm_correct_data;
1917 +- nand_chip->ecc.calculate = omap_calculate_ecc_bch;
1918 + nand_chip->ecc.read_page = omap_read_page_bch;
1919 + nand_chip->ecc.write_page = omap_write_page_bch;
1920 ++ nand_chip->ecc.write_subpage = omap_write_subpage_bch;
1921 + mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
1922 + oobbytes_per_step = nand_chip->ecc.bytes;
1923 +
1924 +@@ -2131,9 +2240,9 @@ static int omap_nand_probe(struct platform_device *pdev)
1925 + nand_chip->ecc.strength = 16;
1926 + nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
1927 + nand_chip->ecc.correct = omap_elm_correct_data;
1928 +- nand_chip->ecc.calculate = omap_calculate_ecc_bch;
1929 + nand_chip->ecc.read_page = omap_read_page_bch;
1930 + nand_chip->ecc.write_page = omap_write_page_bch;
1931 ++ nand_chip->ecc.write_subpage = omap_write_subpage_bch;
1932 + mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
1933 + oobbytes_per_step = nand_chip->ecc.bytes;
1934 +
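The omap2 rework splits omap_calculate_ecc_bch() into a per-sector worker (_omap_calculate_ecc_bch), a single-sector wrapper for software-based correction (omap_calculate_ecc_bch_sw), and a whole-page helper (omap_calculate_ecc_bch_multi). That split is what makes the new omap_write_subpage_bch() possible: ECC can now be computed per ECC step, with untouched subpages masked to 0xFF. The worker itself is mostly byte-unpacking of 32-bit BCH result registers, most significant byte first; a self-contained sketch of that unpacking (plain C with a made-up register value, no GPMC access):

    #include <stdio.h>
    #include <stdint.h>

    /* split one 32-bit BCH result register into four ECC bytes, MSB first */
    static void unpack_be32(uint32_t val, uint8_t *out)
    {
        out[0] = (val >> 24) & 0xFF;
        out[1] = (val >> 16) & 0xFF;
        out[2] = (val >> 8) & 0xFF;
        out[3] = val & 0xFF;
    }

    int main(void)
    {
        uint8_t ecc[4];

        unpack_be32(0xA1B2C3D4, ecc); /* stand-in for a gpmc_bch_result read */
        printf("%02x %02x %02x %02x\n", ecc[0], ecc[1], ecc[2], ecc[3]);
        return 0;
    }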
1935 +diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
1936 +index 8f8418d2ac4a..a0012c3cb4f6 100644
1937 +--- a/drivers/net/ethernet/3com/typhoon.c
1938 ++++ b/drivers/net/ethernet/3com/typhoon.c
1939 +@@ -2366,9 +2366,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1940 + * 4) Get the hardware address.
1941 + * 5) Put the card to sleep.
1942 + */
1943 +- if (typhoon_reset(ioaddr, WaitSleep) < 0) {
1944 ++ err = typhoon_reset(ioaddr, WaitSleep);
1945 ++ if (err < 0) {
1946 + err_msg = "could not reset 3XP";
1947 +- err = -EIO;
1948 + goto error_out_dma;
1949 + }
1950 +
1951 +@@ -2382,24 +2382,25 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1952 + typhoon_init_interface(tp);
1953 + typhoon_init_rings(tp);
1954 +
1955 +- if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
1956 ++ err = typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST);
1957 ++ if (err < 0) {
1958 + err_msg = "cannot boot 3XP sleep image";
1959 +- err = -EIO;
1960 + goto error_out_reset;
1961 + }
1962 +
1963 + INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
1964 +- if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
1965 ++ err = typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp);
1966 ++ if (err < 0) {
1967 + err_msg = "cannot read MAC address";
1968 +- err = -EIO;
1969 + goto error_out_reset;
1970 + }
1971 +
1972 + *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
1973 + *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
1974 +
1975 +- if(!is_valid_ether_addr(dev->dev_addr)) {
1976 ++ if (!is_valid_ether_addr(dev->dev_addr)) {
1977 + err_msg = "Could not obtain valid ethernet address, aborting";
1978 ++ err = -EIO;
1979 + goto error_out_reset;
1980 + }
1981 +
1982 +@@ -2407,7 +2408,8 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1983 + * later when we print out the version reported.
1984 + */
1985 + INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
1986 +- if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
1987 ++ err = typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp);
1988 ++ if (err < 0) {
1989 + err_msg = "Could not get Sleep Image version";
1990 + goto error_out_reset;
1991 + }
1992 +@@ -2424,9 +2426,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1993 + if(xp_resp[0].numDesc != 0)
1994 + tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
1995 +
1996 +- if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
1997 ++ err = typhoon_sleep(tp, PCI_D3hot, 0);
1998 ++ if (err < 0) {
1999 + err_msg = "cannot put adapter to sleep";
2000 +- err = -EIO;
2001 + goto error_out_reset;
2002 + }
2003 +
2004 +@@ -2449,7 +2451,8 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2005 + dev->features = dev->hw_features |
2006 + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM;
2007 +
2008 +- if(register_netdev(dev) < 0) {
2009 ++ err = register_netdev(dev);
2010 ++ if (err < 0) {
2011 + err_msg = "unable to register netdev";
2012 + goto error_out_reset;
2013 + }
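The typhoon changes are a single pattern applied throughout probe: instead of testing a call's result inline and overwriting it with a blanket -EIO, each call's return value is stored in err and propagated, so typhoon_init_one() reports the real failure cause. The one place that genuinely has no errno of its own (an invalid MAC address) keeps an explicit err = -EIO, and minor style cleanups (space after if) ride along.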
2014 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2015 +index 333df540b375..5d2cf56aed0e 100644
2016 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2017 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2018 +@@ -3800,6 +3800,30 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
2019 + return rc;
2020 + }
2021 +
2022 ++static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
2023 ++{
2024 ++ int rc;
2025 ++
2026 ++ if (BNXT_PF(bp)) {
2027 ++ struct hwrm_func_cfg_input req = {0};
2028 ++
2029 ++ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
2030 ++ req.fid = cpu_to_le16(0xffff);
2031 ++ req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
2032 ++ req.async_event_cr = cpu_to_le16(idx);
2033 ++ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2034 ++ } else {
2035 ++ struct hwrm_func_vf_cfg_input req = {0};
2036 ++
2037 ++ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
2038 ++ req.enables =
2039 ++ cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
2040 ++ req.async_event_cr = cpu_to_le16(idx);
2041 ++ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2042 ++ }
2043 ++ return rc;
2044 ++}
2045 ++
2046 + static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
2047 + {
2048 + int i, rc = 0;
2049 +@@ -3816,6 +3840,12 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
2050 + goto err_out;
2051 + BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
2052 + bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
2053 ++
2054 ++ if (!i) {
2055 ++ rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
2056 ++ if (rc)
2057 ++ netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
2058 ++ }
2059 + }
2060 +
2061 + for (i = 0; i < bp->tx_nr_rings; i++) {
2062 +diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
2063 +index 0641c0098738..afb7ebe20b24 100644
2064 +--- a/drivers/net/ethernet/intel/e1000e/defines.h
2065 ++++ b/drivers/net/ethernet/intel/e1000e/defines.h
2066 +@@ -398,6 +398,7 @@
2067 + #define E1000_ICR_LSC 0x00000004 /* Link Status Change */
2068 + #define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */
2069 + #define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
2070 ++#define E1000_ICR_RXO 0x00000040 /* Receiver Overrun */
2071 + #define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
2072 + #define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */
2073 + /* If this bit asserted, the driver should claim the interrupt */
2074 +diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
2075 +index b322011ec282..f457c5703d0c 100644
2076 +--- a/drivers/net/ethernet/intel/e1000e/mac.c
2077 ++++ b/drivers/net/ethernet/intel/e1000e/mac.c
2078 +@@ -410,6 +410,9 @@ void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
2079 + * Checks to see of the link status of the hardware has changed. If a
2080 + * change in link status has been detected, then we read the PHY registers
2081 + * to get the current speed/duplex if link exists.
2082 ++ *
2083 ++ * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
2084 ++ * up).
2085 + **/
2086 + s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
2087 + {
2088 +@@ -423,7 +426,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
2089 + * Change or Rx Sequence Error interrupt.
2090 + */
2091 + if (!mac->get_link_status)
2092 +- return 0;
2093 ++ return 1;
2094 +
2095 + /* First we want to see if the MII Status Register reports
2096 + * link. If so, then we want to get the current speed/duplex
2097 +@@ -461,10 +464,12 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
2098 + * different link partner.
2099 + */
2100 + ret_val = e1000e_config_fc_after_link_up(hw);
2101 +- if (ret_val)
2102 ++ if (ret_val) {
2103 + e_dbg("Error configuring flow control\n");
2104 ++ return ret_val;
2105 ++ }
2106 +
2107 +- return ret_val;
2108 ++ return 1;
2109 + }
2110 +
2111 + /**
2112 +diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
2113 +index 7017281ba2dc..0feddf3393f9 100644
2114 +--- a/drivers/net/ethernet/intel/e1000e/netdev.c
2115 ++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
2116 +@@ -1905,14 +1905,30 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)
2117 + struct net_device *netdev = data;
2118 + struct e1000_adapter *adapter = netdev_priv(netdev);
2119 + struct e1000_hw *hw = &adapter->hw;
2120 ++ u32 icr;
2121 ++ bool enable = true;
2122 ++
2123 ++ icr = er32(ICR);
2124 ++ if (icr & E1000_ICR_RXO) {
2125 ++ ew32(ICR, E1000_ICR_RXO);
2126 ++ enable = false;
2127 ++ /* napi poll will re-enable Other, make sure it runs */
2128 ++ if (napi_schedule_prep(&adapter->napi)) {
2129 ++ adapter->total_rx_bytes = 0;
2130 ++ adapter->total_rx_packets = 0;
2131 ++ __napi_schedule(&adapter->napi);
2132 ++ }
2133 ++ }
2134 ++ if (icr & E1000_ICR_LSC) {
2135 ++ ew32(ICR, E1000_ICR_LSC);
2136 ++ hw->mac.get_link_status = true;
2137 ++ /* guard against interrupt when we're going down */
2138 ++ if (!test_bit(__E1000_DOWN, &adapter->state))
2139 ++ mod_timer(&adapter->watchdog_timer, jiffies + 1);
2140 ++ }
2141 +
2142 +- hw->mac.get_link_status = true;
2143 +-
2144 +- /* guard against interrupt when we're going down */
2145 +- if (!test_bit(__E1000_DOWN, &adapter->state)) {
2146 +- mod_timer(&adapter->watchdog_timer, jiffies + 1);
2147 ++ if (enable && !test_bit(__E1000_DOWN, &adapter->state))
2148 + ew32(IMS, E1000_IMS_OTHER);
2149 +- }
2150 +
2151 + return IRQ_HANDLED;
2152 + }
2153 +@@ -2683,7 +2699,8 @@ static int e1000e_poll(struct napi_struct *napi, int weight)
2154 + napi_complete_done(napi, work_done);
2155 + if (!test_bit(__E1000_DOWN, &adapter->state)) {
2156 + if (adapter->msix_entries)
2157 +- ew32(IMS, adapter->rx_ring->ims_val);
2158 ++ ew32(IMS, adapter->rx_ring->ims_val |
2159 ++ E1000_IMS_OTHER);
2160 + else
2161 + e1000_irq_enable(adapter);
2162 + }
2163 +@@ -4178,7 +4195,7 @@ static void e1000e_trigger_lsc(struct e1000_adapter *adapter)
2164 + struct e1000_hw *hw = &adapter->hw;
2165 +
2166 + if (adapter->msix_entries)
2167 +- ew32(ICS, E1000_ICS_OTHER);
2168 ++ ew32(ICS, E1000_ICS_LSC | E1000_ICS_OTHER);
2169 + else
2170 + ew32(ICS, E1000_ICS_LSC);
2171 + }
2172 +@@ -5056,7 +5073,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter)
2173 + case e1000_media_type_copper:
2174 + if (hw->mac.get_link_status) {
2175 + ret_val = hw->mac.ops.check_for_link(hw);
2176 +- link_active = !hw->mac.get_link_status;
2177 ++ link_active = ret_val > 0;
2178 + } else {
2179 + link_active = true;
2180 + }
2181 +@@ -5074,7 +5091,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter)
2182 + break;
2183 + }
2184 +
2185 +- if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
2186 ++ if ((ret_val == -E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
2187 + (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
2188 + /* See e1000_kmrn_lock_loss_workaround_ich8lan() */
2189 + e_info("Gigabit has been disabled, downgrading speed\n");
2190 +diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
2191 +index d78d47b41a71..86ff0969efb6 100644
2192 +--- a/drivers/net/ethernet/intel/e1000e/phy.c
2193 ++++ b/drivers/net/ethernet/intel/e1000e/phy.c
2194 +@@ -1744,6 +1744,7 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
2195 + s32 ret_val = 0;
2196 + u16 i, phy_status;
2197 +
2198 ++ *success = false;
2199 + for (i = 0; i < iterations; i++) {
2200 + /* Some PHYs require the MII_BMSR register to be read
2201 + * twice due to the link bit being sticky. No harm doing
2202 +@@ -1763,16 +1764,16 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
2203 + ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
2204 + if (ret_val)
2205 + break;
2206 +- if (phy_status & BMSR_LSTATUS)
2207 ++ if (phy_status & BMSR_LSTATUS) {
2208 ++ *success = true;
2209 + break;
2210 ++ }
2211 + if (usec_interval >= 1000)
2212 + msleep(usec_interval / 1000);
2213 + else
2214 + udelay(usec_interval);
2215 + }
2216 +
2217 +- *success = (i < iterations);
2218 +-
2219 + return ret_val;
2220 + }
2221 +
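The e1000e hunks above form one series. e1000e_check_for_copper_link() now returns 1 for link up, 0 for link down and a negative error code otherwise, and it no longer swallows a flow-control configuration failure. That lets e1000e_has_link() test ret_val > 0 instead of inferring link state from get_link_status, and the E1000_ERR_PHY comparison gains its missing negative sign. On the interrupt side, e1000_msix_other() acks and handles RXO (scheduling napi so the ring is drained) and LSC explicitly instead of treating every Other interrupt as a link change, e1000e_poll() keeps E1000_IMS_OTHER set when re-arming interrupts, and e1000e_trigger_lsc() fires both ICS bits. Finally, e1000e_phy_has_link_generic() initializes *success up front and sets it exactly where link is observed, rather than deriving it from the loop counter.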
2222 +diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
2223 +index 5de937852436..2aae6f88dca0 100644
2224 +--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
2225 ++++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
2226 +@@ -1225,7 +1225,7 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
2227 + break;
2228 +
2229 + /* prevent any other reads prior to eop_desc */
2230 +- read_barrier_depends();
2231 ++ smp_rmb();
2232 +
2233 + /* if DD is not set pending work has not been completed */
2234 + if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE))
2235 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
2236 +index 31c97e3937a4..2caafebb0295 100644
2237 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
2238 ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
2239 +@@ -3604,7 +3604,7 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
2240 + break;
2241 +
2242 + /* prevent any other reads prior to eop_desc */
2243 +- read_barrier_depends();
2244 ++ smp_rmb();
2245 +
2246 + /* if the descriptor isn't done, no work yet to do */
2247 + if (!(eop_desc->cmd_type_offset_bsz &
2248 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
2249 +index 6287bf63c43c..c5430394fac9 100644
2250 +--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
2251 ++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
2252 +@@ -679,7 +679,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
2253 + break;
2254 +
2255 + /* prevent any other reads prior to eop_desc */
2256 +- read_barrier_depends();
2257 ++ smp_rmb();
2258 +
2259 + /* we have caught up to head, no work left to do */
2260 + if (tx_head == tx_desc)
2261 +diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
2262 +index 75f2a2cdd738..c03800d1000a 100644
2263 +--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
2264 ++++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
2265 +@@ -184,7 +184,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
2266 + break;
2267 +
2268 + /* prevent any other reads prior to eop_desc */
2269 +- read_barrier_depends();
2270 ++ smp_rmb();
2271 +
2272 + /* we have caught up to head, no work left to do */
2273 + if (tx_head == tx_desc)
2274 +diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
2275 +index c6c2562d9df3..16839600fb78 100644
2276 +--- a/drivers/net/ethernet/intel/igb/igb_main.c
2277 ++++ b/drivers/net/ethernet/intel/igb/igb_main.c
2278 +@@ -6660,7 +6660,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
2279 + break;
2280 +
2281 + /* prevent any other reads prior to eop_desc */
2282 +- read_barrier_depends();
2283 ++ smp_rmb();
2284 +
2285 + /* if DD is not set pending work has not been completed */
2286 + if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
2287 +diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
2288 +index 7dff7f6239cd..5428e39fa4e5 100644
2289 +--- a/drivers/net/ethernet/intel/igbvf/netdev.c
2290 ++++ b/drivers/net/ethernet/intel/igbvf/netdev.c
2291 +@@ -810,7 +810,7 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
2292 + break;
2293 +
2294 + /* prevent any other reads prior to eop_desc */
2295 +- read_barrier_depends();
2296 ++ smp_rmb();
2297 +
2298 + /* if DD is not set pending work has not been completed */
2299 + if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
2300 +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2301 +index 334eb96ecda3..a5428b6abdac 100644
2302 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2303 ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2304 +@@ -1171,7 +1171,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
2305 + break;
2306 +
2307 + /* prevent any other reads prior to eop_desc */
2308 +- read_barrier_depends();
2309 ++ smp_rmb();
2310 +
2311 + /* if DD is not set pending work has not been completed */
2312 + if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
2313 +diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
2314 +index cbf70fe4028a..1499ce2bf9f6 100644
2315 +--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
2316 ++++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
2317 +@@ -325,7 +325,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
2318 + break;
2319 +
2320 + /* prevent any other reads prior to eop_desc */
2321 +- read_barrier_depends();
2322 ++ smp_rmb();
2323 +
2324 + /* if DD is not set pending work has not been completed */
2325 + if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
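The sweep from fm10k through ixgbevf replaces read_barrier_depends() with smp_rmb() in the Tx cleanup loops. The check of the descriptor's done bit is an independent read, not one that is data-dependent on the eop_desc pointer load, so a dependency barrier (a no-op on everything but Alpha) is not enough; a real read memory barrier is needed to keep the done-bit read from being ordered before the pointer read.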
2326 +diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c
2327 +index 70ecd82d674d..098c814e22c8 100644
2328 +--- a/drivers/net/wireless/admtek/adm8211.c
2329 ++++ b/drivers/net/wireless/admtek/adm8211.c
2330 +@@ -413,6 +413,13 @@ static void adm8211_interrupt_rci(struct ieee80211_hw *dev)
2331 + skb_tail_pointer(newskb),
2332 + RX_PKT_SIZE,
2333 + PCI_DMA_FROMDEVICE);
2334 ++ if (pci_dma_mapping_error(priv->pdev,
2335 ++ priv->rx_buffers[entry].mapping)) {
2336 ++ priv->rx_buffers[entry].skb = NULL;
2337 ++ dev_kfree_skb(newskb);
2338 ++ skb = NULL;
2339 ++ /* TODO: update rx dropped stats */
2340 ++ }
2341 + } else {
2342 + skb = NULL;
2343 + /* TODO: update rx dropped stats */
2344 +@@ -1450,6 +1457,12 @@ static int adm8211_init_rings(struct ieee80211_hw *dev)
2345 + skb_tail_pointer(rx_info->skb),
2346 + RX_PKT_SIZE,
2347 + PCI_DMA_FROMDEVICE);
2348 ++ if (pci_dma_mapping_error(priv->pdev, rx_info->mapping)) {
2349 ++ dev_kfree_skb(rx_info->skb);
2350 ++ rx_info->skb = NULL;
2351 ++ break;
2352 ++ }
2353 ++
2354 + desc->buffer1 = cpu_to_le32(rx_info->mapping);
2355 + desc->status = cpu_to_le32(RDES0_STATUS_OWN | RDES0_STATUS_SQL);
2356 + }
2357 +@@ -1613,7 +1626,7 @@ static void adm8211_calc_durations(int *dur, int *plcp, size_t payload_len, int
2358 + }
2359 +
2360 + /* Transmit skb w/adm8211_tx_hdr (802.11 header created by hardware) */
2361 +-static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
2362 ++static int adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
2363 + u16 plcp_signal,
2364 + size_t hdrlen)
2365 + {
2366 +@@ -1625,6 +1638,8 @@ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
2367 +
2368 + mapping = pci_map_single(priv->pdev, skb->data, skb->len,
2369 + PCI_DMA_TODEVICE);
2370 ++ if (pci_dma_mapping_error(priv->pdev, mapping))
2371 ++ return -ENOMEM;
2372 +
2373 + spin_lock_irqsave(&priv->lock, flags);
2374 +
2375 +@@ -1657,6 +1672,8 @@ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
2376 +
2377 + /* Trigger transmit poll */
2378 + ADM8211_CSR_WRITE(TDR, 0);
2379 ++
2380 ++ return 0;
2381 + }
2382 +
2383 + /* Put adm8211_tx_hdr on skb and transmit */
2384 +@@ -1710,7 +1727,10 @@ static void adm8211_tx(struct ieee80211_hw *dev,
2385 +
2386 + txhdr->retry_limit = info->control.rates[0].count;
2387 +
2388 +- adm8211_tx_raw(dev, skb, plcp_signal, hdrlen);
2389 ++ if (adm8211_tx_raw(dev, skb, plcp_signal, hdrlen)) {
2390 ++ /* Drop packet */
2391 ++ ieee80211_free_txskb(dev, skb);
2392 ++ }
2393 + }
2394 +
2395 + static int adm8211_alloc_rings(struct ieee80211_hw *dev)
2396 +@@ -1843,7 +1863,8 @@ static int adm8211_probe(struct pci_dev *pdev,
2397 + priv->rx_ring_size = rx_ring_size;
2398 + priv->tx_ring_size = tx_ring_size;
2399 +
2400 +- if (adm8211_alloc_rings(dev)) {
2401 ++ err = adm8211_alloc_rings(dev);
2402 ++ if (err) {
2403 + printk(KERN_ERR "%s (adm8211): Cannot allocate TX/RX ring\n",
2404 + pci_name(pdev));
2405 + goto err_iounmap;
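adm8211 gains pci_dma_mapping_error() checks after every pci_map_single(): the RX interrupt path drops the new skb on a mapping failure, ring setup bails out of the fill loop, and adm8211_tx_raw() now returns -ENOMEM so adm8211_tx() can free the skb with ieee80211_free_txskb() instead of queueing a descriptor with a bogus DMA address. Probe also stores the adm8211_alloc_rings() result in err so the error path returns it.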
2406 +diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
2407 +index 366d3dcb8e9d..7b3017f55e3d 100644
2408 +--- a/drivers/net/wireless/ath/ath10k/core.c
2409 ++++ b/drivers/net/wireless/ath/ath10k/core.c
2410 +@@ -691,8 +691,11 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
2411 + "boot get otp board id result 0x%08x board_id %d chip_id %d\n",
2412 + result, board_id, chip_id);
2413 +
2414 +- if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0)
2415 ++ if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0 ||
2416 ++ (board_id == 0)) {
2417 ++ ath10k_warn(ar, "board id does not exist in otp, ignoring it\n");
2418 + return -EOPNOTSUPP;
2419 ++ }
2420 +
2421 + ar->id.bmi_ids_valid = true;
2422 + ar->id.bmi_board_id = board_id;
2423 +diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
2424 +index 30e98afa2e68..17ab8efdac35 100644
2425 +--- a/drivers/net/wireless/ath/ath10k/mac.c
2426 ++++ b/drivers/net/wireless/ath/ath10k/mac.c
2427 +@@ -1224,6 +1224,36 @@ static int ath10k_monitor_recalc(struct ath10k *ar)
2428 + return ath10k_monitor_stop(ar);
2429 + }
2430 +
2431 ++static bool ath10k_mac_can_set_cts_prot(struct ath10k_vif *arvif)
2432 ++{
2433 ++ struct ath10k *ar = arvif->ar;
2434 ++
2435 ++ lockdep_assert_held(&ar->conf_mutex);
2436 ++
2437 ++ if (!arvif->is_started) {
2438 ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "defer cts setup, vdev is not ready yet\n");
2439 ++ return false;
2440 ++ }
2441 ++
2442 ++ return true;
2443 ++}
2444 ++
2445 ++static int ath10k_mac_set_cts_prot(struct ath10k_vif *arvif)
2446 ++{
2447 ++ struct ath10k *ar = arvif->ar;
2448 ++ u32 vdev_param;
2449 ++
2450 ++ lockdep_assert_held(&ar->conf_mutex);
2451 ++
2452 ++ vdev_param = ar->wmi.vdev_param->protection_mode;
2453 ++
2454 ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_protection %d\n",
2455 ++ arvif->vdev_id, arvif->use_cts_prot);
2456 ++
2457 ++ return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
2458 ++ arvif->use_cts_prot ? 1 : 0);
2459 ++}
2460 ++
2461 + static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
2462 + {
2463 + struct ath10k *ar = arvif->ar;
2464 +@@ -4668,7 +4698,8 @@ static int ath10k_mac_txpower_recalc(struct ath10k *ar)
2465 + lockdep_assert_held(&ar->conf_mutex);
2466 +
2467 + list_for_each_entry(arvif, &ar->arvifs, list) {
2468 +- WARN_ON(arvif->txpower < 0);
2469 ++ if (arvif->txpower <= 0)
2470 ++ continue;
2471 +
2472 + if (txpower == -1)
2473 + txpower = arvif->txpower;
2474 +@@ -4676,8 +4707,8 @@ static int ath10k_mac_txpower_recalc(struct ath10k *ar)
2475 + txpower = min(txpower, arvif->txpower);
2476 + }
2477 +
2478 +- if (WARN_ON(txpower == -1))
2479 +- return -EINVAL;
2480 ++ if (txpower == -1)
2481 ++ return 0;
2482 +
2483 + ret = ath10k_mac_txpower_setup(ar, txpower);
2484 + if (ret) {
2485 +@@ -5321,20 +5352,18 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2486 +
2487 + if (changed & BSS_CHANGED_ERP_CTS_PROT) {
2488 + arvif->use_cts_prot = info->use_cts_prot;
2489 +- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
2490 +- arvif->vdev_id, info->use_cts_prot);
2491 +
2492 + ret = ath10k_recalc_rtscts_prot(arvif);
2493 + if (ret)
2494 + ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
2495 + arvif->vdev_id, ret);
2496 +
2497 +- vdev_param = ar->wmi.vdev_param->protection_mode;
2498 +- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
2499 +- info->use_cts_prot ? 1 : 0);
2500 +- if (ret)
2501 +- ath10k_warn(ar, "failed to set protection mode %d on vdev %i: %d\n",
2502 +- info->use_cts_prot, arvif->vdev_id, ret);
2503 ++ if (ath10k_mac_can_set_cts_prot(arvif)) {
2504 ++ ret = ath10k_mac_set_cts_prot(arvif);
2505 ++ if (ret)
2506 ++ ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n",
2507 ++ arvif->vdev_id, ret);
2508 ++ }
2509 + }
2510 +
2511 + if (changed & BSS_CHANGED_ERP_SLOT) {
2512 +@@ -7355,6 +7384,13 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
2513 + arvif->is_up = true;
2514 + }
2515 +
2516 ++ if (ath10k_mac_can_set_cts_prot(arvif)) {
2517 ++ ret = ath10k_mac_set_cts_prot(arvif);
2518 ++ if (ret)
2519 ++ ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n",
2520 ++ arvif->vdev_id, ret);
2521 ++ }
2522 ++
2523 + mutex_unlock(&ar->conf_mutex);
2524 + return 0;
2525 +
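In ath10k, setting the protection-mode vdev parameter from bss_info_changed() fails if the vdev has not been started yet, so the logic is split into ath10k_mac_can_set_cts_prot()/ath10k_mac_set_cts_prot(): bss_info_changed() records use_cts_prot and applies it only when the vdev is ready, and assign_vif_chanctx() re-applies it once the vdev comes up. The txpower recalculation likewise stops WARNing on vifs without a valid txpower and simply skips them, returning early when no vif has one.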
2526 +diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
2527 +index e64f59300a7c..0e4d49adddd0 100644
2528 +--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
2529 ++++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
2530 +@@ -1105,8 +1105,10 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
2531 + struct ath10k_fw_stats_pdev *dst;
2532 +
2533 + src = data;
2534 +- if (data_len < sizeof(*src))
2535 ++ if (data_len < sizeof(*src)) {
2536 ++ kfree(tb);
2537 + return -EPROTO;
2538 ++ }
2539 +
2540 + data += sizeof(*src);
2541 + data_len -= sizeof(*src);
2542 +@@ -1126,8 +1128,10 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
2543 + struct ath10k_fw_stats_vdev *dst;
2544 +
2545 + src = data;
2546 +- if (data_len < sizeof(*src))
2547 ++ if (data_len < sizeof(*src)) {
2548 ++ kfree(tb);
2549 + return -EPROTO;
2550 ++ }
2551 +
2552 + data += sizeof(*src);
2553 + data_len -= sizeof(*src);
2554 +@@ -1145,8 +1149,10 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
2555 + struct ath10k_fw_stats_peer *dst;
2556 +
2557 + src = data;
2558 +- if (data_len < sizeof(*src))
2559 ++ if (data_len < sizeof(*src)) {
2560 ++ kfree(tb);
2561 + return -EPROTO;
2562 ++ }
2563 +
2564 + data += sizeof(*src);
2565 + data_len -= sizeof(*src);
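The three wmi-tlv hunks plug the same leak: ath10k_wmi_tlv_op_pull_fw_stats() allocates a TLV table (tb) near the top of the function, and each short-buffer early return -EPROTO previously bailed out without kfree(tb).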
2566 +diff --git a/drivers/net/wireless/intersil/p54/main.c b/drivers/net/wireless/intersil/p54/main.c
2567 +index d5a3bf91a03e..ab6d39e12069 100644
2568 +--- a/drivers/net/wireless/intersil/p54/main.c
2569 ++++ b/drivers/net/wireless/intersil/p54/main.c
2570 +@@ -852,12 +852,11 @@ void p54_unregister_common(struct ieee80211_hw *dev)
2571 + {
2572 + struct p54_common *priv = dev->priv;
2573 +
2574 +-#ifdef CONFIG_P54_LEDS
2575 +- p54_unregister_leds(priv);
2576 +-#endif /* CONFIG_P54_LEDS */
2577 +-
2578 + if (priv->registered) {
2579 + priv->registered = false;
2580 ++#ifdef CONFIG_P54_LEDS
2581 ++ p54_unregister_leds(priv);
2582 ++#endif /* CONFIG_P54_LEDS */
2583 + ieee80211_unregister_hw(dev);
2584 + }
2585 +
2586 +diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
2587 +index 8718950004f3..8d601dcf2948 100644
2588 +--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
2589 ++++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
2590 +@@ -2296,6 +2296,12 @@ static void mwifiex_recreate_adapter(struct sdio_mmc_card *card)
2591 + mmc_hw_reset(func->card->host);
2592 + sdio_release_host(func);
2593 +
2594 ++ /* Previous save_adapter won't be valid after this. We will cancel
2595 ++ * pending work requests.
2596 ++ */
2597 ++ clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags);
2598 ++ clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &iface_work_flags);
2599 ++
2600 + mwifiex_sdio_probe(func, device_id);
2601 + }
2602 +
2603 +diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
2604 +index bf3f0a39908c..9fc6f1615343 100644
2605 +--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
2606 ++++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
2607 +@@ -4707,8 +4707,8 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
2608 + rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 2);
2609 + else
2610 + rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 1);
2611 +- rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_PSDU, 0);
2612 +- rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_MPDU, 0);
2613 ++ rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_PSDU, 10);
2614 ++ rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_MPDU, 10);
2615 + rt2800_register_write(rt2x00dev, MAX_LEN_CFG, reg);
2616 +
2617 + rt2800_register_read(rt2x00dev, LED_CFG, &reg);
2618 +diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
2619 +index 631df690adbe..f57bb2cd604e 100644
2620 +--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
2621 ++++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
2622 +@@ -57,7 +57,7 @@ int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
2623 + if (status >= 0)
2624 + return 0;
2625 +
2626 +- if (status == -ENODEV) {
2627 ++ if (status == -ENODEV || status == -ENOENT) {
2628 + /* Device has disappeared. */
2629 + clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
2630 + break;
2631 +@@ -321,7 +321,7 @@ static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data)
2632 +
2633 + status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
2634 + if (status) {
2635 +- if (status == -ENODEV)
2636 ++ if (status == -ENODEV || status == -ENOENT)
2637 + clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
2638 + set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
2639 + rt2x00lib_dmadone(entry);
2640 +@@ -410,7 +410,7 @@ static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data)
2641 +
2642 + status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
2643 + if (status) {
2644 +- if (status == -ENODEV)
2645 ++ if (status == -ENODEV || status == -ENOENT)
2646 + clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
2647 + set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
2648 + rt2x00lib_dmadone(entry);
2649 +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
2650 +index b3f6a9ed15d4..27a0e50c2793 100644
2651 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
2652 ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
2653 +@@ -664,7 +664,7 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
2654 + struct rtl_priv *rtlpriv = rtl_priv(hw);
2655 + struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
2656 + struct sk_buff *skb = NULL;
2657 +-
2658 ++ bool rtstatus;
2659 + u32 totalpacketlen;
2660 + u8 u1rsvdpageloc[5] = { 0 };
2661 + bool b_dlok = false;
2662 +@@ -727,7 +727,9 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
2663 + memcpy((u8 *)skb_put(skb, totalpacketlen),
2664 + &reserved_page_packet, totalpacketlen);
2665 +
2666 +- b_dlok = true;
2667 ++ rtstatus = rtl_cmd_send_packet(hw, skb);
2668 ++ if (rtstatus)
2669 ++ b_dlok = true;
2670 +
2671 + if (b_dlok) {
2672 + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD ,
2673 +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
2674 +index 1281ebe0c30a..82d53895ce4d 100644
2675 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
2676 ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
2677 +@@ -1378,6 +1378,7 @@ static void _rtl8821ae_get_wakeup_reason(struct ieee80211_hw *hw)
2678 +
2679 + ppsc->wakeup_reason = 0;
2680 +
2681 ++ do_gettimeofday(&ts);
2682 + rtlhal->last_suspend_sec = ts.tv_sec;
2683 +
2684 + switch (fw_reason) {
2685 +diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
2686 +index fac7cabe8f56..d8d189d14834 100644
2687 +--- a/drivers/nvdimm/label.c
2688 ++++ b/drivers/nvdimm/label.c
2689 +@@ -861,7 +861,7 @@ static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
2690 + nsindex = to_namespace_index(ndd, 0);
2691 + memset(nsindex, 0, ndd->nsarea.config_size);
2692 + for (i = 0; i < 2; i++) {
2693 +- int rc = nd_label_write_index(ndd, i, i*2, ND_NSINDEX_INIT);
2694 ++ int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT);
2695 +
2696 + if (rc)
2697 + return rc;
2698 +diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
2699 +index a38ae34b74e4..b8fb1ef1fc15 100644
2700 +--- a/drivers/nvdimm/namespace_devs.c
2701 ++++ b/drivers/nvdimm/namespace_devs.c
2702 +@@ -1451,7 +1451,7 @@ static umode_t namespace_visible(struct kobject *kobj,
2703 + if (a == &dev_attr_resource.attr) {
2704 + if (is_namespace_blk(dev))
2705 + return 0;
2706 +- return a->mode;
2707 ++ return 0400;
2708 + }
2709 +
2710 + if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
2711 +diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
2712 +index 78cb3e2359bd..71eb6c637b60 100644
2713 +--- a/drivers/nvdimm/pfn_devs.c
2714 ++++ b/drivers/nvdimm/pfn_devs.c
2715 +@@ -270,8 +270,16 @@ static struct attribute *nd_pfn_attributes[] = {
2716 + NULL,
2717 + };
2718 +
2719 ++static umode_t pfn_visible(struct kobject *kobj, struct attribute *a, int n)
2720 ++{
2721 ++ if (a == &dev_attr_resource.attr)
2722 ++ return 0400;
2723 ++ return a->mode;
2724 ++}
2725 ++
2726 + struct attribute_group nd_pfn_attribute_group = {
2727 + .attrs = nd_pfn_attributes,
2728 ++ .is_visible = pfn_visible,
2729 + };
2730 +
2731 + static const struct attribute_group *nd_pfn_attribute_groups[] = {
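Both nvdimm attribute hunks lock the resource sysfs attribute down to 0400 (namespace_visible() returns 0400 instead of a->mode, and pfn_devs grows a pfn_visible() callback for the same purpose), keeping the physical address it exposes readable by root only. The label.c change seeds the two freshly initialized index blocks with sequence numbers 3 and 2 (3 - i) rather than 0 and 2, presumably because a zero sequence number leaves the ordering of the index pair ambiguous.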
2732 +diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
2733 +index 6fe4c48a21e4..f791d46fe50f 100644
2734 +--- a/drivers/nvme/target/admin-cmd.c
2735 ++++ b/drivers/nvme/target/admin-cmd.c
2736 +@@ -381,7 +381,6 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
2737 + {
2738 + struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
2739 + u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
2740 +- u64 val;
2741 + u32 val32;
2742 + u16 status = 0;
2743 +
2744 +@@ -391,8 +390,7 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
2745 + (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
2746 + break;
2747 + case NVME_FEAT_KATO:
2748 +- val = le64_to_cpu(req->cmd->prop_set.value);
2749 +- val32 = val & 0xffff;
2750 ++ val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
2751 + req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
2752 + nvmet_set_result(req, req->sq->ctrl->kato);
2753 + break;
2754 +diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
2755 +index d266d800f246..60bada90cd75 100644
2756 +--- a/drivers/pci/probe.c
2757 ++++ b/drivers/pci/probe.c
2758 +@@ -1438,8 +1438,16 @@ static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
2759 +
2760 + static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
2761 + {
2762 +- if (hpp)
2763 +- dev_warn(&dev->dev, "PCI-X settings not supported\n");
2764 ++ int pos;
2765 ++
2766 ++ if (!hpp)
2767 ++ return;
2768 ++
2769 ++ pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
2770 ++ if (!pos)
2771 ++ return;
2772 ++
2773 ++ dev_warn(&dev->dev, "PCI-X settings not supported\n");
2774 + }
2775 +
2776 + static bool pcie_root_rcb_set(struct pci_dev *dev)
2777 +@@ -1465,6 +1473,9 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
2778 + if (!hpp)
2779 + return;
2780 +
2781 ++ if (!pci_is_pcie(dev))
2782 ++ return;
2783 ++
2784 + if (hpp->revision > 1) {
2785 + dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
2786 + hpp->revision);
2787 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
2788 +index 5d8151b43fbb..98eba9127a0b 100644
2789 +--- a/drivers/pci/quirks.c
2790 ++++ b/drivers/pci/quirks.c
2791 +@@ -4088,12 +4088,14 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
2792 + static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
2793 + {
2794 + /*
2795 +- * Cavium devices matching this quirk do not perform peer-to-peer
2796 +- * with other functions, allowing masking out these bits as if they
2797 +- * were unimplemented in the ACS capability.
2798 ++ * Cavium root ports don't advertise an ACS capability. However,
2799 ++ * the RTL internally implements similar protection as if ACS had
2800 ++ * Request Redirection, Completion Redirection, Source Validation,
2801 ++ * and Upstream Forwarding features enabled. Assert that the
2802 ++ * hardware implements and enables equivalent ACS functionality for
2803 ++ * these flags.
2804 + */
2805 +- acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
2806 +- PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
2807 ++ acs_flags &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_SV | PCI_ACS_UF);
2808 +
2809 + return acs_flags ? 0 : 1;
2810 + }
2811 +diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c
2812 +index 7f3041697813..f714f67c4b64 100644
2813 +--- a/drivers/pinctrl/sirf/pinctrl-atlas7.c
2814 ++++ b/drivers/pinctrl/sirf/pinctrl-atlas7.c
2815 +@@ -5420,14 +5420,15 @@ static int atlas7_pinmux_probe(struct platform_device *pdev)
2816 + sys2pci_np = of_find_node_by_name(NULL, "sys2pci");
2817 + if (!sys2pci_np)
2818 + return -EINVAL;
2819 ++
2820 + ret = of_address_to_resource(sys2pci_np, 0, &res);
2821 ++ of_node_put(sys2pci_np);
2822 + if (ret)
2823 + return ret;
2824 ++
2825 + pmx->sys2pci_base = devm_ioremap_resource(&pdev->dev, &res);
2826 +- if (IS_ERR(pmx->sys2pci_base)) {
2827 +- of_node_put(sys2pci_np);
2828 ++ if (IS_ERR(pmx->sys2pci_base))
2829 + return -ENOMEM;
2830 +- }
2831 +
2832 + pmx->dev = &pdev->dev;
2833 +
2834 +diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
2835 +index b7995474148c..8e281e47afec 100644
2836 +--- a/drivers/spi/Kconfig
2837 ++++ b/drivers/spi/Kconfig
2838 +@@ -365,6 +365,7 @@ config SPI_FSL_SPI
2839 + config SPI_FSL_DSPI
2840 + tristate "Freescale DSPI controller"
2841 + select REGMAP_MMIO
2842 ++ depends on HAS_DMA
2843 + depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
2844 + help
2845 + This enables support for the Freescale DSPI controller in master
2846 +diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c
2847 +index 5578a077fcfb..50a5b0c2cc7b 100644
2848 +--- a/drivers/staging/iio/cdc/ad7150.c
2849 ++++ b/drivers/staging/iio/cdc/ad7150.c
2850 +@@ -274,7 +274,7 @@ static int ad7150_write_event_config(struct iio_dev *indio_dev,
2851 + error_ret:
2852 + mutex_unlock(&chip->state_lock);
2853 +
2854 +- return 0;
2855 ++ return ret;
2856 + }
2857 +
2858 + static int ad7150_read_event_value(struct iio_dev *indio_dev,
2859 +diff --git a/drivers/staging/media/cec/cec-adap.c b/drivers/staging/media/cec/cec-adap.c
2860 +index 057c9b5ab1e5..499d7bfe7147 100644
2861 +--- a/drivers/staging/media/cec/cec-adap.c
2862 ++++ b/drivers/staging/media/cec/cec-adap.c
2863 +@@ -288,10 +288,10 @@ static void cec_data_cancel(struct cec_data *data)
2864 +
2865 + /* Mark it as an error */
2866 + data->msg.tx_ts = ktime_get_ns();
2867 +- data->msg.tx_status = CEC_TX_STATUS_ERROR |
2868 +- CEC_TX_STATUS_MAX_RETRIES;
2869 ++ data->msg.tx_status |= CEC_TX_STATUS_ERROR |
2870 ++ CEC_TX_STATUS_MAX_RETRIES;
2871 ++ data->msg.tx_error_cnt++;
2872 + data->attempts = 0;
2873 +- data->msg.tx_error_cnt = 1;
2874 + /* Queue transmitted message for monitoring purposes */
2875 + cec_queue_msg_monitor(data->adap, &data->msg, 1);
2876 +
2877 +@@ -1062,6 +1062,8 @@ static int cec_config_thread_func(void *arg)
2878 + for (i = 1; i < las->num_log_addrs; i++)
2879 + las->log_addr[i] = CEC_LOG_ADDR_INVALID;
2880 + }
2881 ++ for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
2882 ++ las->log_addr[i] = CEC_LOG_ADDR_INVALID;
2883 + adap->is_configured = true;
2884 + adap->is_configuring = false;
2885 + cec_post_state_event(adap);
2886 +@@ -1079,8 +1081,6 @@ static int cec_config_thread_func(void *arg)
2887 + cec_report_features(adap, i);
2888 + cec_report_phys_addr(adap, i);
2889 + }
2890 +- for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
2891 +- las->log_addr[i] = CEC_LOG_ADDR_INVALID;
2892 + mutex_lock(&adap->lock);
2893 + adap->kthread_config = NULL;
2894 + mutex_unlock(&adap->lock);
2895 +@@ -1557,9 +1557,9 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
2896 + }
2897 +
2898 + case CEC_MSG_GIVE_FEATURES:
2899 +- if (adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0)
2900 +- return cec_report_features(adap, la_idx);
2901 +- return 0;
2902 ++ if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0)
2903 ++ return cec_feature_abort(adap, msg);
2904 ++ return cec_report_features(adap, la_idx);
2905 +
2906 + default:
2907 + /*
2908 +diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
2909 +index f3c9d18e9dc5..0d578297d9f9 100644
2910 +--- a/drivers/target/iscsi/iscsi_target.c
2911 ++++ b/drivers/target/iscsi/iscsi_target.c
2912 +@@ -2104,12 +2104,14 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2913 +
2914 + if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
2915 + int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
2916 +- if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
2917 ++ if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) {
2918 + out_of_order_cmdsn = 1;
2919 +- else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
2920 ++ } else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
2921 ++ target_put_sess_cmd(&cmd->se_cmd);
2922 + return 0;
2923 +- else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
2924 ++ } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
2925 + return -1;
2926 ++ }
2927 + }
2928 + iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
2929 +
2930 +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
2931 +index bacfa8f81be8..4c0782cb1e94 100644
2932 +--- a/drivers/target/target_core_transport.c
2933 ++++ b/drivers/target/target_core_transport.c
2934 +@@ -1976,6 +1976,8 @@ static void target_restart_delayed_cmds(struct se_device *dev)
2935 + list_del(&cmd->se_delayed_node);
2936 + spin_unlock(&dev->delayed_cmd_lock);
2937 +
2938 ++ cmd->transport_state |= CMD_T_SENT;
2939 ++
2940 + __target_execute_cmd(cmd, true);
2941 +
2942 + if (cmd->sam_task_attr == TCM_ORDERED_TAG)
2943 +@@ -2013,6 +2015,8 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
2944 + pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
2945 + dev->dev_cur_ordered_id);
2946 + }
2947 ++ cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;
2948 ++
2949 + restart:
2950 + target_restart_delayed_cmds(dev);
2951 + }
2952 +diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
2953 +index 6e29d053843d..9e36632b6f0e 100644
2954 +--- a/drivers/vhost/scsi.c
2955 ++++ b/drivers/vhost/scsi.c
2956 +@@ -693,6 +693,7 @@ vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
2957 + struct scatterlist *sg, int sg_count)
2958 + {
2959 + size_t off = iter->iov_offset;
2960 ++ struct scatterlist *p = sg;
2961 + int i, ret;
2962 +
2963 + for (i = 0; i < iter->nr_segs; i++) {
2964 +@@ -701,8 +702,8 @@ vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
2965 +
2966 + ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write);
2967 + if (ret < 0) {
2968 +- for (i = 0; i < sg_count; i++) {
2969 +- struct page *page = sg_page(&sg[i]);
2970 ++ while (p < sg) {
2971 ++ struct page *page = sg_page(p++);
2972 + if (page)
2973 + put_page(page);
2974 + }
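
For context: the vhost/scsi hunk above stops releasing pages for the whole
scatterlist on failure and instead releases only the entries populated so
far. A minimal userspace sketch of that unwind pattern (hypothetical names,
not the kernel code):

    #include <stdlib.h>

    /* Acquire n resources; on failure, release only those already
     * acquired -- the same idea as the `while (p < sg)` loop above. */
    static int acquire_all(void **slots, int n)
    {
        void **p = slots;            /* next slot to populate */
        int i;

        for (i = 0; i < n; i++) {
            void *r = malloc(16);    /* stand-in for the mapping step */
            if (!r) {
                while (p > slots)    /* unwind only what we acquired */
                    free(*--p);
                return -1;
            }
            *p++ = r;
        }
        return 0;
    }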
2975 +diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
2976 +index 1e8be12ebb55..0a3c6762df1b 100644
2977 +--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
2978 ++++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
2979 +@@ -316,7 +316,7 @@ static int xenbus_write_transaction(unsigned msg_type,
2980 + rc = -ENOMEM;
2981 + goto out;
2982 + }
2983 +- } else if (msg_type == XS_TRANSACTION_END) {
2984 ++ } else if (u->u.msg.tx_id != 0) {
2985 + list_for_each_entry(trans, &u->transactions, list)
2986 + if (trans->handle.id == u->u.msg.tx_id)
2987 + break;
2988 +diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
2989 +index 30ca770c5e0b..f8ab4a66acaf 100644
2990 +--- a/fs/9p/vfs_inode.c
2991 ++++ b/fs/9p/vfs_inode.c
2992 +@@ -483,6 +483,9 @@ static int v9fs_test_inode(struct inode *inode, void *data)
2993 +
2994 + if (v9inode->qid.type != st->qid.type)
2995 + return 0;
2996 ++
2997 ++ if (v9inode->qid.path != st->qid.path)
2998 ++ return 0;
2999 + return 1;
3000 + }
3001 +
3002 +diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
3003 +index afaa4b6de801..c3dd0d42bb3a 100644
3004 +--- a/fs/9p/vfs_inode_dotl.c
3005 ++++ b/fs/9p/vfs_inode_dotl.c
3006 +@@ -87,6 +87,9 @@ static int v9fs_test_inode_dotl(struct inode *inode, void *data)
3007 +
3008 + if (v9inode->qid.type != st->qid.type)
3009 + return 0;
3010 ++
3011 ++ if (v9inode->qid.path != st->qid.path)
3012 ++ return 0;
3013 + return 1;
3014 + }
3015 +
3016 +diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
3017 +index 5db6c8d745ea..4c71dba90120 100644
3018 +--- a/fs/autofs4/waitq.c
3019 ++++ b/fs/autofs4/waitq.c
3020 +@@ -87,7 +87,8 @@ static int autofs4_write(struct autofs_sb_info *sbi,
3021 + spin_unlock_irqrestore(&current->sighand->siglock, flags);
3022 + }
3023 +
3024 +- return (bytes > 0);
3025 ++ /* if 'wr' returned 0 (impossible) we assume -EIO (safe) */
3026 ++ return bytes == 0 ? 0 : wr < 0 ? wr : -EIO;
3027 + }
3028 +
3029 + static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
3030 +@@ -101,6 +102,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
3031 + } pkt;
3032 + struct file *pipe = NULL;
3033 + size_t pktsz;
3034 ++ int ret;
3035 +
3036 + pr_debug("wait id = 0x%08lx, name = %.*s, type=%d\n",
3037 + (unsigned long) wq->wait_queue_token,
3038 +@@ -175,7 +177,18 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
3039 + mutex_unlock(&sbi->wq_mutex);
3040 +
3041 +- if (autofs4_write(sbi, pipe, &pkt, pktsz))
3042 ++ switch (ret = autofs4_write(sbi, pipe, &pkt, pktsz)) {
3043 ++ case 0:
3044 ++ break;
3045 ++ case -ENOMEM:
3046 ++ case -ERESTARTSYS:
3047 ++ /* Just fail this one */
3048 ++ autofs4_wait_release(sbi, wq->wait_queue_token, ret);
3049 ++ break;
3050 ++ default:
3051 + autofs4_catatonic_mode(sbi);
3052 ++ break;
3053 ++ }
3054 + fput(pipe);
3055 + }
3056 +
3057 +diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c
3058 +index 7fc89e4adb41..83bb2f2aa83c 100644
3059 +--- a/fs/btrfs/uuid-tree.c
3060 ++++ b/fs/btrfs/uuid-tree.c
3061 +@@ -351,7 +351,5 @@ int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info,
3062 +
3063 + out:
3064 + btrfs_free_path(path);
3065 +- if (ret)
3066 +- btrfs_warn(fs_info, "btrfs_uuid_tree_iterate failed %d", ret);
3067 +- return 0;
3068 ++ return ret;
3069 + }
3070 +diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
3071 +index 61cfccea77bc..73de1446c8d4 100644
3072 +--- a/fs/crypto/crypto.c
3073 ++++ b/fs/crypto/crypto.c
3074 +@@ -484,9 +484,6 @@ int fscrypt_initialize(void)
3075 + {
3076 + int i, res = -ENOMEM;
3077 +
3078 +- if (fscrypt_bounce_page_pool)
3079 +- return 0;
3080 +-
3081 + mutex_lock(&fscrypt_init_mutex);
3082 + if (fscrypt_bounce_page_pool)
3083 + goto already_initialized;
3084 +diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
3085 +index d1bbdc9dda76..e14bb7b67e9c 100644
3086 +--- a/fs/crypto/fname.c
3087 ++++ b/fs/crypto/fname.c
3088 +@@ -332,7 +332,7 @@ int fscrypt_fname_usr_to_disk(struct inode *inode,
3089 + * in a directory. Consequently, a user space name cannot be mapped to
3090 + * a disk-space name
3091 + */
3092 +- return -EACCES;
3093 ++ return -ENOKEY;
3094 + }
3095 + EXPORT_SYMBOL(fscrypt_fname_usr_to_disk);
3096 +
3097 +@@ -367,7 +367,7 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
3098 + return 0;
3099 + }
3100 + if (!lookup)
3101 +- return -EACCES;
3102 ++ return -ENOKEY;
3103 +
3104 + /*
3105 + * We don't have the key and we are doing a lookup; decode the
3106 +diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
3107 +index bb4e209bd809..c160d2d0e18d 100644
3108 +--- a/fs/crypto/policy.c
3109 ++++ b/fs/crypto/policy.c
3110 +@@ -113,7 +113,7 @@ int fscrypt_process_policy(struct file *filp,
3111 +
3112 + if (!inode_has_encryption_context(inode)) {
3113 + if (!S_ISDIR(inode->i_mode))
3114 +- ret = -EINVAL;
3115 ++ ret = -ENOTDIR;
3116 + else if (!inode->i_sb->s_cop->empty_dir)
3117 + ret = -EOPNOTSUPP;
3118 + else if (!inode->i_sb->s_cop->empty_dir(inode))
3119 +diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c
3120 +index 286f10b0363b..4f457d5c4933 100644
3121 +--- a/fs/ecryptfs/messaging.c
3122 ++++ b/fs/ecryptfs/messaging.c
3123 +@@ -442,15 +442,16 @@ void ecryptfs_release_messaging(void)
3124 + }
3125 + if (ecryptfs_daemon_hash) {
3126 + struct ecryptfs_daemon *daemon;
3127 ++ struct hlist_node *n;
3128 + int i;
3129 +
3130 + mutex_lock(&ecryptfs_daemon_hash_mux);
3131 + for (i = 0; i < (1 << ecryptfs_hash_bits); i++) {
3132 + int rc;
3133 +
3134 +- hlist_for_each_entry(daemon,
3135 +- &ecryptfs_daemon_hash[i],
3136 +- euid_chain) {
3137 ++ hlist_for_each_entry_safe(daemon, n,
3138 ++ &ecryptfs_daemon_hash[i],
3139 ++ euid_chain) {
3140 + rc = ecryptfs_exorcise_daemon(daemon);
3141 + if (rc)
3142 + printk(KERN_ERR "%s: Error whilst "
3143 +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
3144 +index a3e0b3b7441d..a77cbc5b657b 100644
3145 +--- a/fs/ext4/extents.c
3146 ++++ b/fs/ext4/extents.c
3147 +@@ -4803,7 +4803,8 @@ static long ext4_zero_range(struct file *file, loff_t offset,
3148 + }
3149 +
3150 + if (!(mode & FALLOC_FL_KEEP_SIZE) &&
3151 +- offset + len > i_size_read(inode)) {
3152 ++ (offset + len > i_size_read(inode) ||
3153 ++ offset + len > EXT4_I(inode)->i_disksize)) {
3154 + new_size = offset + len;
3155 + ret = inode_newsize_ok(inode, new_size);
3156 + if (ret)
3157 +@@ -4974,7 +4975,8 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
3158 + }
3159 +
3160 + if (!(mode & FALLOC_FL_KEEP_SIZE) &&
3161 +- offset + len > i_size_read(inode)) {
3162 ++ (offset + len > i_size_read(inode) ||
3163 ++ offset + len > EXT4_I(inode)->i_disksize)) {
3164 + new_size = offset + len;
3165 + ret = inode_newsize_ok(inode, new_size);
3166 + if (ret)
3167 +diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
3168 +index 170421edfdfe..2d94e8524839 100644
3169 +--- a/fs/ext4/ialloc.c
3170 ++++ b/fs/ext4/ialloc.c
3171 +@@ -771,7 +771,7 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
3172 + if (err)
3173 + return ERR_PTR(err);
3174 + if (!fscrypt_has_encryption_key(dir))
3175 +- return ERR_PTR(-EPERM);
3176 ++ return ERR_PTR(-ENOKEY);
3177 + if (!handle)
3178 + nblocks += EXT4_DATA_TRANS_BLOCKS(dir->i_sb);
3179 + encrypt = 1;
3180 +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
3181 +index 00b8a5a66961..4438b93f6fd6 100644
3182 +--- a/fs/ext4/namei.c
3183 ++++ b/fs/ext4/namei.c
3184 +@@ -1378,6 +1378,8 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
3185 + return NULL;
3186 +
3187 + retval = ext4_fname_setup_filename(dir, d_name, 1, &fname);
3188 ++ if (retval == -ENOENT)
3189 ++ return NULL;
3190 + if (retval)
3191 + return ERR_PTR(retval);
3192 +
3193 +@@ -3090,7 +3092,7 @@ static int ext4_symlink(struct inode *dir,
3194 + if (err)
3195 + return err;
3196 + if (!fscrypt_has_encryption_key(dir))
3197 +- return -EPERM;
3198 ++ return -ENOKEY;
3199 + disk_link.len = (fscrypt_fname_encrypted_size(dir, len) +
3200 + sizeof(struct fscrypt_symlink_data));
3201 + sd = kzalloc(disk_link.len, GFP_KERNEL);
3202 +diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
3203 +index 11f3717ce481..8add4e8bab99 100644
3204 +--- a/fs/f2fs/dir.c
3205 ++++ b/fs/f2fs/dir.c
3206 +@@ -277,7 +277,10 @@ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
3207 +
3208 + err = fscrypt_setup_filename(dir, child, 1, &fname);
3209 + if (err) {
3210 +- *res_page = ERR_PTR(err);
3211 ++ if (err == -ENOENT)
3212 ++ *res_page = NULL;
3213 ++ else
3214 ++ *res_page = ERR_PTR(err);
3215 + return NULL;
3216 + }
3217 +
3218 +diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
3219 +index 08d7dc99042e..8556fe1ccb8a 100644
3220 +--- a/fs/f2fs/namei.c
3221 ++++ b/fs/f2fs/namei.c
3222 +@@ -403,7 +403,7 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
3223 + return err;
3224 +
3225 + if (!fscrypt_has_encryption_key(dir))
3226 +- return -EPERM;
3227 ++ return -ENOKEY;
3228 +
3229 + disk_link.len = (fscrypt_fname_encrypted_size(dir, len) +
3230 + sizeof(struct fscrypt_symlink_data));
3231 +@@ -447,7 +447,7 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
3232 + goto err_out;
3233 +
3234 + if (!fscrypt_has_encryption_key(inode)) {
3235 +- err = -EPERM;
3236 ++ err = -ENOKEY;
3237 + goto err_out;
3238 + }
3239 +
3240 +diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h
3241 +index 0ac4c1f73fbd..25177e6bd603 100644
3242 +--- a/fs/isofs/isofs.h
3243 ++++ b/fs/isofs/isofs.h
3244 +@@ -103,7 +103,7 @@ static inline unsigned int isonum_733(char *p)
3245 + /* Ignore bigendian datum due to broken mastering programs */
3246 + return get_unaligned_le32(p);
3247 + }
3248 +-extern int iso_date(char *, int);
3249 ++extern int iso_date(u8 *, int);
3250 +
3251 + struct inode; /* To make gcc happy */
3252 +
3253 +diff --git a/fs/isofs/rock.h b/fs/isofs/rock.h
3254 +index ed09e2b08637..f835976ce033 100644
3255 +--- a/fs/isofs/rock.h
3256 ++++ b/fs/isofs/rock.h
3257 +@@ -65,7 +65,7 @@ struct RR_PL_s {
3258 + };
3259 +
3260 + struct stamp {
3261 +- char time[7];
3262 ++ __u8 time[7]; /* actually 6 unsigned, 1 signed */
3263 + } __attribute__ ((packed));
3264 +
3265 + struct RR_TF_s {
3266 +diff --git a/fs/isofs/util.c b/fs/isofs/util.c
3267 +index 005a15cfd30a..37860fea364d 100644
3268 +--- a/fs/isofs/util.c
3269 ++++ b/fs/isofs/util.c
3270 +@@ -15,7 +15,7 @@
3271 + * to GMT. Thus we should always be correct.
3272 + */
3273 +
3274 +-int iso_date(char * p, int flag)
3275 ++int iso_date(u8 *p, int flag)
3276 + {
3277 + int year, month, day, hour, minute, second, tz;
3278 + int crtime;
3279 +diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
3280 +index fc4084ef4736..9d373247222c 100644
3281 +--- a/fs/lockd/svc.c
3282 ++++ b/fs/lockd/svc.c
3283 +@@ -365,6 +365,7 @@ static int lockd_start_svc(struct svc_serv *serv)
3284 + printk(KERN_WARNING
3285 + "lockd_up: svc_rqst allocation failed, error=%d\n",
3286 + error);
3287 ++ lockd_unregister_notifiers();
3288 + goto out_rqst;
3289 + }
3290 +
3291 +@@ -455,13 +456,16 @@ int lockd_up(struct net *net)
3292 + }
3293 +
3294 + error = lockd_up_net(serv, net);
3295 +- if (error < 0)
3296 +- goto err_net;
3297 ++ if (error < 0) {
3298 ++ lockd_unregister_notifiers();
3299 ++ goto err_put;
3300 ++ }
3301 +
3302 + error = lockd_start_svc(serv);
3303 +- if (error < 0)
3304 +- goto err_start;
3305 +-
3306 ++ if (error < 0) {
3307 ++ lockd_down_net(serv, net);
3308 ++ goto err_put;
3309 ++ }
3310 + nlmsvc_users++;
3311 + /*
3312 + * Note: svc_serv structures have an initial use count of 1,
3313 +@@ -472,12 +476,6 @@ int lockd_up(struct net *net)
3314 + err_create:
3315 + mutex_unlock(&nlmsvc_mutex);
3316 + return error;
3317 +-
3318 +-err_start:
3319 +- lockd_down_net(serv, net);
3320 +-err_net:
3321 +- lockd_unregister_notifiers();
3322 +- goto err_put;
3323 + }
3324 + EXPORT_SYMBOL_GPL(lockd_up);
3325 +
3326 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
3327 +index a53b8e0c896a..67845220fc27 100644
3328 +--- a/fs/nfs/nfs4proc.c
3329 ++++ b/fs/nfs/nfs4proc.c
3330 +@@ -256,15 +256,12 @@ const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
3331 + };
3332 +
3333 + const u32 nfs4_fs_locations_bitmap[3] = {
3334 +- FATTR4_WORD0_TYPE
3335 +- | FATTR4_WORD0_CHANGE
3336 ++ FATTR4_WORD0_CHANGE
3337 + | FATTR4_WORD0_SIZE
3338 + | FATTR4_WORD0_FSID
3339 + | FATTR4_WORD0_FILEID
3340 + | FATTR4_WORD0_FS_LOCATIONS,
3341 +- FATTR4_WORD1_MODE
3342 +- | FATTR4_WORD1_NUMLINKS
3343 +- | FATTR4_WORD1_OWNER
3344 ++ FATTR4_WORD1_OWNER
3345 + | FATTR4_WORD1_OWNER_GROUP
3346 + | FATTR4_WORD1_RAWDEV
3347 + | FATTR4_WORD1_SPACE_USED
3348 +@@ -6678,9 +6675,7 @@ static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
3349 + struct page *page)
3350 + {
3351 + struct nfs_server *server = NFS_SERVER(dir);
3352 +- u32 bitmask[3] = {
3353 +- [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
3354 +- };
3355 ++ u32 bitmask[3];
3356 + struct nfs4_fs_locations_arg args = {
3357 + .dir_fh = NFS_FH(dir),
3358 + .name = name,
3359 +@@ -6699,12 +6694,15 @@ static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
3360 +
3361 + dprintk("%s: start\n", __func__);
3362 +
3363 ++ bitmask[0] = nfs4_fattr_bitmap[0] | FATTR4_WORD0_FS_LOCATIONS;
3364 ++ bitmask[1] = nfs4_fattr_bitmap[1];
3365 ++
3366 + /* Ask for the fileid of the absent filesystem if mounted_on_fileid
3367 + * is not supported */
3368 + if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
3369 +- bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
3370 ++ bitmask[0] &= ~FATTR4_WORD0_FILEID;
3371 + else
3372 +- bitmask[0] |= FATTR4_WORD0_FILEID;
3373 ++ bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
3374 +
3375 + nfs_fattr_init(&fs_locations->fattr);
3376 + fs_locations->server = server;
3377 +diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
3378 +index cfb8f7ce5cf6..20cd8500452a 100644
3379 +--- a/fs/nfs/nfs4trace.h
3380 ++++ b/fs/nfs/nfs4trace.h
3381 +@@ -201,17 +201,13 @@ DECLARE_EVENT_CLASS(nfs4_clientid_event,
3382 + TP_ARGS(clp, error),
3383 +
3384 + TP_STRUCT__entry(
3385 +- __string(dstaddr,
3386 +- rpc_peeraddr2str(clp->cl_rpcclient,
3387 +- RPC_DISPLAY_ADDR))
3388 ++ __string(dstaddr, clp->cl_hostname)
3389 + __field(int, error)
3390 + ),
3391 +
3392 + TP_fast_assign(
3393 + __entry->error = error;
3394 +- __assign_str(dstaddr,
3395 +- rpc_peeraddr2str(clp->cl_rpcclient,
3396 +- RPC_DISPLAY_ADDR));
3397 ++ __assign_str(dstaddr, clp->cl_hostname);
3398 + ),
3399 +
3400 + TP_printk(
3401 +@@ -1103,9 +1099,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_callback_event,
3402 + __field(dev_t, dev)
3403 + __field(u32, fhandle)
3404 + __field(u64, fileid)
3405 +- __string(dstaddr, clp ?
3406 +- rpc_peeraddr2str(clp->cl_rpcclient,
3407 +- RPC_DISPLAY_ADDR) : "unknown")
3408 ++ __string(dstaddr, clp ? clp->cl_hostname : "unknown")
3409 + ),
3410 +
3411 + TP_fast_assign(
3412 +@@ -1118,9 +1112,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_callback_event,
3413 + __entry->fileid = 0;
3414 + __entry->dev = 0;
3415 + }
3416 +- __assign_str(dstaddr, clp ?
3417 +- rpc_peeraddr2str(clp->cl_rpcclient,
3418 +- RPC_DISPLAY_ADDR) : "unknown")
3419 ++ __assign_str(dstaddr, clp ? clp->cl_hostname : "unknown")
3420 + ),
3421 +
3422 + TP_printk(
3423 +@@ -1162,9 +1154,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event,
3424 + __field(dev_t, dev)
3425 + __field(u32, fhandle)
3426 + __field(u64, fileid)
3427 +- __string(dstaddr, clp ?
3428 +- rpc_peeraddr2str(clp->cl_rpcclient,
3429 +- RPC_DISPLAY_ADDR) : "unknown")
3430 ++ __string(dstaddr, clp ? clp->cl_hostname : "unknown")
3431 + __field(int, stateid_seq)
3432 + __field(u32, stateid_hash)
3433 + ),
3434 +@@ -1179,9 +1169,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event,
3435 + __entry->fileid = 0;
3436 + __entry->dev = 0;
3437 + }
3438 +- __assign_str(dstaddr, clp ?
3439 +- rpc_peeraddr2str(clp->cl_rpcclient,
3440 +- RPC_DISPLAY_ADDR) : "unknown")
3441 ++ __assign_str(dstaddr, clp ? clp->cl_hostname : "unknown")
3442 + __entry->stateid_seq =
3443 + be32_to_cpu(stateid->seqid);
3444 + __entry->stateid_hash =
3445 +diff --git a/fs/nfs/super.c b/fs/nfs/super.c
3446 +index ddce94ce8142..51bf1f9ab287 100644
3447 +--- a/fs/nfs/super.c
3448 ++++ b/fs/nfs/super.c
3449 +@@ -1339,7 +1339,7 @@ static int nfs_parse_mount_options(char *raw,
3450 + mnt->options |= NFS_OPTION_MIGRATION;
3451 + break;
3452 + case Opt_nomigration:
3453 +- mnt->options &= NFS_OPTION_MIGRATION;
3454 ++ mnt->options &= ~NFS_OPTION_MIGRATION;
3455 + break;
3456 +
3457 + /*
3458 +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
3459 +index d35eb077330f..ec2a69dac536 100644
3460 +--- a/fs/nfsd/nfs4state.c
3461 ++++ b/fs/nfsd/nfs4state.c
3462 +@@ -3967,7 +3967,8 @@ static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, statei
3463 + {
3464 + struct nfs4_stid *ret;
3465 +
3466 +- ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID);
3467 ++ ret = find_stateid_by_type(cl, s,
3468 ++ NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
3469 + if (!ret)
3470 + return NULL;
3471 + return delegstateid(ret);
3472 +@@ -3990,6 +3991,12 @@ nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
3473 + deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
3474 + if (deleg == NULL)
3475 + goto out;
3476 ++ if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
3477 ++ nfs4_put_stid(&deleg->dl_stid);
3478 ++ if (cl->cl_minorversion)
3479 ++ status = nfserr_deleg_revoked;
3480 ++ goto out;
3481 ++ }
3482 + flags = share_access_to_flags(open->op_share_access);
3483 + status = nfs4_check_delegmode(deleg, flags);
3484 + if (status) {
3485 +@@ -4858,6 +4865,16 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
3486 + struct nfs4_stid **s, struct nfsd_net *nn)
3487 + {
3488 + __be32 status;
3489 ++ bool return_revoked = false;
3490 ++
3491 ++ /*
3492 ++ * only return revoked delegations if explicitly asked.
3493 ++ * otherwise we report revoked or bad_stateid status.
3494 ++ */
3495 ++ if (typemask & NFS4_REVOKED_DELEG_STID)
3496 ++ return_revoked = true;
3497 ++ else if (typemask & NFS4_DELEG_STID)
3498 ++ typemask |= NFS4_REVOKED_DELEG_STID;
3499 +
3500 + if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
3501 + return nfserr_bad_stateid;
3502 +@@ -4872,6 +4889,12 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
3503 + *s = find_stateid_by_type(cstate->clp, stateid, typemask);
3504 + if (!*s)
3505 + return nfserr_bad_stateid;
3506 ++ if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
3507 ++ nfs4_put_stid(*s);
3508 ++ if (cstate->minorversion)
3509 ++ return nfserr_deleg_revoked;
3510 ++ return nfserr_bad_stateid;
3511 ++ }
3512 + return nfs_ok;
3513 + }
3514 +
3515 +diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
3516 +index 7d18d62e8e07..36362d4bc344 100644
3517 +--- a/fs/nilfs2/segment.c
3518 ++++ b/fs/nilfs2/segment.c
3519 +@@ -1956,8 +1956,6 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
3520 + err, ii->vfs_inode.i_ino);
3521 + return err;
3522 + }
3523 +- mark_buffer_dirty(ibh);
3524 +- nilfs_mdt_mark_dirty(ifile);
3525 + spin_lock(&nilfs->ns_inode_lock);
3526 + if (likely(!ii->i_bh))
3527 + ii->i_bh = ibh;
3528 +@@ -1966,6 +1964,10 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
3529 + goto retry;
3530 + }
3531 +
3532 ++ // Always redirty the buffer to avoid race condition
3533 ++ mark_buffer_dirty(ii->i_bh);
3534 ++ nilfs_mdt_mark_dirty(ifile);
3535 ++
3536 + clear_bit(NILFS_I_QUEUED, &ii->i_state);
3537 + set_bit(NILFS_I_BUSY, &ii->i_state);
3538 + list_move_tail(&ii->i_dirty, &sci->sc_dirty_files);
3539 +diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
3540 +index 8a707f8a41c3..8a13e3903839 100644
3541 +--- a/include/trace/events/sunrpc.h
3542 ++++ b/include/trace/events/sunrpc.h
3543 +@@ -455,20 +455,22 @@ TRACE_EVENT(svc_recv,
3544 + TP_ARGS(rqst, status),
3545 +
3546 + TP_STRUCT__entry(
3547 +- __field(struct sockaddr *, addr)
3548 + __field(__be32, xid)
3549 + __field(int, status)
3550 + __field(unsigned long, flags)
3551 ++ __dynamic_array(unsigned char, addr, rqst->rq_addrlen)
3552 + ),
3553 +
3554 + TP_fast_assign(
3555 +- __entry->addr = (struct sockaddr *)&rqst->rq_addr;
3556 + __entry->xid = status > 0 ? rqst->rq_xid : 0;
3557 + __entry->status = status;
3558 + __entry->flags = rqst->rq_flags;
3559 ++ memcpy(__get_dynamic_array(addr),
3560 ++ &rqst->rq_addr, rqst->rq_addrlen);
3561 + ),
3562 +
3563 +- TP_printk("addr=%pIScp xid=0x%x status=%d flags=%s", __entry->addr,
3564 ++ TP_printk("addr=%pIScp xid=0x%x status=%d flags=%s",
3565 ++ (struct sockaddr *)__get_dynamic_array(addr),
3566 + be32_to_cpu(__entry->xid), __entry->status,
3567 + show_rqstp_flags(__entry->flags))
3568 + );
3569 +@@ -513,22 +515,23 @@ DECLARE_EVENT_CLASS(svc_rqst_status,
3570 + TP_ARGS(rqst, status),
3571 +
3572 + TP_STRUCT__entry(
3573 +- __field(struct sockaddr *, addr)
3574 + __field(__be32, xid)
3575 +- __field(int, dropme)
3576 + __field(int, status)
3577 + __field(unsigned long, flags)
3578 ++ __dynamic_array(unsigned char, addr, rqst->rq_addrlen)
3579 + ),
3580 +
3581 + TP_fast_assign(
3582 +- __entry->addr = (struct sockaddr *)&rqst->rq_addr;
3583 + __entry->xid = rqst->rq_xid;
3584 + __entry->status = status;
3585 + __entry->flags = rqst->rq_flags;
3586 ++ memcpy(__get_dynamic_array(addr),
3587 ++ &rqst->rq_addr, rqst->rq_addrlen);
3588 + ),
3589 +
3590 + TP_printk("addr=%pIScp rq_xid=0x%x status=%d flags=%s",
3591 +- __entry->addr, be32_to_cpu(__entry->xid),
3592 ++ (struct sockaddr *)__get_dynamic_array(addr),
3593 ++ be32_to_cpu(__entry->xid),
3594 + __entry->status, show_rqstp_flags(__entry->flags))
3595 + );
3596 +
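The sunrpc tracepoint change above replaces a stored `struct sockaddr *`
with a byte copy: trace records are formatted long after they are logged,
so a pointer into the rqstp may be stale by then. A userspace sketch of the
snapshot-at-record-time idea (hypothetical types; __dynamic_array() is the
kernel's equivalent):

    #include <string.h>
    #include <stddef.h>

    struct trace_record {
        unsigned char addr[128];   /* copied bytes, like __dynamic_array */
        size_t addrlen;
    };

    static void record_addr(struct trace_record *rec,
                            const void *addr, size_t len)
    {
        if (len > sizeof(rec->addr))
            len = sizeof(rec->addr);
        memcpy(rec->addr, addr, len);  /* snapshot now, print later */
        rec->addrlen = len;
    }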
3597 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
3598 +index 78181c03d9c7..e5066955cc3a 100644
3599 +--- a/kernel/sched/core.c
3600 ++++ b/kernel/sched/core.c
3601 +@@ -507,8 +507,7 @@ void resched_cpu(int cpu)
3602 + struct rq *rq = cpu_rq(cpu);
3603 + unsigned long flags;
3604 +
3605 +- if (!raw_spin_trylock_irqsave(&rq->lock, flags))
3606 +- return;
3607 ++ raw_spin_lock_irqsave(&rq->lock, flags);
3608 + resched_curr(rq);
3609 + raw_spin_unlock_irqrestore(&rq->lock, flags);
3610 + }
3611 +@@ -5878,6 +5877,12 @@ static int init_rootdomain(struct root_domain *rd)
3612 + if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
3613 + goto free_dlo_mask;
3614 +
3615 ++#ifdef HAVE_RT_PUSH_IPI
3616 ++ rd->rto_cpu = -1;
3617 ++ raw_spin_lock_init(&rd->rto_lock);
3618 ++ init_irq_work(&rd->rto_push_work, rto_push_irq_work_func);
3619 ++#endif
3620 ++
3621 + init_dl_bw(&rd->dl_bw);
3622 + if (cpudl_init(&rd->cpudl) != 0)
3623 + goto free_dlo_mask;
3624 +diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
3625 +index f139f22ce30d..9c131168d933 100644
3626 +--- a/kernel/sched/rt.c
3627 ++++ b/kernel/sched/rt.c
3628 +@@ -72,10 +72,6 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
3629 + raw_spin_unlock(&rt_b->rt_runtime_lock);
3630 + }
3631 +
3632 +-#if defined(CONFIG_SMP) && defined(HAVE_RT_PUSH_IPI)
3633 +-static void push_irq_work_func(struct irq_work *work);
3634 +-#endif
3635 +-
3636 + void init_rt_rq(struct rt_rq *rt_rq)
3637 + {
3638 + struct rt_prio_array *array;
3639 +@@ -95,13 +91,6 @@ void init_rt_rq(struct rt_rq *rt_rq)
3640 + rt_rq->rt_nr_migratory = 0;
3641 + rt_rq->overloaded = 0;
3642 + plist_head_init(&rt_rq->pushable_tasks);
3643 +-
3644 +-#ifdef HAVE_RT_PUSH_IPI
3645 +- rt_rq->push_flags = 0;
3646 +- rt_rq->push_cpu = nr_cpu_ids;
3647 +- raw_spin_lock_init(&rt_rq->push_lock);
3648 +- init_irq_work(&rt_rq->push_work, push_irq_work_func);
3649 +-#endif
3650 + #endif /* CONFIG_SMP */
3651 + /* We start is dequeued state, because no RT tasks are queued */
3652 + rt_rq->rt_queued = 0;
3653 +@@ -1864,160 +1853,166 @@ static void push_rt_tasks(struct rq *rq)
3654 + }
3655 +
3656 + #ifdef HAVE_RT_PUSH_IPI
3657 ++
3658 + /*
3659 +- * The search for the next cpu always starts at rq->cpu and ends
3660 +- * when we reach rq->cpu again. It will never return rq->cpu.
3661 +- * This returns the next cpu to check, or nr_cpu_ids if the loop
3662 +- * is complete.
3663 ++ * When a high priority task schedules out from a CPU and a lower priority
3664 ++ * task is scheduled in, a check is made to see if there's any RT tasks
3665 ++ * on other CPUs that are waiting to run because a higher priority RT task
3666 ++ * is currently running on its CPU. In this case, the CPU with multiple RT
3667 ++ * tasks queued on it (overloaded) needs to be notified that a CPU has opened
3668 ++ * up that may be able to run one of its non-running queued RT tasks.
3669 ++ *
3670 ++ * All CPUs with overloaded RT tasks need to be notified as there is currently
3671 ++ * no way to know which of these CPUs have the highest priority task waiting
3672 ++ * to run. Instead of trying to take a spinlock on each of these CPUs,
3673 ++ * which has been shown to cause large latency when done on machines with
3674 ++ * many CPUs, an IPI is sent to the CPUs to have them push off the overloaded
3675 ++ * RT tasks waiting to run.
3676 ++ *
3677 ++ * Just sending an IPI to each of the CPUs is also an issue, as on large
3678 ++ * count CPU machines, this can cause an IPI storm on a CPU, especially
3679 ++ * if its the only CPU with multiple RT tasks queued, and a large number
3680 ++ * of CPUs scheduling a lower priority task at the same time.
3681 ++ *
3682 ++ * Each root domain has its own irq work function that can iterate over
3683 ++ * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
3684 ++ * tasks must be checked if there's one or many CPUs that are lowering
3685 ++ * their priority, there's a single irq work iterator that will try to
3686 ++ * push off RT tasks that are waiting to run.
3687 ++ *
3688 ++ * When a CPU schedules a lower priority task, it will kick off the
3689 ++ * irq work iterator that will jump to each CPU with overloaded RT tasks.
3690 ++ * As it only takes the first CPU that schedules a lower priority task
3691 ++ * to start the process, the rto_start variable is incremented and if
3692 ++ * the atomic result is one, then that CPU will try to take the rto_lock.
3693 ++ * This prevents high contention on the lock as the process handles all
3694 ++ * CPUs scheduling lower priority tasks.
3695 ++ *
3696 ++ * All CPUs that are scheduling a lower priority task will increment the
3697 ++ * rt_loop_next variable. This will make sure that the irq work iterator
3698 ++ * checks all RT overloaded CPUs whenever a CPU schedules a new lower
3699 ++ * priority task, even if the iterator is in the middle of a scan. Incrementing
3700 ++ * the rt_loop_next will cause the iterator to perform another scan.
3701 + *
3702 +- * rq->rt.push_cpu holds the last cpu returned by this function,
3703 +- * or if this is the first instance, it must hold rq->cpu.
3704 + */
3705 + static int rto_next_cpu(struct rq *rq)
3706 + {
3707 +- int prev_cpu = rq->rt.push_cpu;
3708 ++ struct root_domain *rd = rq->rd;
3709 ++ int next;
3710 + int cpu;
3711 +
3712 +- cpu = cpumask_next(prev_cpu, rq->rd->rto_mask);
3713 +-
3714 + /*
3715 +- * If the previous cpu is less than the rq's CPU, then it already
3716 +- * passed the end of the mask, and has started from the beginning.
3717 +- * We end if the next CPU is greater or equal to rq's CPU.
3718 ++ * When starting the IPI RT pushing, the rto_cpu is set to -1,
3719 ++ * rt_next_cpu() will simply return the first CPU found in
3720 ++ * the rto_mask.
3721 ++ *
3722 ++ * If rto_next_cpu() is called with rto_cpu is a valid cpu, it
3723 ++ * will return the next CPU found in the rto_mask.
3724 ++ *
3725 ++ * If there are no more CPUs left in the rto_mask, then a check is made
3726 ++ * against rto_loop and rto_loop_next. rto_loop is only updated with
3727 ++ * the rto_lock held, but any CPU may increment the rto_loop_next
3728 ++ * without any locking.
3729 + */
3730 +- if (prev_cpu < rq->cpu) {
3731 +- if (cpu >= rq->cpu)
3732 +- return nr_cpu_ids;
3733 ++ for (;;) {
3734 +
3735 +- } else if (cpu >= nr_cpu_ids) {
3736 +- /*
3737 +- * We passed the end of the mask, start at the beginning.
3738 +- * If the result is greater or equal to the rq's CPU, then
3739 +- * the loop is finished.
3740 +- */
3741 +- cpu = cpumask_first(rq->rd->rto_mask);
3742 +- if (cpu >= rq->cpu)
3743 +- return nr_cpu_ids;
3744 +- }
3745 +- rq->rt.push_cpu = cpu;
3746 ++ /* When rto_cpu is -1 this acts like cpumask_first() */
3747 ++ cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
3748 +
3749 +- /* Return cpu to let the caller know if the loop is finished or not */
3750 +- return cpu;
3751 +-}
3752 ++ rd->rto_cpu = cpu;
3753 +
3754 +-static int find_next_push_cpu(struct rq *rq)
3755 +-{
3756 +- struct rq *next_rq;
3757 +- int cpu;
3758 ++ if (cpu < nr_cpu_ids)
3759 ++ return cpu;
3760 +
3761 +- while (1) {
3762 +- cpu = rto_next_cpu(rq);
3763 +- if (cpu >= nr_cpu_ids)
3764 +- break;
3765 +- next_rq = cpu_rq(cpu);
3766 ++ rd->rto_cpu = -1;
3767 +
3768 +- /* Make sure the next rq can push to this rq */
3769 +- if (next_rq->rt.highest_prio.next < rq->rt.highest_prio.curr)
3770 ++ /*
3771 ++ * ACQUIRE ensures we see the @rto_mask changes
3772 ++ * made prior to the @next value observed.
3773 ++ *
3774 ++ * Matches WMB in rt_set_overload().
3775 ++ */
3776 ++ next = atomic_read_acquire(&rd->rto_loop_next);
3777 ++
3778 ++ if (rd->rto_loop == next)
3779 + break;
3780 ++
3781 ++ rd->rto_loop = next;
3782 + }
3783 +
3784 +- return cpu;
3785 ++ return -1;
3786 ++}
3787 ++
3788 ++static inline bool rto_start_trylock(atomic_t *v)
3789 ++{
3790 ++ return !atomic_cmpxchg_acquire(v, 0, 1);
3791 + }
3792 +
3793 +-#define RT_PUSH_IPI_EXECUTING 1
3794 +-#define RT_PUSH_IPI_RESTART 2
3795 ++static inline void rto_start_unlock(atomic_t *v)
3796 ++{
3797 ++ atomic_set_release(v, 0);
3798 ++}
3799 +
3800 + static void tell_cpu_to_push(struct rq *rq)
3801 + {
3802 +- int cpu;
3803 ++ int cpu = -1;
3804 +
3805 +- if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
3806 +- raw_spin_lock(&rq->rt.push_lock);
3807 +- /* Make sure it's still executing */
3808 +- if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
3809 +- /*
3810 +- * Tell the IPI to restart the loop as things have
3811 +- * changed since it started.
3812 +- */
3813 +- rq->rt.push_flags |= RT_PUSH_IPI_RESTART;
3814 +- raw_spin_unlock(&rq->rt.push_lock);
3815 +- return;
3816 +- }
3817 +- raw_spin_unlock(&rq->rt.push_lock);
3818 +- }
3819 ++ /* Keep the loop going if the IPI is currently active */
3820 ++ atomic_inc(&rq->rd->rto_loop_next);
3821 +
3822 +- /* When here, there's no IPI going around */
3823 +-
3824 +- rq->rt.push_cpu = rq->cpu;
3825 +- cpu = find_next_push_cpu(rq);
3826 +- if (cpu >= nr_cpu_ids)
3827 ++ /* Only one CPU can initiate a loop at a time */
3828 ++ if (!rto_start_trylock(&rq->rd->rto_loop_start))
3829 + return;
3830 +
3831 +- rq->rt.push_flags = RT_PUSH_IPI_EXECUTING;
3832 ++ raw_spin_lock(&rq->rd->rto_lock);
3833 ++
3834 ++ /*
3835 ++ * The rto_cpu is updated under the lock, if it has a valid cpu
3836 ++ * then the IPI is still running and will continue due to the
3837 ++ * update to loop_next, and nothing needs to be done here.
3838 ++ * Otherwise it is finishing up and an ipi needs to be sent.
3839 ++ */
3840 ++ if (rq->rd->rto_cpu < 0)
3841 ++ cpu = rto_next_cpu(rq);
3842 ++
3843 ++ raw_spin_unlock(&rq->rd->rto_lock);
3844 +
3845 +- irq_work_queue_on(&rq->rt.push_work, cpu);
3846 ++ rto_start_unlock(&rq->rd->rto_loop_start);
3847 ++
3848 ++ if (cpu >= 0)
3849 ++ irq_work_queue_on(&rq->rd->rto_push_work, cpu);
3850 + }
3851 +
3852 + /* Called from hardirq context */
3853 +-static void try_to_push_tasks(void *arg)
3854 ++void rto_push_irq_work_func(struct irq_work *work)
3855 + {
3856 +- struct rt_rq *rt_rq = arg;
3857 +- struct rq *rq, *src_rq;
3858 +- int this_cpu;
3859 ++ struct rq *rq;
3860 + int cpu;
3861 +
3862 +- this_cpu = rt_rq->push_cpu;
3863 ++ rq = this_rq();
3864 +
3865 +- /* Paranoid check */
3866 +- BUG_ON(this_cpu != smp_processor_id());
3867 +-
3868 +- rq = cpu_rq(this_cpu);
3869 +- src_rq = rq_of_rt_rq(rt_rq);
3870 +-
3871 +-again:
3872 ++ /*
3873 ++ * We do not need to grab the lock to check for has_pushable_tasks.
3874 ++ * When it gets updated, a check is made if a push is possible.
3875 ++ */
3876 + if (has_pushable_tasks(rq)) {
3877 + raw_spin_lock(&rq->lock);
3878 +- push_rt_task(rq);
3879 ++ push_rt_tasks(rq);
3880 + raw_spin_unlock(&rq->lock);
3881 + }
3882 +
3883 +- /* Pass the IPI to the next rt overloaded queue */
3884 +- raw_spin_lock(&rt_rq->push_lock);
3885 +- /*
3886 +- * If the source queue changed since the IPI went out,
3887 +- * we need to restart the search from that CPU again.
3888 +- */
3889 +- if (rt_rq->push_flags & RT_PUSH_IPI_RESTART) {
3890 +- rt_rq->push_flags &= ~RT_PUSH_IPI_RESTART;
3891 +- rt_rq->push_cpu = src_rq->cpu;
3892 +- }
3893 ++ raw_spin_lock(&rq->rd->rto_lock);
3894 +
3895 +- cpu = find_next_push_cpu(src_rq);
3896 ++ /* Pass the IPI to the next rt overloaded queue */
3897 ++ cpu = rto_next_cpu(rq);
3898 +
3899 +- if (cpu >= nr_cpu_ids)
3900 +- rt_rq->push_flags &= ~RT_PUSH_IPI_EXECUTING;
3901 +- raw_spin_unlock(&rt_rq->push_lock);
3902 ++ raw_spin_unlock(&rq->rd->rto_lock);
3903 +
3904 +- if (cpu >= nr_cpu_ids)
3905 ++ if (cpu < 0)
3906 + return;
3907 +
3908 +- /*
3909 +- * It is possible that a restart caused this CPU to be
3910 +- * chosen again. Don't bother with an IPI, just see if we
3911 +- * have more to push.
3912 +- */
3913 +- if (unlikely(cpu == rq->cpu))
3914 +- goto again;
3915 +-
3916 + /* Try the next RT overloaded CPU */
3917 +- irq_work_queue_on(&rt_rq->push_work, cpu);
3918 +-}
3919 +-
3920 +-static void push_irq_work_func(struct irq_work *work)
3921 +-{
3922 +- struct rt_rq *rt_rq = container_of(work, struct rt_rq, push_work);
3923 +-
3924 +- try_to_push_tasks(rt_rq);
3925 ++ irq_work_queue_on(&rq->rd->rto_push_work, cpu);
3926 + }
3927 + #endif /* HAVE_RT_PUSH_IPI */
3928 +
3929 +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
3930 +index ad77d666583c..cff985feb6e7 100644
3931 +--- a/kernel/sched/sched.h
3932 ++++ b/kernel/sched/sched.h
3933 +@@ -463,7 +463,7 @@ static inline int rt_bandwidth_enabled(void)
3934 + }
3935 +
3936 + /* RT IPI pull logic requires IRQ_WORK */
3937 +-#ifdef CONFIG_IRQ_WORK
3938 ++#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP)
3939 + # define HAVE_RT_PUSH_IPI
3940 + #endif
3941 +
3942 +@@ -485,12 +485,6 @@ struct rt_rq {
3943 + unsigned long rt_nr_total;
3944 + int overloaded;
3945 + struct plist_head pushable_tasks;
3946 +-#ifdef HAVE_RT_PUSH_IPI
3947 +- int push_flags;
3948 +- int push_cpu;
3949 +- struct irq_work push_work;
3950 +- raw_spinlock_t push_lock;
3951 +-#endif
3952 + #endif /* CONFIG_SMP */
3953 + int rt_queued;
3954 +
3955 +@@ -572,6 +566,19 @@ struct root_domain {
3956 + struct dl_bw dl_bw;
3957 + struct cpudl cpudl;
3958 +
3959 ++#ifdef HAVE_RT_PUSH_IPI
3960 ++ /*
3961 ++ * For IPI pull requests, loop across the rto_mask.
3962 ++ */
3963 ++ struct irq_work rto_push_work;
3964 ++ raw_spinlock_t rto_lock;
3965 ++ /* These are only updated and read within rto_lock */
3966 ++ int rto_loop;
3967 ++ int rto_cpu;
3968 ++ /* These atomics are updated outside of a lock */
3969 ++ atomic_t rto_loop_next;
3970 ++ atomic_t rto_loop_start;
3971 ++#endif
3972 + /*
3973 + * The "RT overload" flag: it gets set if a CPU has more than
3974 + * one runnable RT task.
3975 +@@ -584,6 +591,9 @@ struct root_domain {
3976 +
3977 + extern struct root_domain def_root_domain;
3978 +
3979 ++#ifdef HAVE_RT_PUSH_IPI
3980 ++extern void rto_push_irq_work_func(struct irq_work *work);
3981 ++#endif
3982 + #endif /* CONFIG_SMP */
3983 +
3984 + /*
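
Taken together, the two scheduler hunks above replace the per-rq push state
with per-root-domain state driven by a handful of atomics. A compressed
userspace sketch of the start/next protocol (illustrative only; the field
names follow the patch, everything else is hypothetical):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int rto_loop_start;  /* 0 = no IPI chain running */
    static atomic_int rto_loop_next;   /* bumped by every new request */
    static int rto_loop;               /* iterator's last observed value */

    /* tell_cpu_to_push(): only the first requester starts the chain */
    static void kick(void)
    {
        int zero = 0;

        atomic_fetch_add(&rto_loop_next, 1);      /* request a (re)scan */
        if (!atomic_compare_exchange_strong(&rto_loop_start, &zero, 1))
            return;                               /* chain already active */
        /* ... pick the first overloaded CPU and queue the irq work ... */
        atomic_store(&rto_loop_start, 0);         /* rto_start_unlock() */
    }

    /* rto_next_cpu() tail: stop only if no new request arrived mid-scan */
    static bool scan_finished(void)
    {
        int next = atomic_load(&rto_loop_next);

        if (rto_loop == next)
            return true;       /* mask exhausted and nothing new queued */
        rto_loop = next;       /* someone kicked again: scan once more */
        return false;
    }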
3985 +diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c
3986 +index e24388a863a7..468fb7cd1221 100644
3987 +--- a/lib/mpi/mpi-pow.c
3988 ++++ b/lib/mpi/mpi-pow.c
3989 +@@ -26,6 +26,7 @@
3990 + * however I decided to publish this code under the plain GPL.
3991 + */
3992 +
3993 ++#include <linux/sched.h>
3994 + #include <linux/string.h>
3995 + #include "mpi-internal.h"
3996 + #include "longlong.h"
3997 +@@ -256,6 +257,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
3998 + }
3999 + e <<= 1;
4000 + c--;
4001 ++ cond_resched();
4002 + }
4003 +
4004 + i--;
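
The lib/mpi hunk above drops a cond_resched() into the modular-
exponentiation inner loop; without it, a huge exponent can keep one CPU in
the kernel long enough to trip the soft-lockup watchdog on !PREEMPT builds.
A kernel-style sketch of the pattern (a fragment assuming process context,
not a standalone program; the transform is a stand-in):

    #include <linux/types.h>
    #include <linux/sched.h>

    static void long_transform(u32 *limb, size_t n)
    {
        size_t i;

        for (i = 0; i < n; i++) {
            limb[i] ^= 0xdeadbeef;   /* stand-in for the expensive step */
            if (!(i & 1023))
                cond_resched();      /* let other tasks run on !PREEMPT */
        }
    }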
4005 +diff --git a/net/9p/client.c b/net/9p/client.c
4006 +index cf129fec7329..1fd60190177e 100644
4007 +--- a/net/9p/client.c
4008 ++++ b/net/9p/client.c
4009 +@@ -749,8 +749,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
4010 + }
4011 + again:
4012 + /* Wait for the response */
4013 +- err = wait_event_interruptible(*req->wq,
4014 +- req->status >= REQ_STATUS_RCVD);
4015 ++ err = wait_event_killable(*req->wq, req->status >= REQ_STATUS_RCVD);
4016 +
4017 + /*
4018 + * Make sure our req is coherent with regard to updates in other
4019 +diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
4020 +index f24b25c25106..f3a4efcf1456 100644
4021 +--- a/net/9p/trans_virtio.c
4022 ++++ b/net/9p/trans_virtio.c
4023 +@@ -286,8 +286,8 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
4024 + if (err == -ENOSPC) {
4025 + chan->ring_bufs_avail = 0;
4026 + spin_unlock_irqrestore(&chan->lock, flags);
4027 +- err = wait_event_interruptible(*chan->vc_wq,
4028 +- chan->ring_bufs_avail);
4029 ++ err = wait_event_killable(*chan->vc_wq,
4030 ++ chan->ring_bufs_avail);
4031 + if (err == -ERESTARTSYS)
4032 + return err;
4033 +
4034 +@@ -327,7 +327,7 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
4035 + * Other zc request to finish here
4036 + */
4037 + if (atomic_read(&vp_pinned) >= chan->p9_max_pages) {
4038 +- err = wait_event_interruptible(vp_wq,
4039 ++ err = wait_event_killable(vp_wq,
4040 + (atomic_read(&vp_pinned) < chan->p9_max_pages));
4041 + if (err == -ERESTARTSYS)
4042 + return err;
4043 +@@ -471,8 +471,8 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
4044 + if (err == -ENOSPC) {
4045 + chan->ring_bufs_avail = 0;
4046 + spin_unlock_irqrestore(&chan->lock, flags);
4047 +- err = wait_event_interruptible(*chan->vc_wq,
4048 +- chan->ring_bufs_avail);
4049 ++ err = wait_event_killable(*chan->vc_wq,
4050 ++ chan->ring_bufs_avail);
4051 + if (err == -ERESTARTSYS)
4052 + goto err_out;
4053 +
4054 +@@ -489,8 +489,7 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
4055 + virtqueue_kick(chan->vq);
4056 + spin_unlock_irqrestore(&chan->lock, flags);
4057 + p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n");
4058 +- err = wait_event_interruptible(*req->wq,
4059 +- req->status >= REQ_STATUS_RCVD);
4060 ++ err = wait_event_killable(*req->wq, req->status >= REQ_STATUS_RCVD);
4061 + /*
4062 + * Non kernel buffers are pinned, unpin them
4063 + */
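
All of the 9p hunks above make the same swap: wait_event_interruptible()
returns -ERESTARTSYS on any pending signal, so a stray non-fatal signal
could error out an in-flight RPC, while wait_event_killable() wakes only
for fatal signals. A kernel-style fragment of the pattern (hypothetical
wait condition and threshold):

    #include <linux/wait.h>
    #include <linux/sched.h>

    /* Returns 0 once *status reaches done; -ERESTARTSYS only if the
     * task received a fatal signal (SIGKILL), not on every signal. */
    static int wait_for_reply(wait_queue_head_t *wq, int *status, int done)
    {
        return wait_event_killable(*wq, *status >= done);
    }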
4064 +diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
4065 +index 292e33bd916e..5f3a627afcc6 100644
4066 +--- a/net/ceph/crypto.c
4067 ++++ b/net/ceph/crypto.c
4068 +@@ -34,7 +34,9 @@ static int set_secret(struct ceph_crypto_key *key, void *buf)
4069 + return -ENOTSUPP;
4070 + }
4071 +
4072 +- WARN_ON(!key->len);
4073 ++ if (!key->len)
4074 ++ return -EINVAL;
4075 ++
4076 + key->key = kmemdup(buf, key->len, GFP_NOIO);
4077 + if (!key->key) {
4078 + ret = -ENOMEM;
4079 +diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
4080 +index 4d37bdcbc2d5..551dd393ceec 100644
4081 +--- a/net/ipv4/ip_sockglue.c
4082 ++++ b/net/ipv4/ip_sockglue.c
4083 +@@ -819,6 +819,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
4084 + {
4085 + struct ip_mreqn mreq;
4086 + struct net_device *dev = NULL;
4087 ++ int midx;
4088 +
4089 + if (sk->sk_type == SOCK_STREAM)
4090 + goto e_inval;
4091 +@@ -863,11 +864,15 @@ static int do_ip_setsockopt(struct sock *sk, int level,
4092 + err = -EADDRNOTAVAIL;
4093 + if (!dev)
4094 + break;
4095 ++
4096 ++ midx = l3mdev_master_ifindex(dev);
4097 ++
4098 + dev_put(dev);
4099 +
4100 + err = -EINVAL;
4101 + if (sk->sk_bound_dev_if &&
4102 +- mreq.imr_ifindex != sk->sk_bound_dev_if)
4103 ++ mreq.imr_ifindex != sk->sk_bound_dev_if &&
4104 ++ (!midx || midx != sk->sk_bound_dev_if))
4105 + break;
4106 +
4107 + inet->mc_index = mreq.imr_ifindex;
4108 +diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
4109 +index 636ec56f5f50..38bee173dc2b 100644
4110 +--- a/net/ipv6/ipv6_sockglue.c
4111 ++++ b/net/ipv6/ipv6_sockglue.c
4112 +@@ -585,16 +585,24 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
4113 +
4114 + if (val) {
4115 + struct net_device *dev;
4116 ++ int midx;
4117 +
4118 +- if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val)
4119 +- goto e_inval;
4120 ++ rcu_read_lock();
4121 +
4122 +- dev = dev_get_by_index(net, val);
4123 ++ dev = dev_get_by_index_rcu(net, val);
4124 + if (!dev) {
4125 ++ rcu_read_unlock();
4126 + retv = -ENODEV;
4127 + break;
4128 + }
4129 +- dev_put(dev);
4130 ++ midx = l3mdev_master_ifindex_rcu(dev);
4131 ++
4132 ++ rcu_read_unlock();
4133 ++
4134 ++ if (sk->sk_bound_dev_if &&
4135 ++ sk->sk_bound_dev_if != val &&
4136 ++ (!midx || midx != sk->sk_bound_dev_if))
4137 ++ goto e_inval;
4138 + }
4139 + np->mcast_oif = val;
4140 + retv = 0;
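
Both setsockopt hunks above relax the same check: a socket bound to a VRF
device may now select a multicast interface that is enslaved to that VRF,
not only the VRF device itself. The accepted combinations, as a small
self-contained predicate (hypothetical helper mirroring the patched
condition):

    #include <stdbool.h>

    static bool mcast_if_allowed(int bound_if, int ifindex, int l3_master)
    {
        if (!bound_if)
            return true;              /* socket not bound to a device */
        if (ifindex == bound_if)
            return true;              /* exact device match */
        /* slave of the bound VRF: l3_master is its master's ifindex */
        return l3_master != 0 && l3_master == bound_if;
    }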
4141 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
4142 +index 61729641e027..6e8bacb0b458 100644
4143 +--- a/net/ipv6/route.c
4144 ++++ b/net/ipv6/route.c
4145 +@@ -3495,7 +3495,11 @@ static int ip6_route_dev_notify(struct notifier_block *this,
4146 + net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
4147 + net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
4148 + #endif
4149 +- } else if (event == NETDEV_UNREGISTER) {
4150 ++ } else if (event == NETDEV_UNREGISTER &&
4151 ++ dev->reg_state != NETREG_UNREGISTERED) {
4152 ++ /* NETDEV_UNREGISTER could be fired for multiple times by
4153 ++ * netdev_wait_allrefs(). Make sure we only call this once.
4154 ++ */
4155 + in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev);
4156 + #ifdef CONFIG_IPV6_MULTIPLE_TABLES
4157 + in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev);
4158 +diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
4159 +index 34c2add2c455..03dbc6bd8598 100644
4160 +--- a/net/mac80211/ieee80211_i.h
4161 ++++ b/net/mac80211/ieee80211_i.h
4162 +@@ -681,7 +681,6 @@ struct ieee80211_if_mesh {
4163 + const struct ieee80211_mesh_sync_ops *sync_ops;
4164 + s64 sync_offset_clockdrift_max;
4165 + spinlock_t sync_offset_lock;
4166 +- bool adjusting_tbtt;
4167 + /* mesh power save */
4168 + enum nl80211_mesh_power_mode nonpeer_pm;
4169 + int ps_peers_light_sleep;
4170 +diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
4171 +index 50e1b7f78bd4..5c67a696e046 100644
4172 +--- a/net/mac80211/mesh.c
4173 ++++ b/net/mac80211/mesh.c
4174 +@@ -279,8 +279,6 @@ int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata,
4175 + /* Mesh PS mode. See IEEE802.11-2012 8.4.2.100.8 */
4176 + *pos |= ifmsh->ps_peers_deep_sleep ?
4177 + IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL : 0x00;
4178 +- *pos++ |= ifmsh->adjusting_tbtt ?
4179 +- IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING : 0x00;
4180 + *pos++ = 0x00;
4181 +
4182 + return 0;
4183 +@@ -850,7 +848,6 @@ int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
4184 + ifmsh->mesh_cc_id = 0; /* Disabled */
4185 + /* register sync ops from extensible synchronization framework */
4186 + ifmsh->sync_ops = ieee80211_mesh_sync_ops_get(ifmsh->mesh_sp_id);
4187 +- ifmsh->adjusting_tbtt = false;
4188 + ifmsh->sync_offset_clockdrift_max = 0;
4189 + set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags);
4190 + ieee80211_mesh_root_setup(ifmsh);
4191 +diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
4192 +index 7fcdcf622655..fcba70e57073 100644
4193 +--- a/net/mac80211/mesh_plink.c
4194 ++++ b/net/mac80211/mesh_plink.c
4195 +@@ -505,12 +505,14 @@ mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *addr,
4196 +
4197 + /* Userspace handles station allocation */
4198 + if (sdata->u.mesh.user_mpm ||
4199 +- sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED)
4200 +- cfg80211_notify_new_peer_candidate(sdata->dev, addr,
4201 +- elems->ie_start,
4202 +- elems->total_len,
4203 +- GFP_KERNEL);
4204 +- else
4205 ++ sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) {
4206 ++ if (mesh_peer_accepts_plinks(elems) &&
4207 ++ mesh_plink_availables(sdata))
4208 ++ cfg80211_notify_new_peer_candidate(sdata->dev, addr,
4209 ++ elems->ie_start,
4210 ++ elems->total_len,
4211 ++ GFP_KERNEL);
4212 ++ } else
4213 + sta = __mesh_sta_info_alloc(sdata, addr);
4214 +
4215 + return sta;
4216 +diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c
4217 +index faca22cd02b5..75608c07dc7b 100644
4218 +--- a/net/mac80211/mesh_sync.c
4219 ++++ b/net/mac80211/mesh_sync.c
4220 +@@ -123,7 +123,6 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
4221 + */
4222 +
4223 + if (elems->mesh_config && mesh_peer_tbtt_adjusting(elems)) {
4224 +- clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
4225 + msync_dbg(sdata, "STA %pM : is adjusting TBTT\n",
4226 + sta->sta.addr);
4227 + goto no_sync;
4228 +@@ -172,11 +171,9 @@ static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata,
4229 + struct beacon_data *beacon)
4230 + {
4231 + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
4232 +- u8 cap;
4233 +
4234 + WARN_ON(ifmsh->mesh_sp_id != IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET);
4235 + WARN_ON(!rcu_read_lock_held());
4236 +- cap = beacon->meshconf->meshconf_cap;
4237 +
4238 + spin_lock_bh(&ifmsh->sync_offset_lock);
4239 +
4240 +@@ -190,21 +187,13 @@ static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata,
4241 + "TBTT : kicking off TBTT adjustment with clockdrift_max=%lld\n",
4242 + ifmsh->sync_offset_clockdrift_max);
4243 + set_bit(MESH_WORK_DRIFT_ADJUST, &ifmsh->wrkq_flags);
4244 +-
4245 +- ifmsh->adjusting_tbtt = true;
4246 + } else {
4247 + msync_dbg(sdata,
4248 + "TBTT : max clockdrift=%lld; too small to adjust\n",
4249 + (long long)ifmsh->sync_offset_clockdrift_max);
4250 + ifmsh->sync_offset_clockdrift_max = 0;
4251 +-
4252 +- ifmsh->adjusting_tbtt = false;
4253 + }
4254 + spin_unlock_bh(&ifmsh->sync_offset_lock);
4255 +-
4256 +- beacon->meshconf->meshconf_cap = ifmsh->adjusting_tbtt ?
4257 +- IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING | cap :
4258 +- ~IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING & cap;
4259 + }
4260 +
4261 + static const struct sync_method sync_methods[] = {
4262 +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
4263 +index 778fcdb83225..fa3ef25441e5 100644
4264 +--- a/net/netfilter/nf_tables_api.c
4265 ++++ b/net/netfilter/nf_tables_api.c
4266 +@@ -2068,7 +2068,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
4267 + * is called on error from nf_tables_newrule().
4268 + */
4269 + expr = nft_expr_first(rule);
4270 +- while (expr->ops && expr != nft_expr_last(rule)) {
4271 ++ while (expr != nft_expr_last(rule) && expr->ops) {
4272 + nf_tables_expr_destroy(ctx, expr);
4273 + expr = nft_expr_next(expr);
4274 + }
4275 +diff --git a/net/netfilter/nft_queue.c b/net/netfilter/nft_queue.c
4276 +index 393d359a1889..ef4768a451f4 100644
4277 +--- a/net/netfilter/nft_queue.c
4278 ++++ b/net/netfilter/nft_queue.c
4279 +@@ -38,7 +38,7 @@ static void nft_queue_eval(const struct nft_expr *expr,
4280 +
4281 + if (priv->queues_total > 1) {
4282 + if (priv->flags & NFT_QUEUE_FLAG_CPU_FANOUT) {
4283 +- int cpu = smp_processor_id();
4284 ++ int cpu = raw_smp_processor_id();
4285 +
4286 + queue = priv->queuenum + cpu % priv->queues_total;
4287 + } else {
4288 +diff --git a/net/nfc/core.c b/net/nfc/core.c
4289 +index 5cf33df888c3..c699d64a0753 100644
4290 +--- a/net/nfc/core.c
4291 ++++ b/net/nfc/core.c
4292 +@@ -1106,7 +1106,7 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
4293 + err_free_dev:
4294 + kfree(dev);
4295 +
4296 +- return ERR_PTR(rc);
4297 ++ return NULL;
4298 + }
4299 + EXPORT_SYMBOL(nfc_allocate_device);
4300 +
4301 +diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c
4302 +index d921adc62765..66b3d6228a15 100644
4303 +--- a/net/rds/ib_frmr.c
4304 ++++ b/net/rds/ib_frmr.c
4305 +@@ -104,14 +104,15 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
4306 + struct rds_ib_frmr *frmr = &ibmr->u.frmr;
4307 + struct ib_send_wr *failed_wr;
4308 + struct ib_reg_wr reg_wr;
4309 +- int ret;
4310 ++ int ret, off = 0;
4311 +
4312 + while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
4313 + atomic_inc(&ibmr->ic->i_fastreg_wrs);
4314 + cpu_relax();
4315 + }
4316 +
4317 +- ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_len, 0, PAGE_SIZE);
4318 ++ ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_len,
4319 ++ &off, PAGE_SIZE);
4320 + if (unlikely(ret != ibmr->sg_len))
4321 + return ret < 0 ? ret : -EINVAL;
4322 +
4323 +diff --git a/net/rds/rdma.c b/net/rds/rdma.c
4324 +index 8d3a851a3476..60e90f761838 100644
4325 +--- a/net/rds/rdma.c
4326 ++++ b/net/rds/rdma.c
4327 +@@ -40,7 +40,6 @@
4328 + /*
4329 + * XXX
4330 + * - build with sparse
4331 +- * - should we limit the size of a mr region? let transport return failure?
4332 + * - should we detect duplicate keys on a socket? hmm.
4333 + * - an rdma is an mlock, apply rlimit?
4334 + */
4335 +@@ -200,6 +199,14 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
4336 + goto out;
4337 + }
4338 +
4339 ++ /* Restrict the size of mr irrespective of underlying transport
4340 ++ * To account for unaligned mr regions, subtract one from nr_pages
4341 ++ */
4342 ++ if ((nr_pages - 1) > (RDS_MAX_MSG_SIZE >> PAGE_SHIFT)) {
4343 ++ ret = -EMSGSIZE;
4344 ++ goto out;
4345 ++ }
4346 ++
4347 + rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n",
4348 + args->vec.addr, args->vec.bytes, nr_pages);
4349 +
4350 +diff --git a/net/rds/rds.h b/net/rds/rds.h
4351 +index f107a968ddff..30a51fec0f63 100644
4352 +--- a/net/rds/rds.h
4353 ++++ b/net/rds/rds.h
4354 +@@ -50,6 +50,9 @@ void rdsdebug(char *fmt, ...)
4355 + #define RDS_FRAG_SHIFT 12
4356 + #define RDS_FRAG_SIZE ((unsigned int)(1 << RDS_FRAG_SHIFT))
4357 +
4358 ++/* Used to limit both RDMA and non-RDMA RDS message to 1MB */
4359 ++#define RDS_MAX_MSG_SIZE ((unsigned int)(1 << 20))
4360 ++
4361 + #define RDS_CONG_MAP_BYTES (65536 / 8)
4362 + #define RDS_CONG_MAP_PAGES (PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
4363 + #define RDS_CONG_MAP_PAGE_BITS (PAGE_SIZE * 8)
4364 +diff --git a/net/rds/send.c b/net/rds/send.c
4365 +index f28651b6ae83..ad247dc71ebb 100644
4366 +--- a/net/rds/send.c
4367 ++++ b/net/rds/send.c
4368 +@@ -946,6 +946,11 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
4369 + ret = rds_cmsg_rdma_map(rs, rm, cmsg);
4370 + if (!ret)
4371 + *allocated_mr = 1;
4372 ++ else if (ret == -ENODEV)
4373 ++ /* Accommodate the get_mr() case which can fail
4374 ++ * if connection isn't established yet.
4375 ++ */
4376 ++ ret = -EAGAIN;
4377 + break;
4378 + case RDS_CMSG_ATOMIC_CSWP:
4379 + case RDS_CMSG_ATOMIC_FADD:
4380 +@@ -988,6 +993,26 @@ static int rds_send_mprds_hash(struct rds_sock *rs, struct rds_connection *conn)
4381 + return hash;
4382 + }
4383 +
4384 ++static int rds_rdma_bytes(struct msghdr *msg, size_t *rdma_bytes)
4385 ++{
4386 ++ struct rds_rdma_args *args;
4387 ++ struct cmsghdr *cmsg;
4388 ++
4389 ++ for_each_cmsghdr(cmsg, msg) {
4390 ++ if (!CMSG_OK(msg, cmsg))
4391 ++ return -EINVAL;
4392 ++
4393 ++ if (cmsg->cmsg_level != SOL_RDS)
4394 ++ continue;
4395 ++
4396 ++ if (cmsg->cmsg_type == RDS_CMSG_RDMA_ARGS) {
4397 ++ args = CMSG_DATA(cmsg);
4398 ++ *rdma_bytes += args->remote_vec.bytes;
4399 ++ }
4400 ++ }
4401 ++ return 0;
4402 ++}
4403 ++
4404 + int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
4405 + {
4406 + struct sock *sk = sock->sk;
4407 +@@ -1002,6 +1027,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
4408 + int nonblock = msg->msg_flags & MSG_DONTWAIT;
4409 + long timeo = sock_sndtimeo(sk, nonblock);
4410 + struct rds_conn_path *cpath;
4411 ++ size_t total_payload_len = payload_len, rdma_payload_len = 0;
4412 +
4413 + /* Mirror Linux UDP mirror of BSD error message compatibility */
4414 + /* XXX: Perhaps MSG_MORE someday */
4415 +@@ -1034,6 +1060,16 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
4416 + }
4417 + release_sock(sk);
4418 +
4419 ++ ret = rds_rdma_bytes(msg, &rdma_payload_len);
4420 ++ if (ret)
4421 ++ goto out;
4422 ++
4423 ++ total_payload_len += rdma_payload_len;
4424 ++ if (max_t(size_t, payload_len, rdma_payload_len) > RDS_MAX_MSG_SIZE) {
4425 ++ ret = -EMSGSIZE;
4426 ++ goto out;
4427 ++ }
4428 ++
4429 + if (payload_len > rds_sk_sndbuf(rs)) {
4430 + ret = -EMSGSIZE;
4431 + goto out;
4432 +@@ -1083,8 +1119,12 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
4433 +
4434 + /* Parse any control messages the user may have included. */
4435 + ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
4436 +- if (ret)
4437 ++ if (ret) {
4438 ++ /* Trigger connection so that its ready for the next retry */
4439 ++ if (ret == -EAGAIN)
4440 ++ rds_conn_connect_if_down(conn);
4441 + goto out;
4442 ++ }
4443 +
4444 + if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
4445 + printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
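
The RDS hunks above cap a message at RDS_MAX_MSG_SIZE (1 MB), counting
RDMA bytes carried in control messages as well as the inline payload. A
userspace sketch of the control-message walk (the args struct is
simplified; the kernel's for_each_cmsghdr() iterates the same way):

    #include <sys/socket.h>
    #include <stdint.h>
    #include <stddef.h>

    struct rdma_args_sketch { uint64_t remote_bytes; };  /* simplified */

    static size_t total_rdma_bytes(struct msghdr *msg, int level, int type)
    {
        struct cmsghdr *c;
        size_t total = 0;

        for (c = CMSG_FIRSTHDR(msg); c; c = CMSG_NXTHDR(msg, c)) {
            if (c->cmsg_level != level || c->cmsg_type != type)
                continue;
            total += ((struct rdma_args_sketch *)CMSG_DATA(c))->remote_bytes;
        }
        return total;
    }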
4446 +diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
4447 +index 8a398b3fb532..2f633eec6b7a 100644
4448 +--- a/net/vmw_vsock/af_vsock.c
4449 ++++ b/net/vmw_vsock/af_vsock.c
4450 +@@ -1524,8 +1524,7 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
4451 + long timeout;
4452 + int err;
4453 + struct vsock_transport_send_notify_data send_data;
4454 +-
4455 +- DEFINE_WAIT(wait);
4456 ++ DEFINE_WAIT_FUNC(wait, woken_wake_function);
4457 +
4458 + sk = sock->sk;
4459 + vsk = vsock_sk(sk);
4460 +@@ -1568,11 +1567,10 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
4461 + if (err < 0)
4462 + goto out;
4463 +
4464 +-
4465 + while (total_written < len) {
4466 + ssize_t written;
4467 +
4468 +- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
4469 ++ add_wait_queue(sk_sleep(sk), &wait);
4470 + while (vsock_stream_has_space(vsk) == 0 &&
4471 + sk->sk_err == 0 &&
4472 + !(sk->sk_shutdown & SEND_SHUTDOWN) &&
4473 +@@ -1581,33 +1579,30 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
4474 + /* Don't wait for non-blocking sockets. */
4475 + if (timeout == 0) {
4476 + err = -EAGAIN;
4477 +- finish_wait(sk_sleep(sk), &wait);
4478 ++ remove_wait_queue(sk_sleep(sk), &wait);
4479 + goto out_err;
4480 + }
4481 +
4482 + err = transport->notify_send_pre_block(vsk, &send_data);
4483 + if (err < 0) {
4484 +- finish_wait(sk_sleep(sk), &wait);
4485 ++ remove_wait_queue(sk_sleep(sk), &wait);
4486 + goto out_err;
4487 + }
4488 +
4489 + release_sock(sk);
4490 +- timeout = schedule_timeout(timeout);
4491 ++ timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
4492 + lock_sock(sk);
4493 + if (signal_pending(current)) {
4494 + err = sock_intr_errno(timeout);
4495 +- finish_wait(sk_sleep(sk), &wait);
4496 ++ remove_wait_queue(sk_sleep(sk), &wait);
4497 + goto out_err;
4498 + } else if (timeout == 0) {
4499 + err = -EAGAIN;
4500 +- finish_wait(sk_sleep(sk), &wait);
4501 ++ remove_wait_queue(sk_sleep(sk), &wait);
4502 + goto out_err;
4503 + }
4504 +-
4505 +- prepare_to_wait(sk_sleep(sk), &wait,
4506 +- TASK_INTERRUPTIBLE);
4507 + }
4508 +- finish_wait(sk_sleep(sk), &wait);
4509 ++ remove_wait_queue(sk_sleep(sk), &wait);
4510 +
4511 + /* These checks occur both as part of and after the loop
4512 + * conditional since we need to check before and after
4513 +diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
4514 +index 7f0598b32f13..c80d80e312e3 100644
4515 +--- a/sound/core/pcm_lib.c
4516 ++++ b/sound/core/pcm_lib.c
4517 +@@ -264,8 +264,10 @@ static void update_audio_tstamp(struct snd_pcm_substream *substream,
4518 + runtime->rate);
4519 + *audio_tstamp = ns_to_timespec(audio_nsecs);
4520 + }
4521 +- runtime->status->audio_tstamp = *audio_tstamp;
4522 +- runtime->status->tstamp = *curr_tstamp;
4523 ++ if (!timespec_equal(&runtime->status->audio_tstamp, audio_tstamp)) {
4524 ++ runtime->status->audio_tstamp = *audio_tstamp;
4525 ++ runtime->status->tstamp = *curr_tstamp;
4526 ++ }
4527 +
4528 + /*
4529 + * re-take a driver timestamp to let apps detect if the reference tstamp
4530 +diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c
4531 +index 59127b6ef39e..e00f7e399e46 100644
4532 +--- a/sound/core/timer_compat.c
4533 ++++ b/sound/core/timer_compat.c
4534 +@@ -66,11 +66,11 @@ static int snd_timer_user_info_compat(struct file *file,
4535 + struct snd_timer *t;
4536 +
4537 + tu = file->private_data;
4538 +- if (snd_BUG_ON(!tu->timeri))
4539 +- return -ENXIO;
4540 ++ if (!tu->timeri)
4541 ++ return -EBADFD;
4542 + t = tu->timeri->timer;
4543 +- if (snd_BUG_ON(!t))
4544 +- return -ENXIO;
4545 ++ if (!t)
4546 ++ return -EBADFD;
4547 + memset(&info, 0, sizeof(info));
4548 + info.card = t->card ? t->card->number : -1;
4549 + if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
4550 +@@ -99,8 +99,8 @@ static int snd_timer_user_status_compat(struct file *file,
4551 + struct snd_timer_status32 status;
4552 +
4553 + tu = file->private_data;
4554 +- if (snd_BUG_ON(!tu->timeri))
4555 +- return -ENXIO;
4556 ++ if (!tu->timeri)
4557 ++ return -EBADFD;
4558 + memset(&status, 0, sizeof(status));
4559 + status.tstamp.tv_sec = tu->tstamp.tv_sec;
4560 + status.tstamp.tv_nsec = tu->tstamp.tv_nsec;
4561 +diff --git a/sound/hda/hdmi_chmap.c b/sound/hda/hdmi_chmap.c
4562 +index 81acc20c2535..f21633cd9b38 100644
4563 +--- a/sound/hda/hdmi_chmap.c
4564 ++++ b/sound/hda/hdmi_chmap.c
4565 +@@ -746,7 +746,7 @@ static int hdmi_chmap_ctl_get(struct snd_kcontrol *kcontrol,
4566 + memset(pcm_chmap, 0, sizeof(pcm_chmap));
4567 + chmap->ops.get_chmap(chmap->hdac, pcm_idx, pcm_chmap);
4568 +
4569 +- for (i = 0; i < sizeof(chmap); i++)
4570 ++ for (i = 0; i < ARRAY_SIZE(pcm_chmap); i++)
4571 + ucontrol->value.integer.value[i] = pcm_chmap[i];
4572 +
4573 + return 0;
4574 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
4575 +index 5cb7e04fa4ba..293f3f213776 100644
4576 +--- a/sound/pci/hda/hda_intel.c
4577 ++++ b/sound/pci/hda/hda_intel.c
4578 +@@ -2305,6 +2305,9 @@ static const struct pci_device_id azx_ids[] = {
4579 + /* AMD Hudson */
4580 + { PCI_DEVICE(0x1022, 0x780d),
4581 + .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
4582 ++ /* AMD Raven */
4583 ++ { PCI_DEVICE(0x1022, 0x15e3),
4584 ++ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
4585 + /* ATI HDMI */
4586 + { PCI_DEVICE(0x1002, 0x0002),
4587 + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
4588 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4589 +index 80c40a1b8b65..d7fa7373cb94 100644
4590 +--- a/sound/pci/hda/patch_realtek.c
4591 ++++ b/sound/pci/hda/patch_realtek.c
4592 +@@ -4419,7 +4419,7 @@ static void alc_no_shutup(struct hda_codec *codec)
4593 + static void alc_fixup_no_shutup(struct hda_codec *codec,
4594 + const struct hda_fixup *fix, int action)
4595 + {
4596 +- if (action == HDA_FIXUP_ACT_PRE_PROBE) {
4597 ++ if (action == HDA_FIXUP_ACT_PROBE) {
4598 + struct alc_spec *spec = codec->spec;
4599 + spec->shutup = alc_no_shutup;
4600 + }
4601 +@@ -6272,7 +6272,7 @@ static int patch_alc269(struct hda_codec *codec)
4602 + case 0x10ec0703:
4603 + spec->codec_variant = ALC269_TYPE_ALC700;
4604 + spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
4605 +- alc_update_coef_idx(codec, 0x4a, 0, 1 << 15); /* Combo jack auto trigger control */
4606 ++ alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
4607 + break;
4608 +
4609 + }
4610 +diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
4611 +index 3bdd81930486..757af795cebd 100644
4612 +--- a/sound/soc/codecs/wm_adsp.c
4613 ++++ b/sound/soc/codecs/wm_adsp.c
4614 +@@ -1365,7 +1365,7 @@ static int wm_adsp_load(struct wm_adsp *dsp)
4615 + const struct wmfw_region *region;
4616 + const struct wm_adsp_region *mem;
4617 + const char *region_name;
4618 +- char *file, *text;
4619 ++ char *file, *text = NULL;
4620 + struct wm_adsp_buf *buf;
4621 + unsigned int reg;
4622 + int regions = 0;
4623 +@@ -1526,10 +1526,21 @@ static int wm_adsp_load(struct wm_adsp *dsp)
4624 + regions, le32_to_cpu(region->len), offset,
4625 + region_name);
4626 +
4627 ++ if ((pos + le32_to_cpu(region->len) + sizeof(*region)) >
4628 ++ firmware->size) {
4629 ++ adsp_err(dsp,
4630 ++ "%s.%d: %s region len %d bytes exceeds file length %zu\n",
4631 ++ file, regions, region_name,
4632 ++ le32_to_cpu(region->len), firmware->size);
4633 ++ ret = -EINVAL;
4634 ++ goto out_fw;
4635 ++ }
4636 ++
4637 + if (text) {
4638 + memcpy(text, region->data, le32_to_cpu(region->len));
4639 + adsp_info(dsp, "%s: %s\n", file, text);
4640 + kfree(text);
4641 ++ text = NULL;
4642 + }
4643 +
4644 + if (reg) {
4645 +@@ -1574,6 +1585,7 @@ static int wm_adsp_load(struct wm_adsp *dsp)
4646 + regmap_async_complete(regmap);
4647 + wm_adsp_buf_free(&buf_list);
4648 + release_firmware(firmware);
4649 ++ kfree(text);
4650 + out:
4651 + kfree(file);
4652 +
4653 +@@ -2054,6 +2066,17 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
4654 + }
4655 +
4656 + if (reg) {
4657 ++ if ((pos + le32_to_cpu(blk->len) + sizeof(*blk)) >
4658 ++ firmware->size) {
4659 ++ adsp_err(dsp,
4660 ++ "%s.%d: %s region len %d bytes exceeds file length %zu\n",
4661 ++ file, blocks, region_name,
4662 ++ le32_to_cpu(blk->len),
4663 ++ firmware->size);
4664 ++ ret = -EINVAL;
4665 ++ goto out_fw;
4666 ++ }
4667 ++
4668 + buf = wm_adsp_buf_alloc(blk->data,
4669 + le32_to_cpu(blk->len),
4670 + &buf_list);
4671 +diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
4672 +index f18141098b50..91b444db575e 100644
4673 +--- a/sound/soc/sh/rcar/core.c
4674 ++++ b/sound/soc/sh/rcar/core.c
4675 +@@ -978,10 +978,8 @@ static int __rsnd_kctrl_new(struct rsnd_mod *mod,
4676 + return -ENOMEM;
4677 +
4678 + ret = snd_ctl_add(card, kctrl);
4679 +- if (ret < 0) {
4680 +- snd_ctl_free_one(kctrl);
4681 ++ if (ret < 0)
4682 + return ret;
4683 +- }
4684 +
4685 + cfg->update = update;
4686 + cfg->card = card;
4687 +diff --git a/sound/usb/clock.c b/sound/usb/clock.c
4688 +index 26dd5f20f149..eb3396ffba4c 100644
4689 +--- a/sound/usb/clock.c
4690 ++++ b/sound/usb/clock.c
4691 +@@ -43,7 +43,7 @@ static struct uac_clock_source_descriptor *
4692 + while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra,
4693 + ctrl_iface->extralen,
4694 + cs, UAC2_CLOCK_SOURCE))) {
4695 +- if (cs->bClockID == clock_id)
4696 ++ if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id)
4697 + return cs;
4698 + }
4699 +
4700 +@@ -59,8 +59,11 @@ static struct uac_clock_selector_descriptor *
4701 + while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra,
4702 + ctrl_iface->extralen,
4703 + cs, UAC2_CLOCK_SELECTOR))) {
4704 +- if (cs->bClockID == clock_id)
4705 ++ if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id) {
4706 ++ if (cs->bLength < 5 + cs->bNrInPins)
4707 ++ return NULL;
4708 + return cs;
4709 ++ }
4710 + }
4711 +
4712 + return NULL;
4713 +@@ -75,7 +78,7 @@ static struct uac_clock_multiplier_descriptor *
4714 + while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra,
4715 + ctrl_iface->extralen,
4716 + cs, UAC2_CLOCK_MULTIPLIER))) {
4717 +- if (cs->bClockID == clock_id)
4718 ++ if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id)
4719 + return cs;
4720 + }
4721 +
4722 +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
4723 +index d82e3c81c258..9133d3e53d9d 100644
4724 +--- a/sound/usb/mixer.c
4725 ++++ b/sound/usb/mixer.c
4726 +@@ -1463,6 +1463,12 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
4727 + __u8 *bmaControls;
4728 +
4729 + if (state->mixer->protocol == UAC_VERSION_1) {
4730 ++ if (hdr->bLength < 7) {
4731 ++ usb_audio_err(state->chip,
4732 ++ "unit %u: invalid UAC_FEATURE_UNIT descriptor\n",
4733 ++ unitid);
4734 ++ return -EINVAL;
4735 ++ }
4736 + csize = hdr->bControlSize;
4737 + if (!csize) {
4738 + usb_audio_dbg(state->chip,
4739 +@@ -1480,6 +1486,12 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
4740 + }
4741 + } else {
4742 + struct uac2_feature_unit_descriptor *ftr = _ftr;
4743 ++ if (hdr->bLength < 6) {
4744 ++ usb_audio_err(state->chip,
4745 ++ "unit %u: invalid UAC_FEATURE_UNIT descriptor\n",
4746 ++ unitid);
4747 ++ return -EINVAL;
4748 ++ }
4749 + csize = 4;
4750 + channels = (hdr->bLength - 6) / 4 - 1;
4751 + bmaControls = ftr->bmaControls;
4752 +@@ -2080,7 +2092,8 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
4753 + const struct usbmix_name_map *map;
4754 + char **namelist;
4755 +
4756 +- if (!desc->bNrInPins || desc->bLength < 5 + desc->bNrInPins) {
4757 ++ if (desc->bLength < 5 || !desc->bNrInPins ||
4758 ++ desc->bLength < 5 + desc->bNrInPins) {
4759 + usb_audio_err(state->chip,
4760 + "invalid SELECTOR UNIT descriptor %d\n", unitid);
4761 + return -EINVAL;