Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Tue, 03 Jul 2018 13:16:39
Message-Id: 1530623779.64c66e521226c8e9be72c2770e227c5c2ca7d7b1.mpagano@gentoo
1 commit: 64c66e521226c8e9be72c2770e227c5c2ca7d7b1
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Tue Jul 3 13:16:19 2018 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Tue Jul 3 13:16:19 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=64c66e52
7
8 Linux patch 4.9.111
9
10 0000_README | 4 +
11 1110_linux-4.9.111.patch | 3042 ++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 3046 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index da069fc..59417ea 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -483,6 +483,10 @@ Patch: 1109_linux-4.9.110.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.9.110
21
22 +Patch: 1110_linux-4.9.111.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.9.111
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1110_linux-4.9.111.patch b/1110_linux-4.9.111.patch
31 new file mode 100644
32 index 0000000..2464427
33 --- /dev/null
34 +++ b/1110_linux-4.9.111.patch
35 @@ -0,0 +1,3042 @@
36 +diff --git a/Documentation/printk-formats.txt b/Documentation/printk-formats.txt
37 +index 5962949944fd..d2fbeeb29582 100644
38 +--- a/Documentation/printk-formats.txt
39 ++++ b/Documentation/printk-formats.txt
40 +@@ -279,11 +279,10 @@ struct clk:
41 +
42 + %pC pll1
43 + %pCn pll1
44 +- %pCr 1560000000
45 +
46 + For printing struct clk structures. '%pC' and '%pCn' print the name
47 + (Common Clock Framework) or address (legacy clock framework) of the
48 +- structure; '%pCr' prints the current clock rate.
49 ++ structure.
50 +
51 + Passed by reference.
52 +
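[Editorial aside, not part of the patch] The hunk above drops the '%pCr' documentation; callers are expected to fetch the rate themselves with clk_get_rate(), which is exactly what the renesas-cpg-mssr.c hunk later in this patch does. A minimal sketch of that replacement pattern, assuming a valid struct clk *clk in a driver context:

    /* old: dev_dbg(dev, "clock %pC at %pCr Hz\n", clk, clk); */
    dev_dbg(dev, "clock %pC at %lu Hz\n", clk, clk_get_rate(clk));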
53 +diff --git a/Makefile b/Makefile
54 +index 2fcfe1147eaa..b10646531fcd 100644
55 +--- a/Makefile
56 ++++ b/Makefile
57 +@@ -1,6 +1,6 @@
58 + VERSION = 4
59 + PATCHLEVEL = 9
60 +-SUBLEVEL = 110
61 ++SUBLEVEL = 111
62 + EXTRAVERSION =
63 + NAME = Roaring Lionus
64 +
65 +diff --git a/arch/arm/include/asm/kgdb.h b/arch/arm/include/asm/kgdb.h
66 +index 0a9d5dd93294..6949c7d4481c 100644
67 +--- a/arch/arm/include/asm/kgdb.h
68 ++++ b/arch/arm/include/asm/kgdb.h
69 +@@ -76,7 +76,7 @@ extern int kgdb_fault_expected;
70 +
71 + #define KGDB_MAX_NO_CPUS 1
72 + #define BUFMAX 400
73 +-#define NUMREGBYTES (DBG_MAX_REG_NUM << 2)
74 ++#define NUMREGBYTES (GDB_MAX_REGS << 2)
75 + #define NUMCRITREGBYTES (32 << 2)
76 +
77 + #define _R0 0
78 +diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
79 +index 7959d2c92010..625c2b240ffb 100644
80 +--- a/arch/arm64/kernel/cpufeature.c
81 ++++ b/arch/arm64/kernel/cpufeature.c
82 +@@ -826,7 +826,7 @@ static int __init parse_kpti(char *str)
83 + __kpti_forced = enabled ? 1 : -1;
84 + return 0;
85 + }
86 +-__setup("kpti=", parse_kpti);
87 ++early_param("kpti", parse_kpti);
88 + #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
89 +
90 + static const struct arm64_cpu_capabilities arm64_features[] = {
91 +diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
92 +index 66cce2138f95..18d96d349a8b 100644
93 +--- a/arch/arm64/mm/proc.S
94 ++++ b/arch/arm64/mm/proc.S
95 +@@ -186,8 +186,9 @@ ENDPROC(idmap_cpu_replace_ttbr1)
96 +
97 + .macro __idmap_kpti_put_pgtable_ent_ng, type
98 + orr \type, \type, #PTE_NG // Same bit for blocks and pages
99 +- str \type, [cur_\()\type\()p] // Update the entry and ensure it
100 +- dc civac, cur_\()\type\()p // is visible to all CPUs.
101 ++ str \type, [cur_\()\type\()p] // Update the entry and ensure
102 ++ dmb sy // that it is visible to all
103 ++ dc civac, cur_\()\type\()p // CPUs.
104 + .endm
105 +
106 + /*
107 +diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c
108 +index 6e4955bc542b..fcd52cefee29 100644
109 +--- a/arch/m68k/mm/kmap.c
110 ++++ b/arch/m68k/mm/kmap.c
111 +@@ -88,7 +88,8 @@ static inline void free_io_area(void *addr)
112 + for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
113 + if (tmp->addr == addr) {
114 + *p = tmp->next;
115 +- __iounmap(tmp->addr, tmp->size);
116 ++ /* remove gap added in get_io_area() */
117 ++ __iounmap(tmp->addr, tmp->size - IO_SIZE);
118 + kfree(tmp);
119 + return;
120 + }
121 +diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
122 +index 6054d49e608e..8c9cbf13d32a 100644
123 +--- a/arch/mips/bcm47xx/setup.c
124 ++++ b/arch/mips/bcm47xx/setup.c
125 +@@ -212,6 +212,12 @@ static int __init bcm47xx_cpu_fixes(void)
126 + */
127 + if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706)
128 + cpu_wait = NULL;
129 ++
130 ++ /*
131 ++ * BCM47XX Erratum "R10: PCIe Transactions Periodically Fail"
132 ++ * Enable ExternalSync for sync instruction to take effect
133 ++ */
134 ++ set_c0_config7(MIPS_CONF7_ES);
135 + break;
136 + #endif
137 + }
138 +diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
139 +index ecabc00c1e66..853b2f4954fa 100644
140 +--- a/arch/mips/include/asm/io.h
141 ++++ b/arch/mips/include/asm/io.h
142 +@@ -412,6 +412,8 @@ static inline type pfx##in##bwlq##p(unsigned long port) \
143 + __val = *__addr; \
144 + slow; \
145 + \
146 ++ /* prevent prefetching of coherent DMA data prematurely */ \
147 ++ rmb(); \
148 + return pfx##ioswab##bwlq(__addr, __val); \
149 + }
150 +
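[Editorial aside, not part of the patch] The io.h hunk adds a read barrier inside the MIPS port-input accessors so that reads from a coherent DMA buffer cannot be satisfied by a prefetch issued before the device's status read. The ordering the patched in*() helpers now provide is roughly this, with the device, flag and buffer names being hypothetical:

    status = inl(dev->status_port);   /* device reports "DMA complete"           */
    if (status & DMA_DONE) {
        rmb();                        /* the patched accessor now implies this   */
        process(dev->dma_buf);        /* buffer reads can't use stale prefetches */
    }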
151 +diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
152 +index df78b2ca70eb..22a6782f84f5 100644
153 +--- a/arch/mips/include/asm/mipsregs.h
154 ++++ b/arch/mips/include/asm/mipsregs.h
155 +@@ -663,6 +663,8 @@
156 + #define MIPS_CONF7_WII (_ULCAST_(1) << 31)
157 +
158 + #define MIPS_CONF7_RPS (_ULCAST_(1) << 2)
159 ++/* ExternalSync */
160 ++#define MIPS_CONF7_ES (_ULCAST_(1) << 8)
161 +
162 + #define MIPS_CONF7_IAR (_ULCAST_(1) << 10)
163 + #define MIPS_CONF7_AR (_ULCAST_(1) << 16)
164 +@@ -2641,6 +2643,7 @@ __BUILD_SET_C0(status)
165 + __BUILD_SET_C0(cause)
166 + __BUILD_SET_C0(config)
167 + __BUILD_SET_C0(config5)
168 ++__BUILD_SET_C0(config7)
169 + __BUILD_SET_C0(intcontrol)
170 + __BUILD_SET_C0(intctl)
171 + __BUILD_SET_C0(srsmap)
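[Editorial aside, not part of the patch] The added __BUILD_SET_C0(config7) line is what makes the set_c0_config7() call in the bcm47xx/setup.c hunk above compile: the macro generates read-modify-write helpers for the CP0 Config7 register, roughly of this shape (simplified sketch, not the literal expansion):

    static inline unsigned int set_c0_config7(unsigned int set)
    {
        unsigned int res = read_c0_config7();
        write_c0_config7(res | set);        /* e.g. res | MIPS_CONF7_ES */
        return res;
    }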
172 +diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
173 +index 2f7c734771f4..0df911e772ae 100644
174 +--- a/arch/mips/kernel/mcount.S
175 ++++ b/arch/mips/kernel/mcount.S
176 +@@ -116,10 +116,20 @@ ftrace_stub:
177 + NESTED(_mcount, PT_SIZE, ra)
178 + PTR_LA t1, ftrace_stub
179 + PTR_L t2, ftrace_trace_function /* Prepare t2 for (1) */
180 +- bne t1, t2, static_trace
181 ++ beq t1, t2, fgraph_trace
182 + nop
183 +
184 ++ MCOUNT_SAVE_REGS
185 ++
186 ++ move a0, ra /* arg1: self return address */
187 ++ jalr t2 /* (1) call *ftrace_trace_function */
188 ++ move a1, AT /* arg2: parent's return address */
189 ++
190 ++ MCOUNT_RESTORE_REGS
191 ++
192 ++fgraph_trace:
193 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER
194 ++ PTR_LA t1, ftrace_stub
195 + PTR_L t3, ftrace_graph_return
196 + bne t1, t3, ftrace_graph_caller
197 + nop
198 +@@ -128,24 +138,11 @@ NESTED(_mcount, PT_SIZE, ra)
199 + bne t1, t3, ftrace_graph_caller
200 + nop
201 + #endif
202 +- b ftrace_stub
203 +-#ifdef CONFIG_32BIT
204 +- addiu sp, sp, 8
205 +-#else
206 +- nop
207 +-#endif
208 +
209 +-static_trace:
210 +- MCOUNT_SAVE_REGS
211 +-
212 +- move a0, ra /* arg1: self return address */
213 +- jalr t2 /* (1) call *ftrace_trace_function */
214 +- move a1, AT /* arg2: parent's return address */
215 +-
216 +- MCOUNT_RESTORE_REGS
217 + #ifdef CONFIG_32BIT
218 + addiu sp, sp, 8
219 + #endif
220 ++
221 + .globl ftrace_stub
222 + ftrace_stub:
223 + RETURN_BACK
224 +diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
225 +index 2dc52e6d2af4..e24ae0fa80ed 100644
226 +--- a/arch/powerpc/kernel/entry_64.S
227 ++++ b/arch/powerpc/kernel/entry_64.S
228 +@@ -586,6 +586,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
229 + * actually hit this code path.
230 + */
231 +
232 ++ isync
233 + slbie r6
234 + slbie r6 /* Workaround POWER5 < DD2.1 issue */
235 + slbmte r7,r0
236 +diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
237 +index 8f0c7c5d93f2..93a6eeba3ace 100644
238 +--- a/arch/powerpc/kernel/fadump.c
239 ++++ b/arch/powerpc/kernel/fadump.c
240 +@@ -1033,6 +1033,9 @@ void fadump_cleanup(void)
241 + init_fadump_mem_struct(&fdm,
242 + be64_to_cpu(fdm_active->cpu_state_data.destination_address));
243 + fadump_invalidate_dump(&fdm);
244 ++ } else if (fw_dump.dump_registered) {
245 ++ /* Un-register Firmware-assisted dump if it was registered. */
246 ++ fadump_unregister_dump(&fdm);
247 + }
248 + }
249 +
250 +diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
251 +index 469d86d1c2a5..532c585ec24b 100644
252 +--- a/arch/powerpc/kernel/hw_breakpoint.c
253 ++++ b/arch/powerpc/kernel/hw_breakpoint.c
254 +@@ -175,8 +175,8 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
255 + if (cpu_has_feature(CPU_FTR_DAWR)) {
256 + length_max = 512 ; /* 64 doublewords */
257 + /* DAWR region can't cross 512 boundary */
258 +- if ((bp->attr.bp_addr >> 10) !=
259 +- ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 10))
260 ++ if ((bp->attr.bp_addr >> 9) !=
261 ++ ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 9))
262 + return -EINVAL;
263 + }
264 + if (info->len >
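[Editorial aside, not part of the patch] The DAWR region is 512 bytes, and 512 = 2^9, so two addresses lie in the same region exactly when they are equal after a 9-bit right shift; the old 10-bit shift compared 1024-byte blocks and let some boundary-crossing requests through. Worked example with made-up numbers:

    /* addr = 0x1f8, len = 0x10: the range 0x1f8..0x207 crosses 0x200 */
    (0x1f8 >> 10) == (0x207 >> 10)    /* 0 == 0: old check wrongly passes */
    (0x1f8 >>  9) == (0x207 >>  9)    /* 0 != 1: new check rejects it     */

The ptrace.c hunk that follows sets attr.bp_len = 8, presumably so this validation sees a defined length for requests arriving via the legacy debugreg interface.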
265 +diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
266 +index d97370866a5f..adfa63e7df8c 100644
267 +--- a/arch/powerpc/kernel/ptrace.c
268 ++++ b/arch/powerpc/kernel/ptrace.c
269 +@@ -2380,6 +2380,7 @@ static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
270 + /* Create a new breakpoint request if one doesn't exist already */
271 + hw_breakpoint_init(&attr);
272 + attr.bp_addr = hw_brk.address;
273 ++ attr.bp_len = 8;
274 + arch_bp_generic_fields(hw_brk.type,
275 + &attr.bp_type);
276 +
277 +diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
278 +index f602307a4386..9ed90c502005 100644
279 +--- a/arch/powerpc/platforms/powernv/pci-ioda.c
280 ++++ b/arch/powerpc/platforms/powernv/pci-ioda.c
281 +@@ -3424,7 +3424,6 @@ static void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe)
282 + WARN_ON(pe->table_group.group);
283 + }
284 +
285 +- pnv_pci_ioda2_table_free_pages(tbl);
286 + iommu_free_table(tbl, "pnv");
287 + }
288 +
289 +diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
290 +index 78d1c6a3d221..eb53c2c78a1f 100644
291 +--- a/arch/x86/include/asm/barrier.h
292 ++++ b/arch/x86/include/asm/barrier.h
293 +@@ -37,7 +37,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
294 + {
295 + unsigned long mask;
296 +
297 +- asm ("cmp %1,%2; sbb %0,%0;"
298 ++ asm volatile ("cmp %1,%2; sbb %0,%0;"
299 + :"=r" (mask)
300 + :"g"(size),"r" (index)
301 + :"cc");
302 +diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
303 +index f46071cb2c90..3e0199ee5a2f 100644
304 +--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
305 ++++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
306 +@@ -143,6 +143,11 @@ static struct severity {
307 + SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
308 + USER
309 + ),
310 ++ MCESEV(
311 ++ PANIC, "Data load in unrecoverable area of kernel",
312 ++ SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
313 ++ KERNEL
314 ++ ),
315 + #endif
316 + MCESEV(
317 + PANIC, "Action required: unknown MCACOD",
318 +diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
319 +index 7bbd50fa72ad..c49e146d4332 100644
320 +--- a/arch/x86/kernel/cpu/mcheck/mce.c
321 ++++ b/arch/x86/kernel/cpu/mcheck/mce.c
322 +@@ -738,23 +738,25 @@ EXPORT_SYMBOL_GPL(machine_check_poll);
323 + static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
324 + struct pt_regs *regs)
325 + {
326 +- int i, ret = 0;
327 + char *tmp;
328 ++ int i;
329 +
330 + for (i = 0; i < mca_cfg.banks; i++) {
331 + m->status = mce_rdmsrl(msr_ops.status(i));
332 +- if (m->status & MCI_STATUS_VAL) {
333 +- __set_bit(i, validp);
334 +- if (quirk_no_way_out)
335 +- quirk_no_way_out(i, m, regs);
336 +- }
337 ++ if (!(m->status & MCI_STATUS_VAL))
338 ++ continue;
339 ++
340 ++ __set_bit(i, validp);
341 ++ if (quirk_no_way_out)
342 ++ quirk_no_way_out(i, m, regs);
343 +
344 + if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
345 ++ mce_read_aux(m, i);
346 + *msg = tmp;
347 +- ret = 1;
348 ++ return 1;
349 + }
350 + }
351 +- return ret;
352 ++ return 0;
353 + }
354 +
355 + /*
356 +@@ -1140,13 +1142,18 @@ void do_machine_check(struct pt_regs *regs, long error_code)
357 + lmce = m.mcgstatus & MCG_STATUS_LMCES;
358 +
359 + /*
360 ++ * Local machine check may already know that we have to panic.
361 ++ * Broadcast machine check begins rendezvous in mce_start()
362 + * Go through all banks in exclusion of the other CPUs. This way we
363 + * don't report duplicated events on shared banks because the first one
364 +- * to see it will clear it. If this is a Local MCE, then no need to
365 +- * perform rendezvous.
366 ++ * to see it will clear it.
367 + */
368 +- if (!lmce)
369 ++ if (lmce) {
370 ++ if (no_way_out)
371 ++ mce_panic("Fatal local machine check", &m, msg);
372 ++ } else {
373 + order = mce_start(&no_way_out);
374 ++ }
375 +
376 + for (i = 0; i < cfg->banks; i++) {
377 + __clear_bit(i, toclear);
378 +@@ -1222,12 +1229,17 @@ void do_machine_check(struct pt_regs *regs, long error_code)
379 + no_way_out = worst >= MCE_PANIC_SEVERITY;
380 + } else {
381 + /*
382 +- * Local MCE skipped calling mce_reign()
383 +- * If we found a fatal error, we need to panic here.
384 ++ * If there was a fatal machine check we should have
385 ++ * already called mce_panic earlier in this function.
386 ++ * Since we re-read the banks, we might have found
387 ++ * something new. Check again to see if we found a
388 ++ * fatal error. We call "mce_severity()" again to
389 ++ * make sure we have the right "msg".
390 + */
391 +- if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
392 +- mce_panic("Machine check from unknown source",
393 +- NULL, NULL);
394 ++ if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) {
395 ++ mce_severity(&m, cfg->tolerant, &msg, true);
396 ++ mce_panic("Local fatal machine check!", &m, msg);
397 ++ }
398 + }
399 +
400 + /*
401 +diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
402 +index 0bee04d41bed..b57100a2c834 100644
403 +--- a/arch/x86/kernel/quirks.c
404 ++++ b/arch/x86/kernel/quirks.c
405 +@@ -643,12 +643,19 @@ static void quirk_intel_brickland_xeon_ras_cap(struct pci_dev *pdev)
406 + /* Skylake */
407 + static void quirk_intel_purley_xeon_ras_cap(struct pci_dev *pdev)
408 + {
409 +- u32 capid0;
410 ++ u32 capid0, capid5;
411 +
412 + pci_read_config_dword(pdev, 0x84, &capid0);
413 ++ pci_read_config_dword(pdev, 0x98, &capid5);
414 +
415 +- if ((capid0 & 0xc0) == 0xc0)
416 ++ /*
417 ++ * CAPID0{7:6} indicate whether this is an advanced RAS SKU
418 ++ * CAPID5{8:5} indicate that various NVDIMM usage modes are
419 ++ * enabled, so memory machine check recovery is also enabled.
420 ++ */
421 ++ if ((capid0 & 0xc0) == 0xc0 || (capid5 & 0x1e0))
422 + static_branch_inc(&mcsafe_key);
423 ++
424 + }
425 + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0ec3, quirk_intel_brickland_xeon_ras_cap);
426 + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_ras_cap);
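[Editorial aside, not part of the patch] In the hunk above, 0xc0 masks CAPID0 bits 7:6 (advanced RAS SKU) and 0x1e0 masks CAPID5 bits 8:5 (NVDIMM usage modes), so the mcsafe static key is raised when either condition holds. The bit arithmetic, spelled out:

    (capid0 & 0xc0) == 0xc0       /* both of bits 7:6 are set                */
    (capid5 & 0x1e0) != 0         /* any of bits 8:5 is set; equivalently:   */
    ((capid5 >> 5) & 0xf) != 0    /* shift the 4-bit field down and test it  */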
427 +diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
428 +index f2142932ff0b..5bbfa2f63b8c 100644
429 +--- a/arch/x86/kernel/traps.c
430 ++++ b/arch/x86/kernel/traps.c
431 +@@ -799,16 +799,18 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
432 + char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
433 + "simd exception";
434 +
435 +- if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
436 +- return;
437 + cond_local_irq_enable(regs);
438 +
439 + if (!user_mode(regs)) {
440 +- if (!fixup_exception(regs, trapnr)) {
441 +- task->thread.error_code = error_code;
442 +- task->thread.trap_nr = trapnr;
443 ++ if (fixup_exception(regs, trapnr))
444 ++ return;
445 ++
446 ++ task->thread.error_code = error_code;
447 ++ task->thread.trap_nr = trapnr;
448 ++
449 ++ if (notify_die(DIE_TRAP, str, regs, error_code,
450 ++ trapnr, SIGFPE) != NOTIFY_STOP)
451 + die(str, regs, error_code);
452 +- }
453 + return;
454 + }
455 +
456 +diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
457 +index f92bdb9f4e46..ae9b84cae57c 100644
458 +--- a/arch/x86/mm/init.c
459 ++++ b/arch/x86/mm/init.c
460 +@@ -653,7 +653,9 @@ void __init init_mem_mapping(void)
461 + */
462 + int devmem_is_allowed(unsigned long pagenr)
463 + {
464 +- if (page_is_ram(pagenr)) {
465 ++ if (region_intersects(PFN_PHYS(pagenr), PAGE_SIZE,
466 ++ IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
467 ++ != REGION_DISJOINT) {
468 + /*
469 + * For disallowed memory regions in the low 1MB range,
470 + * request that the page be shown as all zeros.
471 +diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
472 +index ce37d5b899fe..44bd9a377ad1 100644
473 +--- a/arch/xtensa/kernel/traps.c
474 ++++ b/arch/xtensa/kernel/traps.c
475 +@@ -334,7 +334,7 @@ do_unaligned_user (struct pt_regs *regs)
476 + info.si_errno = 0;
477 + info.si_code = BUS_ADRALN;
478 + info.si_addr = (void *) regs->excvaddr;
479 +- force_sig_info(SIGSEGV, &info, current);
480 ++ force_sig_info(SIGBUS, &info, current);
481 +
482 + }
483 + #endif
484 +diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
485 +index ce2df8c9c583..7e6a43ffdcbe 100644
486 +--- a/crypto/asymmetric_keys/x509_cert_parser.c
487 ++++ b/crypto/asymmetric_keys/x509_cert_parser.c
488 +@@ -249,6 +249,15 @@ int x509_note_signature(void *context, size_t hdrlen,
489 + return -EINVAL;
490 + }
491 +
492 ++ if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0) {
493 ++ /* Discard the BIT STRING metadata */
494 ++ if (vlen < 1 || *(const u8 *)value != 0)
495 ++ return -EBADMSG;
496 ++
497 ++ value++;
498 ++ vlen--;
499 ++ }
500 ++
501 + ctx->cert->raw_sig = value;
502 + ctx->cert->raw_sig_size = vlen;
503 + return 0;
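[Editorial aside, not part of the patch] In DER, the first content octet of a BIT STRING is the count of unused trailing bits, which is 0 for an RSA signature, so the parser now skips that octet and records only the bare signature bytes. A sketch of the layout being walked, with placeholder signature bytes:

    /* 03          BIT STRING tag
     * 82 01 01    long-form length: 0x101 = 1 + 256 bytes
     * 00          "unused bits" octet, the byte discarded above
     * ab cd ...   256 signature bytes; raw_sig now points here
     */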
504 +diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
505 +index 343cad9bf7e7..ef3016a467a0 100644
506 +--- a/drivers/block/rbd.c
507 ++++ b/drivers/block/rbd.c
508 +@@ -3900,7 +3900,6 @@ static void cancel_tasks_sync(struct rbd_device *rbd_dev)
509 + {
510 + dout("%s rbd_dev %p\n", __func__, rbd_dev);
511 +
512 +- cancel_delayed_work_sync(&rbd_dev->watch_dwork);
513 + cancel_work_sync(&rbd_dev->acquired_lock_work);
514 + cancel_work_sync(&rbd_dev->released_lock_work);
515 + cancel_delayed_work_sync(&rbd_dev->lock_dwork);
516 +@@ -3918,6 +3917,7 @@ static void rbd_unregister_watch(struct rbd_device *rbd_dev)
517 + rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
518 + mutex_unlock(&rbd_dev->watch_mutex);
519 +
520 ++ cancel_delayed_work_sync(&rbd_dev->watch_dwork);
521 + ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
522 + }
523 +
524 +diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
525 +index 74b2f4a14643..3a8b9aef96a6 100644
526 +--- a/drivers/bluetooth/hci_qca.c
527 ++++ b/drivers/bluetooth/hci_qca.c
528 +@@ -939,6 +939,12 @@ static int qca_setup(struct hci_uart *hu)
529 + } else if (ret == -ENOENT) {
530 + /* No patch/nvm-config found, run with original fw/config */
531 + ret = 0;
532 ++ } else if (ret == -EAGAIN) {
533 ++ /*
534 ++ * Userspace firmware loader will return -EAGAIN in case no
535 ++ * patch/nvm-config is found, so run with original fw/config.
536 ++ */
537 ++ ret = 0;
538 + }
539 +
540 + /* Setup bdaddr */
541 +diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
542 +index feafdab734ae..4835b588b783 100644
543 +--- a/drivers/char/ipmi/ipmi_bt_sm.c
544 ++++ b/drivers/char/ipmi/ipmi_bt_sm.c
545 +@@ -522,11 +522,12 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
546 + if (status & BT_H_BUSY) /* clear a leftover H_BUSY */
547 + BT_CONTROL(BT_H_BUSY);
548 +
549 ++ bt->timeout = bt->BT_CAP_req2rsp;
550 ++
551 + /* Read BT capabilities if it hasn't been done yet */
552 + if (!bt->BT_CAP_outreqs)
553 + BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN,
554 + SI_SM_CALL_WITHOUT_DELAY);
555 +- bt->timeout = bt->BT_CAP_req2rsp;
556 + BT_SI_SM_RETURN(SI_SM_IDLE);
557 +
558 + case BT_STATE_XACTION_START:
559 +diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c
560 +index 45ad168e1496..2bb2551c6245 100644
561 +--- a/drivers/clk/at91/clk-pll.c
562 ++++ b/drivers/clk/at91/clk-pll.c
563 +@@ -132,19 +132,8 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
564 + unsigned long parent_rate)
565 + {
566 + struct clk_pll *pll = to_clk_pll(hw);
567 +- unsigned int pllr;
568 +- u16 mul;
569 +- u8 div;
570 +-
571 +- regmap_read(pll->regmap, PLL_REG(pll->id), &pllr);
572 +-
573 +- div = PLL_DIV(pllr);
574 +- mul = PLL_MUL(pllr, pll->layout);
575 +-
576 +- if (!div || !mul)
577 +- return 0;
578 +
579 +- return (parent_rate / div) * (mul + 1);
580 ++ return (parent_rate / pll->div) * (pll->mul + 1);
581 + }
582 +
583 + static long clk_pll_get_best_div_mul(struct clk_pll *pll, unsigned long rate,
584 +diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
585 +index 25c41cd9cdfc..7ecc5eac3d7f 100644
586 +--- a/drivers/clk/renesas/renesas-cpg-mssr.c
587 ++++ b/drivers/clk/renesas/renesas-cpg-mssr.c
588 +@@ -243,8 +243,9 @@ struct clk *cpg_mssr_clk_src_twocell_get(struct of_phandle_args *clkspec,
589 + dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx,
590 + PTR_ERR(clk));
591 + else
592 +- dev_dbg(dev, "clock (%u, %u) is %pC at %pCr Hz\n",
593 +- clkspec->args[0], clkspec->args[1], clk, clk);
594 ++ dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
595 ++ clkspec->args[0], clkspec->args[1], clk,
596 ++ clk_get_rate(clk));
597 + return clk;
598 + }
599 +
600 +@@ -304,7 +305,7 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
601 + if (IS_ERR_OR_NULL(clk))
602 + goto fail;
603 +
604 +- dev_dbg(dev, "Core clock %pC at %pCr Hz\n", clk, clk);
605 ++ dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
606 + priv->clks[id] = clk;
607 + return;
608 +
609 +@@ -372,7 +373,7 @@ static void __init cpg_mssr_register_mod_clk(const struct mssr_mod_clk *mod,
610 + if (IS_ERR(clk))
611 + goto fail;
612 +
613 +- dev_dbg(dev, "Module clock %pC at %pCr Hz\n", clk, clk);
614 ++ dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
615 + priv->clks[id] = clk;
616 + return;
617 +
618 +diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
619 +index 854a56781100..fd96af1d2ef0 100644
620 +--- a/drivers/cpuidle/cpuidle-powernv.c
621 ++++ b/drivers/cpuidle/cpuidle-powernv.c
622 +@@ -32,9 +32,31 @@ static struct cpuidle_state *cpuidle_state_table;
623 +
624 + static u64 stop_psscr_table[CPUIDLE_STATE_MAX];
625 +
626 +-static u64 snooze_timeout;
627 ++static u64 default_snooze_timeout;
628 + static bool snooze_timeout_en;
629 +
630 ++static u64 get_snooze_timeout(struct cpuidle_device *dev,
631 ++ struct cpuidle_driver *drv,
632 ++ int index)
633 ++{
634 ++ int i;
635 ++
636 ++ if (unlikely(!snooze_timeout_en))
637 ++ return default_snooze_timeout;
638 ++
639 ++ for (i = index + 1; i < drv->state_count; i++) {
640 ++ struct cpuidle_state *s = &drv->states[i];
641 ++ struct cpuidle_state_usage *su = &dev->states_usage[i];
642 ++
643 ++ if (s->disabled || su->disable)
644 ++ continue;
645 ++
646 ++ return s->target_residency * tb_ticks_per_usec;
647 ++ }
648 ++
649 ++ return default_snooze_timeout;
650 ++}
651 ++
652 + static int snooze_loop(struct cpuidle_device *dev,
653 + struct cpuidle_driver *drv,
654 + int index)
655 +@@ -44,7 +66,7 @@ static int snooze_loop(struct cpuidle_device *dev,
656 + local_irq_enable();
657 + set_thread_flag(TIF_POLLING_NRFLAG);
658 +
659 +- snooze_exit_time = get_tb() + snooze_timeout;
660 ++ snooze_exit_time = get_tb() + get_snooze_timeout(dev, drv, index);
661 + ppc64_runlatch_off();
662 + while (!need_resched()) {
663 + HMT_low();
664 +@@ -337,11 +359,9 @@ static int powernv_idle_probe(void)
665 + cpuidle_state_table = powernv_states;
666 + /* Device tree can indicate more idle states */
667 + max_idle_state = powernv_add_idle_states();
668 +- if (max_idle_state > 1) {
669 ++ default_snooze_timeout = TICK_USEC * tb_ticks_per_usec;
670 ++ if (max_idle_state > 1)
671 + snooze_timeout_en = true;
672 +- snooze_timeout = powernv_states[1].target_residency *
673 +- tb_ticks_per_usec;
674 +- }
675 + } else
676 + return -ENODEV;
677 +
678 +diff --git a/drivers/iio/buffer/kfifo_buf.c b/drivers/iio/buffer/kfifo_buf.c
679 +index 7ef9b13262a8..e44181f9eb36 100644
680 +--- a/drivers/iio/buffer/kfifo_buf.c
681 ++++ b/drivers/iio/buffer/kfifo_buf.c
682 +@@ -19,7 +19,7 @@ struct iio_kfifo {
683 + #define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer)
684 +
685 + static inline int __iio_allocate_kfifo(struct iio_kfifo *buf,
686 +- int bytes_per_datum, int length)
687 ++ size_t bytes_per_datum, unsigned int length)
688 + {
689 + if ((length == 0) || (bytes_per_datum == 0))
690 + return -EINVAL;
691 +@@ -71,7 +71,7 @@ static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd)
692 + return 0;
693 + }
694 +
695 +-static int iio_set_length_kfifo(struct iio_buffer *r, int length)
696 ++static int iio_set_length_kfifo(struct iio_buffer *r, unsigned int length)
697 + {
698 + /* Avoid an invalid state */
699 + if (length < 2)
700 +diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
701 +index a3279f3d2578..a79d9b340cfa 100644
702 +--- a/drivers/infiniband/hw/hfi1/hfi.h
703 ++++ b/drivers/infiniband/hw/hfi1/hfi.h
704 +@@ -1631,6 +1631,7 @@ struct cc_state *get_cc_state_protected(struct hfi1_pportdata *ppd)
705 + #define HFI1_HAS_SDMA_TIMEOUT 0x8
706 + #define HFI1_HAS_SEND_DMA 0x10 /* Supports Send DMA */
707 + #define HFI1_FORCED_FREEZE 0x80 /* driver forced freeze mode */
708 ++#define HFI1_SHUTDOWN 0x100 /* device is shutting down */
709 +
710 + /* IB dword length mask in PBC (lower 11 bits); same for all chips */
711 + #define HFI1_PBC_LENGTH_MASK ((1 << 11) - 1)
712 +diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
713 +index ae1f90ddd4e8..c81c44525dd5 100644
714 +--- a/drivers/infiniband/hw/hfi1/init.c
715 ++++ b/drivers/infiniband/hw/hfi1/init.c
716 +@@ -857,6 +857,10 @@ static void shutdown_device(struct hfi1_devdata *dd)
717 + unsigned pidx;
718 + int i;
719 +
720 ++ if (dd->flags & HFI1_SHUTDOWN)
721 ++ return;
722 ++ dd->flags |= HFI1_SHUTDOWN;
723 ++
724 + for (pidx = 0; pidx < dd->num_pports; ++pidx) {
725 + ppd = dd->pport + pidx;
726 +
727 +@@ -1168,6 +1172,7 @@ void hfi1_disable_after_error(struct hfi1_devdata *dd)
728 +
729 + static void remove_one(struct pci_dev *);
730 + static int init_one(struct pci_dev *, const struct pci_device_id *);
731 ++static void shutdown_one(struct pci_dev *);
732 +
733 + #define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
734 + #define PFX DRIVER_NAME ": "
735 +@@ -1184,6 +1189,7 @@ static struct pci_driver hfi1_pci_driver = {
736 + .name = DRIVER_NAME,
737 + .probe = init_one,
738 + .remove = remove_one,
739 ++ .shutdown = shutdown_one,
740 + .id_table = hfi1_pci_tbl,
741 + .err_handler = &hfi1_pci_err_handler,
742 + };
743 +@@ -1590,6 +1596,13 @@ static void remove_one(struct pci_dev *pdev)
744 + postinit_cleanup(dd);
745 + }
746 +
747 ++static void shutdown_one(struct pci_dev *pdev)
748 ++{
749 ++ struct hfi1_devdata *dd = pci_get_drvdata(pdev);
750 ++
751 ++ shutdown_device(dd);
752 ++}
753 ++
754 + /**
755 + * hfi1_create_rcvhdrq - create a receive header queue
756 + * @dd: the hfi1_ib device
757 +diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
758 +index 18d309e40f1b..d9323d7c479c 100644
759 +--- a/drivers/infiniband/hw/mlx4/mad.c
760 ++++ b/drivers/infiniband/hw/mlx4/mad.c
761 +@@ -1897,7 +1897,6 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
762 + "buf:%lld\n", wc.wr_id);
763 + break;
764 + default:
765 +- BUG_ON(1);
766 + break;
767 + }
768 + } else {
769 +diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
770 +index fc62a7ded734..a19ebb19952e 100644
771 +--- a/drivers/infiniband/hw/mlx5/cq.c
772 ++++ b/drivers/infiniband/hw/mlx5/cq.c
773 +@@ -645,7 +645,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
774 + }
775 +
776 + static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
777 +- struct ib_wc *wc)
778 ++ struct ib_wc *wc, bool is_fatal_err)
779 + {
780 + struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
781 + struct mlx5_ib_wc *soft_wc, *next;
782 +@@ -658,6 +658,10 @@ static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
783 + mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n",
784 + cq->mcq.cqn);
785 +
786 ++ if (unlikely(is_fatal_err)) {
787 ++ soft_wc->wc.status = IB_WC_WR_FLUSH_ERR;
788 ++ soft_wc->wc.vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
789 ++ }
790 + wc[npolled++] = soft_wc->wc;
791 + list_del(&soft_wc->list);
792 + kfree(soft_wc);
793 +@@ -678,12 +682,17 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
794 +
795 + spin_lock_irqsave(&cq->lock, flags);
796 + if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
797 +- mlx5_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
798 ++ /* make sure no soft wqe's are waiting */
799 ++ if (unlikely(!list_empty(&cq->wc_list)))
800 ++ soft_polled = poll_soft_wc(cq, num_entries, wc, true);
801 ++
802 ++ mlx5_ib_poll_sw_comp(cq, num_entries - soft_polled,
803 ++ wc + soft_polled, &npolled);
804 + goto out;
805 + }
806 +
807 + if (unlikely(!list_empty(&cq->wc_list)))
808 +- soft_polled = poll_soft_wc(cq, num_entries, wc);
809 ++ soft_polled = poll_soft_wc(cq, num_entries, wc, false);
810 +
811 + for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
812 + if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
813 +diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
814 +index a3e21a25cea5..ef092cca092e 100644
815 +--- a/drivers/infiniband/hw/qib/qib.h
816 ++++ b/drivers/infiniband/hw/qib/qib.h
817 +@@ -1250,6 +1250,7 @@ static inline struct qib_ibport *to_iport(struct ib_device *ibdev, u8 port)
818 + #define QIB_BADINTR 0x8000 /* severe interrupt problems */
819 + #define QIB_DCA_ENABLED 0x10000 /* Direct Cache Access enabled */
820 + #define QIB_HAS_QSFP 0x20000 /* device (card instance) has QSFP */
821 ++#define QIB_SHUTDOWN 0x40000 /* device is shutting down */
822 +
823 + /*
824 + * values for ppd->lflags (_ib_port_ related flags)
825 +@@ -1448,8 +1449,7 @@ u64 qib_sps_ints(void);
826 + /*
827 + * dma_addr wrappers - all 0's invalid for hw
828 + */
829 +-dma_addr_t qib_map_page(struct pci_dev *, struct page *, unsigned long,
830 +- size_t, int);
831 ++int qib_map_page(struct pci_dev *d, struct page *p, dma_addr_t *daddr);
832 + const char *qib_get_unit_name(int unit);
833 + const char *qib_get_card_name(struct rvt_dev_info *rdi);
834 + struct pci_dev *qib_get_pci_dev(struct rvt_dev_info *rdi);
835 +diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
836 +index 382466a90da7..cc6a92316c7c 100644
837 +--- a/drivers/infiniband/hw/qib/qib_file_ops.c
838 ++++ b/drivers/infiniband/hw/qib/qib_file_ops.c
839 +@@ -364,6 +364,8 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
840 + goto done;
841 + }
842 + for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
843 ++ dma_addr_t daddr;
844 ++
845 + for (; ntids--; tid++) {
846 + if (tid == tidcnt)
847 + tid = 0;
848 +@@ -380,12 +382,14 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
849 + ret = -ENOMEM;
850 + break;
851 + }
852 ++ ret = qib_map_page(dd->pcidev, pagep[i], &daddr);
853 ++ if (ret)
854 ++ break;
855 ++
856 + tidlist[i] = tid + tidoff;
857 + /* we "know" system pages and TID pages are same size */
858 + dd->pageshadow[ctxttid + tid] = pagep[i];
859 +- dd->physshadow[ctxttid + tid] =
860 +- qib_map_page(dd->pcidev, pagep[i], 0, PAGE_SIZE,
861 +- PCI_DMA_FROMDEVICE);
862 ++ dd->physshadow[ctxttid + tid] = daddr;
863 + /*
864 + * don't need atomic or it's overhead
865 + */
866 +diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
867 +index 1730aa839a47..caf7c5120b0a 100644
868 +--- a/drivers/infiniband/hw/qib/qib_init.c
869 ++++ b/drivers/infiniband/hw/qib/qib_init.c
870 +@@ -878,6 +878,10 @@ static void qib_shutdown_device(struct qib_devdata *dd)
871 + struct qib_pportdata *ppd;
872 + unsigned pidx;
873 +
874 ++ if (dd->flags & QIB_SHUTDOWN)
875 ++ return;
876 ++ dd->flags |= QIB_SHUTDOWN;
877 ++
878 + for (pidx = 0; pidx < dd->num_pports; ++pidx) {
879 + ppd = dd->pport + pidx;
880 +
881 +@@ -1223,6 +1227,7 @@ void qib_disable_after_error(struct qib_devdata *dd)
882 +
883 + static void qib_remove_one(struct pci_dev *);
884 + static int qib_init_one(struct pci_dev *, const struct pci_device_id *);
885 ++static void qib_shutdown_one(struct pci_dev *);
886 +
887 + #define DRIVER_LOAD_MSG "Intel " QIB_DRV_NAME " loaded: "
888 + #define PFX QIB_DRV_NAME ": "
889 +@@ -1240,6 +1245,7 @@ static struct pci_driver qib_driver = {
890 + .name = QIB_DRV_NAME,
891 + .probe = qib_init_one,
892 + .remove = qib_remove_one,
893 ++ .shutdown = qib_shutdown_one,
894 + .id_table = qib_pci_tbl,
895 + .err_handler = &qib_pci_err_handler,
896 + };
897 +@@ -1591,6 +1597,13 @@ static void qib_remove_one(struct pci_dev *pdev)
898 + qib_postinit_cleanup(dd);
899 + }
900 +
901 ++static void qib_shutdown_one(struct pci_dev *pdev)
902 ++{
903 ++ struct qib_devdata *dd = pci_get_drvdata(pdev);
904 ++
905 ++ qib_shutdown_device(dd);
906 ++}
907 ++
908 + /**
909 + * qib_create_rcvhdrq - create a receive header queue
910 + * @dd: the qlogic_ib device
911 +diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
912 +index 75f08624ac05..4715edff5488 100644
913 +--- a/drivers/infiniband/hw/qib/qib_user_pages.c
914 ++++ b/drivers/infiniband/hw/qib/qib_user_pages.c
915 +@@ -98,23 +98,27 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
916 + *
917 + * I'm sure we won't be so lucky with other iommu's, so FIXME.
918 + */
919 +-dma_addr_t qib_map_page(struct pci_dev *hwdev, struct page *page,
920 +- unsigned long offset, size_t size, int direction)
921 ++int qib_map_page(struct pci_dev *hwdev, struct page *page, dma_addr_t *daddr)
922 + {
923 + dma_addr_t phys;
924 +
925 +- phys = pci_map_page(hwdev, page, offset, size, direction);
926 ++ phys = pci_map_page(hwdev, page, 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
927 ++ if (pci_dma_mapping_error(hwdev, phys))
928 ++ return -ENOMEM;
929 +
930 +- if (phys == 0) {
931 +- pci_unmap_page(hwdev, phys, size, direction);
932 +- phys = pci_map_page(hwdev, page, offset, size, direction);
933 ++ if (!phys) {
934 ++ pci_unmap_page(hwdev, phys, PAGE_SIZE, PCI_DMA_FROMDEVICE);
935 ++ phys = pci_map_page(hwdev, page, 0, PAGE_SIZE,
936 ++ PCI_DMA_FROMDEVICE);
937 ++ if (pci_dma_mapping_error(hwdev, phys))
938 ++ return -ENOMEM;
939 + /*
940 + * FIXME: If we get 0 again, we should keep this page,
941 + * map another, then free the 0 page.
942 + */
943 + }
944 +-
945 +- return phys;
946 ++ *daddr = phys;
947 ++ return 0;
948 + }
949 +
950 + /**
951 +diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
952 +index b879d21b7548..02a5e2d7e574 100644
953 +--- a/drivers/infiniband/ulp/isert/ib_isert.c
954 ++++ b/drivers/infiniband/ulp/isert/ib_isert.c
955 +@@ -879,15 +879,9 @@ isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_des
956 + }
957 +
958 + static void
959 +-isert_create_send_desc(struct isert_conn *isert_conn,
960 +- struct isert_cmd *isert_cmd,
961 +- struct iser_tx_desc *tx_desc)
962 ++__isert_create_send_desc(struct isert_device *device,
963 ++ struct iser_tx_desc *tx_desc)
964 + {
965 +- struct isert_device *device = isert_conn->device;
966 +- struct ib_device *ib_dev = device->ib_device;
967 +-
968 +- ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
969 +- ISER_HEADERS_LEN, DMA_TO_DEVICE);
970 +
971 + memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl));
972 + tx_desc->iser_header.flags = ISCSI_CTRL;
973 +@@ -900,6 +894,20 @@ isert_create_send_desc(struct isert_conn *isert_conn,
974 + }
975 + }
976 +
977 ++static void
978 ++isert_create_send_desc(struct isert_conn *isert_conn,
979 ++ struct isert_cmd *isert_cmd,
980 ++ struct iser_tx_desc *tx_desc)
981 ++{
982 ++ struct isert_device *device = isert_conn->device;
983 ++ struct ib_device *ib_dev = device->ib_device;
984 ++
985 ++ ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
986 ++ ISER_HEADERS_LEN, DMA_TO_DEVICE);
987 ++
988 ++ __isert_create_send_desc(device, tx_desc);
989 ++}
990 ++
991 + static int
992 + isert_init_tx_hdrs(struct isert_conn *isert_conn,
993 + struct iser_tx_desc *tx_desc)
994 +@@ -987,7 +995,7 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
995 + struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc;
996 + int ret;
997 +
998 +- isert_create_send_desc(isert_conn, NULL, tx_desc);
999 ++ __isert_create_send_desc(device, tx_desc);
1000 +
1001 + memcpy(&tx_desc->iscsi_header, &login->rsp[0],
1002 + sizeof(struct iscsi_hdr));
1003 +@@ -2082,7 +2090,7 @@ isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
1004 +
1005 + sig_attrs->check_mask =
1006 + (se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD ? 0xc0 : 0) |
1007 +- (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
1008 ++ (se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG ? 0x30 : 0) |
1009 + (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
1010 + return 0;
1011 + }
1012 +diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h
1013 +index c0ec26118732..83dd0ce3ad2a 100644
1014 +--- a/drivers/input/mouse/elan_i2c.h
1015 ++++ b/drivers/input/mouse/elan_i2c.h
1016 +@@ -27,6 +27,8 @@
1017 + #define ETP_DISABLE_POWER 0x0001
1018 + #define ETP_PRESSURE_OFFSET 25
1019 +
1020 ++#define ETP_CALIBRATE_MAX_LEN 3
1021 ++
1022 + /* IAP Firmware handling */
1023 + #define ETP_PRODUCT_ID_FORMAT_STRING "%d.0"
1024 + #define ETP_FW_NAME "elan_i2c_" ETP_PRODUCT_ID_FORMAT_STRING ".bin"
1025 +diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
1026 +index aeb8250ab079..97f6e05cffce 100644
1027 +--- a/drivers/input/mouse/elan_i2c_core.c
1028 ++++ b/drivers/input/mouse/elan_i2c_core.c
1029 +@@ -595,7 +595,7 @@ static ssize_t calibrate_store(struct device *dev,
1030 + int tries = 20;
1031 + int retval;
1032 + int error;
1033 +- u8 val[3];
1034 ++ u8 val[ETP_CALIBRATE_MAX_LEN];
1035 +
1036 + retval = mutex_lock_interruptible(&data->sysfs_mutex);
1037 + if (retval)
1038 +@@ -1250,6 +1250,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
1039 + { "ELAN060C", 0 },
1040 + { "ELAN0611", 0 },
1041 + { "ELAN0612", 0 },
1042 ++ { "ELAN0618", 0 },
1043 + { "ELAN1000", 0 },
1044 + { }
1045 + };
1046 +diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c
1047 +index 05b8695a6369..d21bd55f3c42 100644
1048 +--- a/drivers/input/mouse/elan_i2c_smbus.c
1049 ++++ b/drivers/input/mouse/elan_i2c_smbus.c
1050 +@@ -56,7 +56,7 @@
1051 + static int elan_smbus_initialize(struct i2c_client *client)
1052 + {
1053 + u8 check[ETP_SMBUS_HELLOPACKET_LEN] = { 0x55, 0x55, 0x55, 0x55, 0x55 };
1054 +- u8 values[ETP_SMBUS_HELLOPACKET_LEN] = { 0, 0, 0, 0, 0 };
1055 ++ u8 values[I2C_SMBUS_BLOCK_MAX] = {0};
1056 + int len, error;
1057 +
1058 + /* Get hello packet */
1059 +@@ -117,12 +117,16 @@ static int elan_smbus_calibrate(struct i2c_client *client)
1060 + static int elan_smbus_calibrate_result(struct i2c_client *client, u8 *val)
1061 + {
1062 + int error;
1063 ++ u8 buf[I2C_SMBUS_BLOCK_MAX] = {0};
1064 ++
1065 ++ BUILD_BUG_ON(ETP_CALIBRATE_MAX_LEN > sizeof(buf));
1066 +
1067 + error = i2c_smbus_read_block_data(client,
1068 +- ETP_SMBUS_CALIBRATE_QUERY, val);
1069 ++ ETP_SMBUS_CALIBRATE_QUERY, buf);
1070 + if (error < 0)
1071 + return error;
1072 +
1073 ++ memcpy(val, buf, ETP_CALIBRATE_MAX_LEN);
1074 + return 0;
1075 + }
1076 +
1077 +@@ -470,6 +474,8 @@ static int elan_smbus_get_report(struct i2c_client *client, u8 *report)
1078 + {
1079 + int len;
1080 +
1081 ++ BUILD_BUG_ON(I2C_SMBUS_BLOCK_MAX > ETP_SMBUS_REPORT_LEN);
1082 ++
1083 + len = i2c_smbus_read_block_data(client,
1084 + ETP_SMBUS_PACKET_QUERY,
1085 + &report[ETP_SMBUS_REPORT_OFFSET]);
1086 +diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
1087 +index c519c0b09568..4e77adbfa835 100644
1088 +--- a/drivers/input/mouse/elantech.c
1089 ++++ b/drivers/input/mouse/elantech.c
1090 +@@ -800,7 +800,7 @@ static int elantech_packet_check_v4(struct psmouse *psmouse)
1091 + else if (ic_version == 7 && etd->samples[1] == 0x2A)
1092 + sanity_check = ((packet[3] & 0x1c) == 0x10);
1093 + else
1094 +- sanity_check = ((packet[0] & 0x0c) == 0x04 &&
1095 ++ sanity_check = ((packet[0] & 0x08) == 0x00 &&
1096 + (packet[3] & 0x1c) == 0x10);
1097 +
1098 + if (!sanity_check)
1099 +@@ -1173,6 +1173,12 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
1100 + { }
1101 + };
1102 +
1103 ++static const char * const middle_button_pnp_ids[] = {
1104 ++ "LEN2131", /* ThinkPad P52 w/ NFC */
1105 ++ "LEN2132", /* ThinkPad P52 */
1106 ++ NULL
1107 ++};
1108 ++
1109 + /*
1110 + * Set the appropriate event bits for the input subsystem
1111 + */
1112 +@@ -1192,7 +1198,8 @@ static int elantech_set_input_params(struct psmouse *psmouse)
1113 + __clear_bit(EV_REL, dev->evbit);
1114 +
1115 + __set_bit(BTN_LEFT, dev->keybit);
1116 +- if (dmi_check_system(elantech_dmi_has_middle_button))
1117 ++ if (dmi_check_system(elantech_dmi_has_middle_button) ||
1118 ++ psmouse_matches_pnp_id(psmouse, middle_button_pnp_ids))
1119 + __set_bit(BTN_MIDDLE, dev->keybit);
1120 + __set_bit(BTN_RIGHT, dev->keybit);
1121 +
1122 +diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
1123 +index 0b678b5da4c4..0f0374a4ac6e 100644
1124 +--- a/drivers/md/dm-thin.c
1125 ++++ b/drivers/md/dm-thin.c
1126 +@@ -1384,6 +1384,8 @@ static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
1127 +
1128 + static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
1129 +
1130 ++static void requeue_bios(struct pool *pool);
1131 ++
1132 + static void check_for_space(struct pool *pool)
1133 + {
1134 + int r;
1135 +@@ -1396,8 +1398,10 @@ static void check_for_space(struct pool *pool)
1136 + if (r)
1137 + return;
1138 +
1139 +- if (nr_free)
1140 ++ if (nr_free) {
1141 + set_pool_mode(pool, PM_WRITE);
1142 ++ requeue_bios(pool);
1143 ++ }
1144 + }
1145 +
1146 + /*
1147 +@@ -1474,7 +1478,10 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
1148 +
1149 + r = dm_pool_alloc_data_block(pool->pmd, result);
1150 + if (r) {
1151 +- metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
1152 ++ if (r == -ENOSPC)
1153 ++ set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
1154 ++ else
1155 ++ metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
1156 + return r;
1157 + }
1158 +
1159 +diff --git a/drivers/md/md.c b/drivers/md/md.c
1160 +index cae8f3c12e32..3bb985679f34 100644
1161 +--- a/drivers/md/md.c
1162 ++++ b/drivers/md/md.c
1163 +@@ -2694,7 +2694,8 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
1164 + err = 0;
1165 + }
1166 + } else if (cmd_match(buf, "re-add")) {
1167 +- if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1)) {
1168 ++ if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
1169 ++ rdev->saved_raid_disk >= 0) {
1170 + /* clear_bit is performed _after_ all the devices
1171 + * have their local Faulty bit cleared. If any writes
1172 + * happen in the meantime in the local node, they
1173 +@@ -8272,6 +8273,7 @@ static int remove_and_add_spares(struct mddev *mddev,
1174 + if (mddev->pers->hot_remove_disk(
1175 + mddev, rdev) == 0) {
1176 + sysfs_unlink_rdev(mddev, rdev);
1177 ++ rdev->saved_raid_disk = rdev->raid_disk;
1178 + rdev->raid_disk = -1;
1179 + removed++;
1180 + }
1181 +diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
1182 +index 01511e5a5566..2f054db8807b 100644
1183 +--- a/drivers/media/dvb-core/dvb_frontend.c
1184 ++++ b/drivers/media/dvb-core/dvb_frontend.c
1185 +@@ -251,8 +251,20 @@ static void dvb_frontend_add_event(struct dvb_frontend *fe,
1186 + wake_up_interruptible (&events->wait_queue);
1187 + }
1188 +
1189 ++static int dvb_frontend_test_event(struct dvb_frontend_private *fepriv,
1190 ++ struct dvb_fe_events *events)
1191 ++{
1192 ++ int ret;
1193 ++
1194 ++ up(&fepriv->sem);
1195 ++ ret = events->eventw != events->eventr;
1196 ++ down(&fepriv->sem);
1197 ++
1198 ++ return ret;
1199 ++}
1200 ++
1201 + static int dvb_frontend_get_event(struct dvb_frontend *fe,
1202 +- struct dvb_frontend_event *event, int flags)
1203 ++ struct dvb_frontend_event *event, int flags)
1204 + {
1205 + struct dvb_frontend_private *fepriv = fe->frontend_priv;
1206 + struct dvb_fe_events *events = &fepriv->events;
1207 +@@ -270,13 +282,8 @@ static int dvb_frontend_get_event(struct dvb_frontend *fe,
1208 + if (flags & O_NONBLOCK)
1209 + return -EWOULDBLOCK;
1210 +
1211 +- up(&fepriv->sem);
1212 +-
1213 +- ret = wait_event_interruptible (events->wait_queue,
1214 +- events->eventw != events->eventr);
1215 +-
1216 +- if (down_interruptible (&fepriv->sem))
1217 +- return -ERESTARTSYS;
1218 ++ ret = wait_event_interruptible(events->wait_queue,
1219 ++ dvb_frontend_test_event(fepriv, events));
1220 +
1221 + if (ret < 0)
1222 + return ret;
1223 +diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
1224 +index 921cf1edb3b1..69156affd0ae 100644
1225 +--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
1226 ++++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
1227 +@@ -864,6 +864,9 @@ struct usb_device_id cx231xx_id_table[] = {
1228 + .driver_info = CX231XX_BOARD_CNXT_RDE_250},
1229 + {USB_DEVICE(0x0572, 0x58A0),
1230 + .driver_info = CX231XX_BOARD_CNXT_RDU_250},
1231 ++ /* AverMedia DVD EZMaker 7 */
1232 ++ {USB_DEVICE(0x07ca, 0xc039),
1233 ++ .driver_info = CX231XX_BOARD_CNXT_VIDEO_GRABBER},
1234 + {USB_DEVICE(0x2040, 0xb110),
1235 + .driver_info = CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL},
1236 + {USB_DEVICE(0x2040, 0xb111),
1237 +diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
1238 +index a9fc64557c53..f1f697296ca0 100644
1239 +--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
1240 ++++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
1241 +@@ -864,7 +864,7 @@ static int put_v4l2_ext_controls32(struct file *file,
1242 + get_user(kcontrols, &kp->controls))
1243 + return -EFAULT;
1244 +
1245 +- if (!count)
1246 ++ if (!count || count > (U32_MAX/sizeof(*ucontrols)))
1247 + return 0;
1248 + if (get_user(p, &up->controls))
1249 + return -EFAULT;
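[Editorial aside, not part of the patch] The added bound is a standard multiplication-overflow guard: count comes from user space and feeds size calculations of the form count * sizeof(*ucontrols), so counts that would make the product wrap are rejected up front. The same guard in isolation (a minimal sketch):

    if (!count || count > U32_MAX / sizeof(*ucontrols))
        return 0;                           /* product would overflow   */
    size = count * sizeof(*ucontrols);      /* now known not to wrap    */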
1250 +diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
1251 +index 70c646b0097d..19ac8bc8e7ea 100644
1252 +--- a/drivers/mfd/intel-lpss.c
1253 ++++ b/drivers/mfd/intel-lpss.c
1254 +@@ -275,11 +275,11 @@ static void intel_lpss_init_dev(const struct intel_lpss *lpss)
1255 +
1256 + intel_lpss_deassert_reset(lpss);
1257 +
1258 ++ intel_lpss_set_remap_addr(lpss);
1259 ++
1260 + if (!intel_lpss_has_idma(lpss))
1261 + return;
1262 +
1263 +- intel_lpss_set_remap_addr(lpss);
1264 +-
1265 + /* Make sure that SPI multiblock DMA transfers are re-enabled */
1266 + if (lpss->type == LPSS_DEV_SPI)
1267 + writel(value, lpss->priv + LPSS_PRIV_SSP_REG);
1268 +diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
1269 +index 107c05b3ddbb..33d025e42793 100644
1270 +--- a/drivers/mtd/chips/cfi_cmdset_0002.c
1271 ++++ b/drivers/mtd/chips/cfi_cmdset_0002.c
1272 +@@ -1876,7 +1876,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1273 + if (time_after(jiffies, timeo) && !chip_ready(map, adr))
1274 + break;
1275 +
1276 +- if (chip_ready(map, adr)) {
1277 ++ if (chip_good(map, adr, datum)) {
1278 + xip_enable(map, chip, adr);
1279 + goto op_done;
1280 + }
1281 +@@ -2531,7 +2531,7 @@ static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1282 +
1283 + struct ppb_lock {
1284 + struct flchip *chip;
1285 +- loff_t offset;
1286 ++ unsigned long adr;
1287 + int locked;
1288 + };
1289 +
1290 +@@ -2549,8 +2549,9 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
1291 + unsigned long timeo;
1292 + int ret;
1293 +
1294 ++ adr += chip->start;
1295 + mutex_lock(&chip->mutex);
1296 +- ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
1297 ++ ret = get_chip(map, chip, adr, FL_LOCKING);
1298 + if (ret) {
1299 + mutex_unlock(&chip->mutex);
1300 + return ret;
1301 +@@ -2568,8 +2569,8 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
1302 +
1303 + if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1304 + chip->state = FL_LOCKING;
1305 +- map_write(map, CMD(0xA0), chip->start + adr);
1306 +- map_write(map, CMD(0x00), chip->start + adr);
1307 ++ map_write(map, CMD(0xA0), adr);
1308 ++ map_write(map, CMD(0x00), adr);
1309 + } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1310 + /*
1311 + * Unlocking of one specific sector is not supported, so we
1312 +@@ -2607,7 +2608,7 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
1313 + map_write(map, CMD(0x00), chip->start);
1314 +
1315 + chip->state = FL_READY;
1316 +- put_chip(map, chip, adr + chip->start);
1317 ++ put_chip(map, chip, adr);
1318 + mutex_unlock(&chip->mutex);
1319 +
1320 + return ret;
1321 +@@ -2664,9 +2665,9 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
1322 + * sectors shall be unlocked, so lets keep their locking
1323 + * status at "unlocked" (locked=0) for the final re-locking.
1324 + */
1325 +- if ((adr < ofs) || (adr >= (ofs + len))) {
1326 ++ if ((offset < ofs) || (offset >= (ofs + len))) {
1327 + sect[sectors].chip = &cfi->chips[chipnum];
1328 +- sect[sectors].offset = offset;
1329 ++ sect[sectors].adr = adr;
1330 + sect[sectors].locked = do_ppb_xxlock(
1331 + map, &cfi->chips[chipnum], adr, 0,
1332 + DO_XXLOCK_ONEBLOCK_GETLOCK);
1333 +@@ -2680,6 +2681,8 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
1334 + i++;
1335 +
1336 + if (adr >> cfi->chipshift) {
1337 ++ if (offset >= (ofs + len))
1338 ++ break;
1339 + adr = 0;
1340 + chipnum++;
1341 +
1342 +@@ -2710,7 +2713,7 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
1343 + */
1344 + for (i = 0; i < sectors; i++) {
1345 + if (sect[i].locked)
1346 +- do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0,
1347 ++ do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0,
1348 + DO_XXLOCK_ONEBLOCK_LOCK);
1349 + }
1350 +
1351 +diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
1352 +index 6cb5ca52cb5a..ad2b57c6b13f 100644
1353 +--- a/drivers/mtd/ubi/build.c
1354 ++++ b/drivers/mtd/ubi/build.c
1355 +@@ -1137,6 +1137,9 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
1356 + */
1357 + get_device(&ubi->dev);
1358 +
1359 ++#ifdef CONFIG_MTD_UBI_FASTMAP
1360 ++ cancel_work_sync(&ubi->fm_work);
1361 ++#endif
1362 + ubi_debugfs_exit_dev(ubi);
1363 + uif_close(ubi);
1364 +
1365 +diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
1366 +index 388e46be6ad9..d0884bd9d955 100644
1367 +--- a/drivers/mtd/ubi/eba.c
1368 ++++ b/drivers/mtd/ubi/eba.c
1369 +@@ -490,6 +490,82 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
1370 + return err;
1371 + }
1372 +
1373 ++#ifdef CONFIG_MTD_UBI_FASTMAP
1374 ++/**
1375 ++ * check_mapping - check and fixup a mapping
1376 ++ * @ubi: UBI device description object
1377 ++ * @vol: volume description object
1378 ++ * @lnum: logical eraseblock number
1379 ++ * @pnum: physical eraseblock number
1380 ++ *
1381 ++ * Checks whether a given mapping is valid. Fastmap cannot track LEB unmap
1382 ++ * operations, if such an operation is interrupted the mapping still looks
1383 ++ * good, but upon first read an ECC is reported to the upper layer.
1384 + * Normally during the full-scan at attach time this is fixed, for Fastmap
1385 ++ * we have to deal with it while reading.
1386 + * If the PEB behind a LEB shows this symptom we change the mapping to
1387 ++ * %UBI_LEB_UNMAPPED and schedule the PEB for erasure.
1388 ++ *
1389 ++ * Returns 0 on success, negative error code in case of failure.
1390 ++ */
1391 ++static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
1392 ++ int *pnum)
1393 ++{
1394 ++ int err;
1395 ++ struct ubi_vid_io_buf *vidb;
1396 ++
1397 ++ if (!ubi->fast_attach)
1398 ++ return 0;
1399 ++
1400 ++ vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
1401 ++ if (!vidb)
1402 ++ return -ENOMEM;
1403 ++
1404 ++ err = ubi_io_read_vid_hdr(ubi, *pnum, vidb, 0);
1405 ++ if (err > 0 && err != UBI_IO_BITFLIPS) {
1406 ++ int torture = 0;
1407 ++
1408 ++ switch (err) {
1409 ++ case UBI_IO_FF:
1410 ++ case UBI_IO_FF_BITFLIPS:
1411 ++ case UBI_IO_BAD_HDR:
1412 ++ case UBI_IO_BAD_HDR_EBADMSG:
1413 ++ break;
1414 ++ default:
1415 ++ ubi_assert(0);
1416 ++ }
1417 ++
1418 ++ if (err == UBI_IO_BAD_HDR_EBADMSG || err == UBI_IO_FF_BITFLIPS)
1419 ++ torture = 1;
1420 ++
1421 ++ down_read(&ubi->fm_eba_sem);
1422 ++ vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED;
1423 ++ up_read(&ubi->fm_eba_sem);
1424 ++ ubi_wl_put_peb(ubi, vol->vol_id, lnum, *pnum, torture);
1425 ++
1426 ++ *pnum = UBI_LEB_UNMAPPED;
1427 ++ } else if (err < 0) {
1428 ++ ubi_err(ubi, "unable to read VID header back from PEB %i: %i",
1429 ++ *pnum, err);
1430 ++
1431 ++ goto out_free;
1432 ++ }
1433 ++
1434 ++ err = 0;
1435 ++
1436 ++out_free:
1437 ++ ubi_free_vid_buf(vidb);
1438 ++
1439 ++ return err;
1440 ++}
1441 ++#else
1442 ++static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
1443 ++ int *pnum)
1444 ++{
1445 ++ return 0;
1446 ++}
1447 ++#endif
1448 ++
1449 + /**
1450 + * ubi_eba_read_leb - read data.
1451 + * @ubi: UBI device description object
1452 +@@ -522,7 +598,13 @@ int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
1453 + return err;
1454 +
1455 + pnum = vol->eba_tbl->entries[lnum].pnum;
1456 +- if (pnum < 0) {
1457 ++ if (pnum >= 0) {
1458 ++ err = check_mapping(ubi, vol, lnum, &pnum);
1459 ++ if (err < 0)
1460 ++ goto out_unlock;
1461 ++ }
1462 ++
1463 ++ if (pnum == UBI_LEB_UNMAPPED) {
1464 + /*
1465 + * The logical eraseblock is not mapped, fill the whole buffer
1466 + * with 0xFF bytes. The exception is static volumes for which
1467 +@@ -930,6 +1012,12 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
1468 + return err;
1469 +
1470 + pnum = vol->eba_tbl->entries[lnum].pnum;
1471 ++ if (pnum >= 0) {
1472 ++ err = check_mapping(ubi, vol, lnum, &pnum);
1473 ++ if (err < 0)
1474 ++ goto out;
1475 ++ }
1476 ++
1477 + if (pnum >= 0) {
1478 + dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
1479 + len, offset, vol_id, lnum, pnum);
1480 +diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
1481 +index 668b46202507..23a6986d512b 100644
1482 +--- a/drivers/mtd/ubi/wl.c
1483 ++++ b/drivers/mtd/ubi/wl.c
1484 +@@ -1505,6 +1505,7 @@ int ubi_thread(void *u)
1485 + }
1486 +
1487 + dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
1488 ++ ubi->thread_enabled = 0;
1489 + return 0;
1490 + }
1491 +
1492 +@@ -1514,9 +1515,6 @@ int ubi_thread(void *u)
1493 + */
1494 + static void shutdown_work(struct ubi_device *ubi)
1495 + {
1496 +-#ifdef CONFIG_MTD_UBI_FASTMAP
1497 +- flush_work(&ubi->fm_work);
1498 +-#endif
1499 + while (!list_empty(&ubi->works)) {
1500 + struct ubi_work *wrk;
1501 +
1502 +diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
1503 +index feb61eaffe32..3086cae62fdc 100644
1504 +--- a/drivers/net/usb/cdc_ncm.c
1505 ++++ b/drivers/net/usb/cdc_ncm.c
1506 +@@ -1124,7 +1124,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
1507 + * accordingly. Otherwise, we should check here.
1508 + */
1509 + if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
1510 +- delayed_ndp_size = ctx->max_ndp_size;
1511 ++ delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus);
1512 + else
1513 + delayed_ndp_size = 0;
1514 +
1515 +@@ -1257,7 +1257,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
1516 + /* If requested, put NDP at end of frame. */
1517 + if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
1518 + nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
1519 +- cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_max);
1520 ++ cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_max - ctx->max_ndp_size);
1521 + nth16->wNdpIndex = cpu_to_le16(skb_out->len);
1522 + memcpy(skb_put(skb_out, ctx->max_ndp_size), ctx->delayed_ndp16, ctx->max_ndp_size);
1523 +
1524 +diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
1525 +index 8311a93cabd8..c1a65ce31243 100644
1526 +--- a/drivers/nvdimm/bus.c
1527 ++++ b/drivers/nvdimm/bus.c
1528 +@@ -505,14 +505,18 @@ int nvdimm_revalidate_disk(struct gendisk *disk)
1529 + {
1530 + struct device *dev = disk_to_dev(disk)->parent;
1531 + struct nd_region *nd_region = to_nd_region(dev->parent);
1532 +- const char *pol = nd_region->ro ? "only" : "write";
1533 ++ int disk_ro = get_disk_ro(disk);
1534 +
1535 +- if (nd_region->ro == get_disk_ro(disk))
1536 ++ /*
1537 ++ * Upgrade to read-only if the region is read-only preserve as
1538 ++ * read-only if the disk is already read-only.
1539 ++ */
1540 ++ if (disk_ro || nd_region->ro == disk_ro)
1541 + return 0;
1542 +
1543 +- dev_info(dev, "%s read-%s, marking %s read-%s\n",
1544 +- dev_name(&nd_region->dev), pol, disk->disk_name, pol);
1545 +- set_disk_ro(disk, nd_region->ro);
1546 ++ dev_info(dev, "%s read-only, marking %s read-only\n",
1547 ++ dev_name(&nd_region->dev), disk->disk_name);
1548 ++ set_disk_ro(disk, 1);
1549 +
1550 + return 0;
1551 +
1552 +diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
1553 +index 53c83d66eb7e..90b5a898d6b1 100644
1554 +--- a/drivers/of/unittest.c
1555 ++++ b/drivers/of/unittest.c
1556 +@@ -155,20 +155,20 @@ static void __init of_unittest_dynamic(void)
1557 + /* Add a new property - should pass*/
1558 + prop->name = "new-property";
1559 + prop->value = "new-property-data";
1560 +- prop->length = strlen(prop->value);
1561 ++ prop->length = strlen(prop->value) + 1;
1562 + unittest(of_add_property(np, prop) == 0, "Adding a new property failed\n");
1563 +
1564 + /* Try to add an existing property - should fail */
1565 + prop++;
1566 + prop->name = "new-property";
1567 + prop->value = "new-property-data-should-fail";
1568 +- prop->length = strlen(prop->value);
1569 ++ prop->length = strlen(prop->value) + 1;
1570 + unittest(of_add_property(np, prop) != 0,
1571 + "Adding an existing property should have failed\n");
1572 +
1573 + /* Try to modify an existing property - should pass */
1574 + prop->value = "modify-property-data-should-pass";
1575 +- prop->length = strlen(prop->value);
1576 ++ prop->length = strlen(prop->value) + 1;
1577 + unittest(of_update_property(np, prop) == 0,
1578 + "Updating an existing property should have passed\n");
1579 +
1580 +@@ -176,7 +176,7 @@ static void __init of_unittest_dynamic(void)
1581 + prop++;
1582 + prop->name = "modify-property";
1583 + prop->value = "modify-missing-property-data-should-pass";
1584 +- prop->length = strlen(prop->value);
1585 ++ prop->length = strlen(prop->value) + 1;
1586 + unittest(of_update_property(np, prop) == 0,
1587 + "Updating a missing property should have passed\n");
1588 +
1589 +diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
1590 +index 37d70b5ad22f..2bba8481beb1 100644
1591 +--- a/drivers/pci/hotplug/pciehp.h
1592 ++++ b/drivers/pci/hotplug/pciehp.h
1593 +@@ -134,7 +134,7 @@ struct controller *pcie_init(struct pcie_device *dev);
1594 + int pcie_init_notification(struct controller *ctrl);
1595 + int pciehp_enable_slot(struct slot *p_slot);
1596 + int pciehp_disable_slot(struct slot *p_slot);
1597 +-void pcie_enable_notification(struct controller *ctrl);
1598 ++void pcie_reenable_notification(struct controller *ctrl);
1599 + int pciehp_power_on_slot(struct slot *slot);
1600 + void pciehp_power_off_slot(struct slot *slot);
1601 + void pciehp_get_power_status(struct slot *slot, u8 *status);
1602 +diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
1603 +index 7d32fa33dcef..6620b1095046 100644
1604 +--- a/drivers/pci/hotplug/pciehp_core.c
1605 ++++ b/drivers/pci/hotplug/pciehp_core.c
1606 +@@ -297,7 +297,7 @@ static int pciehp_resume(struct pcie_device *dev)
1607 + ctrl = get_service_data(dev);
1608 +
1609 + /* reinitialize the chipset's event detection logic */
1610 +- pcie_enable_notification(ctrl);
1611 ++ pcie_reenable_notification(ctrl);
1612 +
1613 + slot = ctrl->slot;
1614 +
1615 +diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
1616 +index d08dfc8b9ba9..8d811ea353c8 100644
1617 +--- a/drivers/pci/hotplug/pciehp_hpc.c
1618 ++++ b/drivers/pci/hotplug/pciehp_hpc.c
1619 +@@ -673,7 +673,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
1620 + return handled;
1621 + }
1622 +
1623 +-void pcie_enable_notification(struct controller *ctrl)
1624 ++static void pcie_enable_notification(struct controller *ctrl)
1625 + {
1626 + u16 cmd, mask;
1627 +
1628 +@@ -711,6 +711,17 @@ void pcie_enable_notification(struct controller *ctrl)
1629 + pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
1630 + }
1631 +
1632 ++void pcie_reenable_notification(struct controller *ctrl)
1633 ++{
1634 ++ /*
1635 ++ * Clear both Presence and Data Link Layer Changed to make sure
1636 ++ * those events still fire after we have re-enabled them.
1637 ++ */
1638 ++ pcie_capability_write_word(ctrl->pcie->port, PCI_EXP_SLTSTA,
1639 ++ PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
1640 ++ pcie_enable_notification(ctrl);
1641 ++}
1642 ++
1643 + static void pcie_disable_notification(struct controller *ctrl)
1644 + {
1645 + u16 mask;
1646 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
1647 +index b55f9179c94e..a05d143ac43b 100644
1648 +--- a/drivers/pci/quirks.c
1649 ++++ b/drivers/pci/quirks.c
1650 +@@ -4225,11 +4225,29 @@ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
1651 + * 0xa290-0xa29f PCI Express Root port #{0-16}
1652 + * 0xa2e7-0xa2ee PCI Express Root port #{17-24}
1653 + *
1654 ++ * Mobile chipsets are also affected, 7th & 8th Generation
1655 ++ * Specification update confirms ACS errata 22, status no fix: (7th Generation
1656 ++ * Intel Processor Family I/O for U/Y Platforms and 8th Generation Intel
1657 ++ * Processor Family I/O for U Quad Core Platforms Specification Update,
1658 ++ * August 2017, Revision 002, Document#: 334660-002)[6]
1659 ++ * Device IDs from I/O datasheet: (7th Generation Intel Processor Family I/O
1660 ++ * for U/Y Platforms and 8th Generation Intel ® Processor Family I/O for U
1661 ++ * Quad Core Platforms, Vol 1 of 2, August 2017, Document#: 334658-003)[7]
1662 ++ *
1663 ++ * 0x9d10-0x9d1b PCI Express Root port #{1-12}
1664 ++ *
1665 ++ * The 300 series chipset suffers from the same bug so include those root
1666 ++ * ports here as well.
1667 ++ *
1668 ++ * 0xa32c-0xa343 PCI Express Root port #{0-24}
1669 ++ *
1670 + * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html
1671 + * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html
1672 + * [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html
1673 + * [4] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-spec-update.html
1674 + * [5] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-datasheet-vol-1.html
1675 ++ * [6] https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-u-y-processor-lines-i-o-spec-update.html
1676 ++ * [7] https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-u-y-processor-lines-i-o-datasheet-vol-1.html
1677 + */
1678 + static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
1679 + {
1680 +@@ -4239,6 +4257,8 @@ static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
1681 + switch (dev->device) {
1682 + case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */
1683 + case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */
1684 ++ case 0x9d10 ... 0x9d1b: /* 7th & 8th Gen Mobile */
1685 ++ case 0xa32c ... 0xa343: /* 300 series */
1686 + return true;
1687 + }
1688 +
1689 +diff --git a/drivers/pwm/pwm-lpss-platform.c b/drivers/pwm/pwm-lpss-platform.c
1690 +index 54433fc6d1a4..e4eaefc2a2ef 100644
1691 +--- a/drivers/pwm/pwm-lpss-platform.c
1692 ++++ b/drivers/pwm/pwm-lpss-platform.c
1693 +@@ -52,6 +52,10 @@ static int pwm_lpss_remove_platform(struct platform_device *pdev)
1694 + return pwm_lpss_remove(lpwm);
1695 + }
1696 +
1697 ++static SIMPLE_DEV_PM_OPS(pwm_lpss_platform_pm_ops,
1698 ++ pwm_lpss_suspend,
1699 ++ pwm_lpss_resume);
1700 ++
1701 + static const struct acpi_device_id pwm_lpss_acpi_match[] = {
1702 + { "80860F09", (unsigned long)&pwm_lpss_byt_info },
1703 + { "80862288", (unsigned long)&pwm_lpss_bsw_info },
1704 +@@ -64,6 +68,7 @@ static struct platform_driver pwm_lpss_driver_platform = {
1705 + .driver = {
1706 + .name = "pwm-lpss",
1707 + .acpi_match_table = pwm_lpss_acpi_match,
1708 ++ .pm = &pwm_lpss_platform_pm_ops,
1709 + },
1710 + .probe = pwm_lpss_probe_platform,
1711 + .remove = pwm_lpss_remove_platform,
1712 +diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
1713 +index 72c0bce5a75c..5208b3f80ad8 100644
1714 +--- a/drivers/pwm/pwm-lpss.c
1715 ++++ b/drivers/pwm/pwm-lpss.c
1716 +@@ -31,10 +31,13 @@
1717 + /* Size of each PWM register space if multiple */
1718 + #define PWM_SIZE 0x400
1719 +
1720 ++#define MAX_PWMS 4
1721 ++
1722 + struct pwm_lpss_chip {
1723 + struct pwm_chip chip;
1724 + void __iomem *regs;
1725 + const struct pwm_lpss_boardinfo *info;
1726 ++ u32 saved_ctrl[MAX_PWMS];
1727 + };
1728 +
1729 + /* BayTrail */
1730 +@@ -168,6 +171,9 @@ struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
1731 + unsigned long c;
1732 + int ret;
1733 +
1734 ++ if (WARN_ON(info->npwm > MAX_PWMS))
1735 ++ return ERR_PTR(-ENODEV);
1736 ++
1737 + lpwm = devm_kzalloc(dev, sizeof(*lpwm), GFP_KERNEL);
1738 + if (!lpwm)
1739 + return ERR_PTR(-ENOMEM);
1740 +@@ -203,6 +209,30 @@ int pwm_lpss_remove(struct pwm_lpss_chip *lpwm)
1741 + }
1742 + EXPORT_SYMBOL_GPL(pwm_lpss_remove);
1743 +
1744 ++int pwm_lpss_suspend(struct device *dev)
1745 ++{
1746 ++ struct pwm_lpss_chip *lpwm = dev_get_drvdata(dev);
1747 ++ int i;
1748 ++
1749 ++ for (i = 0; i < lpwm->info->npwm; i++)
1750 ++ lpwm->saved_ctrl[i] = readl(lpwm->regs + i * PWM_SIZE + PWM);
1751 ++
1752 ++ return 0;
1753 ++}
1754 ++EXPORT_SYMBOL_GPL(pwm_lpss_suspend);
1755 ++
1756 ++int pwm_lpss_resume(struct device *dev)
1757 ++{
1758 ++ struct pwm_lpss_chip *lpwm = dev_get_drvdata(dev);
1759 ++ int i;
1760 ++
1761 ++ for (i = 0; i < lpwm->info->npwm; i++)
1762 ++ writel(lpwm->saved_ctrl[i], lpwm->regs + i * PWM_SIZE + PWM);
1763 ++
1764 ++ return 0;
1765 ++}
1766 ++EXPORT_SYMBOL_GPL(pwm_lpss_resume);
1767 ++
1768 + MODULE_DESCRIPTION("PWM driver for Intel LPSS");
1769 + MODULE_AUTHOR("Mika Westerberg <mika.westerberg@×××××××××××.com>");
1770 + MODULE_LICENSE("GPL v2");
1771 +diff --git a/drivers/pwm/pwm-lpss.h b/drivers/pwm/pwm-lpss.h
1772 +index 04766e0d41aa..27d5081ec218 100644
1773 +--- a/drivers/pwm/pwm-lpss.h
1774 ++++ b/drivers/pwm/pwm-lpss.h
1775 +@@ -31,5 +31,7 @@ extern const struct pwm_lpss_boardinfo pwm_lpss_bxt_info;
1776 + struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
1777 + const struct pwm_lpss_boardinfo *info);
1778 + int pwm_lpss_remove(struct pwm_lpss_chip *lpwm);
1779 ++int pwm_lpss_suspend(struct device *dev);
1780 ++int pwm_lpss_resume(struct device *dev);
1781 +
1782 + #endif /* __PWM_LPSS_H */
1783 +diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
1784 +index 1d4770c02e57..fd3d9419c468 100644
1785 +--- a/drivers/rpmsg/qcom_smd.c
1786 ++++ b/drivers/rpmsg/qcom_smd.c
1787 +@@ -1006,12 +1006,12 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
1788 + void *info;
1789 + int ret;
1790 +
1791 +- channel = devm_kzalloc(&edge->dev, sizeof(*channel), GFP_KERNEL);
1792 ++ channel = kzalloc(sizeof(*channel), GFP_KERNEL);
1793 + if (!channel)
1794 + return ERR_PTR(-ENOMEM);
1795 +
1796 + channel->edge = edge;
1797 +- channel->name = devm_kstrdup(&edge->dev, name, GFP_KERNEL);
1798 ++ channel->name = kstrdup(name, GFP_KERNEL);
1799 + if (!channel->name)
1800 + return ERR_PTR(-ENOMEM);
1801 +
1802 +@@ -1061,8 +1061,8 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
1803 + return channel;
1804 +
1805 + free_name_and_channel:
1806 +- devm_kfree(&edge->dev, channel->name);
1807 +- devm_kfree(&edge->dev, channel);
1808 ++ kfree(channel->name);
1809 ++ kfree(channel);
1810 +
1811 + return ERR_PTR(ret);
1812 + }
1813 +@@ -1279,13 +1279,13 @@ static int qcom_smd_parse_edge(struct device *dev,
1814 + */
1815 + static void qcom_smd_edge_release(struct device *dev)
1816 + {
1817 +- struct qcom_smd_channel *channel;
1818 ++ struct qcom_smd_channel *channel, *tmp;
1819 + struct qcom_smd_edge *edge = to_smd_edge(dev);
1820 +
1821 +- list_for_each_entry(channel, &edge->channels, list) {
1822 +- SET_RX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED);
1823 +- SET_RX_CHANNEL_INFO(channel, head, 0);
1824 +- SET_RX_CHANNEL_INFO(channel, tail, 0);
1825 ++ list_for_each_entry_safe(channel, tmp, &edge->channels, list) {
1826 ++ list_del(&channel->list);
1827 ++ kfree(channel->name);
1828 ++ kfree(channel);
1829 + }
1830 +
1831 + kfree(edge);
1832 +diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
1833 +index 4534a7ce77b8..b6caad0fee24 100644
1834 +--- a/drivers/s390/scsi/zfcp_dbf.c
1835 ++++ b/drivers/s390/scsi/zfcp_dbf.c
1836 +@@ -625,6 +625,46 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
1837 + spin_unlock_irqrestore(&dbf->scsi_lock, flags);
1838 + }
1839 +
1840 ++/**
1841 ++ * zfcp_dbf_scsi_eh() - Trace event for special cases of scsi_eh callbacks.
1842 ++ * @tag: Identifier for event.
1843 ++ * @adapter: Pointer to zfcp adapter as context for this event.
1844 ++ * @scsi_id: SCSI ID/target to indicate scope of task management function (TMF).
1845 ++ * @ret: Return value of calling function.
1846 ++ *
1847 ++ * This SCSI trace variant does not depend on any of:
1848 ++ * scsi_cmnd, zfcp_fsf_req, scsi_device.
1849 ++ */
1850 ++void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter,
1851 ++ unsigned int scsi_id, int ret)
1852 ++{
1853 ++ struct zfcp_dbf *dbf = adapter->dbf;
1854 ++ struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
1855 ++ unsigned long flags;
1856 ++ static int const level = 1;
1857 ++
1858 ++ if (unlikely(!debug_level_enabled(adapter->dbf->scsi, level)))
1859 ++ return;
1860 ++
1861 ++ spin_lock_irqsave(&dbf->scsi_lock, flags);
1862 ++ memset(rec, 0, sizeof(*rec));
1863 ++
1864 ++ memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
1865 ++ rec->id = ZFCP_DBF_SCSI_CMND;
1866 ++ rec->scsi_result = ret; /* re-use field, int is 4 bytes and fits */
1867 ++ rec->scsi_retries = ~0;
1868 ++ rec->scsi_allowed = ~0;
1869 ++ rec->fcp_rsp_info = ~0;
1870 ++ rec->scsi_id = scsi_id;
1871 ++ rec->scsi_lun = (u32)ZFCP_DBF_INVALID_LUN;
1872 ++ rec->scsi_lun_64_hi = (u32)(ZFCP_DBF_INVALID_LUN >> 32);
1873 ++ rec->host_scribble = ~0;
1874 ++ memset(rec->scsi_opcode, 0xff, ZFCP_DBF_SCSI_OPCODE);
1875 ++
1876 ++ debug_event(dbf->scsi, level, rec, sizeof(*rec));
1877 ++ spin_unlock_irqrestore(&dbf->scsi_lock, flags);
1878 ++}
1879 ++
1880 + static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size)
1881 + {
1882 + struct debug_info *d;
1883 +diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
1884 +index 3b23d6754598..2abcd331b05d 100644
1885 +--- a/drivers/s390/scsi/zfcp_erp.c
1886 ++++ b/drivers/s390/scsi/zfcp_erp.c
1887 +@@ -34,11 +34,28 @@ enum zfcp_erp_steps {
1888 + ZFCP_ERP_STEP_LUN_OPENING = 0x2000,
1889 + };
1890 +
1891 ++/**
1892 ++ * enum zfcp_erp_act_type - Type of ERP action object.
1893 ++ * @ZFCP_ERP_ACTION_REOPEN_LUN: LUN recovery.
1894 ++ * @ZFCP_ERP_ACTION_REOPEN_PORT: Port recovery.
1895 ++ * @ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: Forced port recovery.
1896 ++ * @ZFCP_ERP_ACTION_REOPEN_ADAPTER: Adapter recovery.
1897 ++ * @ZFCP_ERP_ACTION_NONE: Eyecatcher pseudo flag to bitwise or-combine with
1898 ++ * either of the first four enum values.
1899 ++ * Used to indicate that an ERP action could not be
1900 ++ * set up despite a detected need for some recovery.
1901 ++ * @ZFCP_ERP_ACTION_FAILED: Eyecatcher pseudo flag to bitwise or-combine with
1902 ++ * either of the first four enum values.
1903 ++ * Used to indicate that ERP not needed because
1904 ++ * the object has ZFCP_STATUS_COMMON_ERP_FAILED.
1905 ++ */
1906 + enum zfcp_erp_act_type {
1907 + ZFCP_ERP_ACTION_REOPEN_LUN = 1,
1908 + ZFCP_ERP_ACTION_REOPEN_PORT = 2,
1909 + ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3,
1910 + ZFCP_ERP_ACTION_REOPEN_ADAPTER = 4,
1911 ++ ZFCP_ERP_ACTION_NONE = 0xc0,
1912 ++ ZFCP_ERP_ACTION_FAILED = 0xe0,
1913 + };
1914 +
1915 + enum zfcp_erp_act_state {
1916 +@@ -125,6 +142,49 @@ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
1917 + }
1918 + }
1919 +
1920 ++static int zfcp_erp_handle_failed(int want, struct zfcp_adapter *adapter,
1921 ++ struct zfcp_port *port,
1922 ++ struct scsi_device *sdev)
1923 ++{
1924 ++ int need = want;
1925 ++ struct zfcp_scsi_dev *zsdev;
1926 ++
1927 ++ switch (want) {
1928 ++ case ZFCP_ERP_ACTION_REOPEN_LUN:
1929 ++ zsdev = sdev_to_zfcp(sdev);
1930 ++ if (atomic_read(&zsdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
1931 ++ need = 0;
1932 ++ break;
1933 ++ case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
1934 ++ if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
1935 ++ need = 0;
1936 ++ break;
1937 ++ case ZFCP_ERP_ACTION_REOPEN_PORT:
1938 ++ if (atomic_read(&port->status) &
1939 ++ ZFCP_STATUS_COMMON_ERP_FAILED) {
1940 ++ need = 0;
1941 ++ /* ensure propagation of failed status to new devices */
1942 ++ zfcp_erp_set_port_status(
1943 ++ port, ZFCP_STATUS_COMMON_ERP_FAILED);
1944 ++ }
1945 ++ break;
1946 ++ case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
1947 ++ if (atomic_read(&adapter->status) &
1948 ++ ZFCP_STATUS_COMMON_ERP_FAILED) {
1949 ++ need = 0;
1950 ++ /* ensure propagation of failed status to new devices */
1951 ++ zfcp_erp_set_adapter_status(
1952 ++ adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
1953 ++ }
1954 ++ break;
1955 ++ default:
1956 ++ need = 0;
1957 ++ break;
1958 ++ }
1959 ++
1960 ++ return need;
1961 ++}
1962 ++
1963 + static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
1964 + struct zfcp_port *port,
1965 + struct scsi_device *sdev)
1966 +@@ -248,16 +308,27 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
1967 + int retval = 1, need;
1968 + struct zfcp_erp_action *act;
1969 +
1970 +- if (!adapter->erp_thread)
1971 +- return -EIO;
1972 ++ need = zfcp_erp_handle_failed(want, adapter, port, sdev);
1973 ++ if (!need) {
1974 ++ need = ZFCP_ERP_ACTION_FAILED; /* marker for trace */
1975 ++ goto out;
1976 ++ }
1977 ++
1978 ++ if (!adapter->erp_thread) {
1979 ++ need = ZFCP_ERP_ACTION_NONE; /* marker for trace */
1980 ++ retval = -EIO;
1981 ++ goto out;
1982 ++ }
1983 +
1984 + need = zfcp_erp_required_act(want, adapter, port, sdev);
1985 + if (!need)
1986 + goto out;
1987 +
1988 + act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev);
1989 +- if (!act)
1990 ++ if (!act) {
1991 ++ need |= ZFCP_ERP_ACTION_NONE; /* marker for trace */
1992 + goto out;
1993 ++ }
1994 + atomic_or(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
1995 + ++adapter->erp_total_count;
1996 + list_add_tail(&act->list, &adapter->erp_ready_head);
1997 +@@ -268,18 +339,32 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
1998 + return retval;
1999 + }
2000 +
2001 ++void zfcp_erp_port_forced_no_port_dbf(char *id, struct zfcp_adapter *adapter,
2002 ++ u64 port_name, u32 port_id)
2003 ++{
2004 ++ unsigned long flags;
2005 ++ static /* don't waste stack */ struct zfcp_port tmpport;
2006 ++
2007 ++ write_lock_irqsave(&adapter->erp_lock, flags);
2008 ++ /* Stand-in zfcp port with fields just good enough for
2009 ++ * zfcp_dbf_rec_trig() and zfcp_dbf_set_common().
2010 ++ * Under lock because tmpport is static.
2011 ++ */
2012 ++ atomic_set(&tmpport.status, -1); /* unknown */
2013 ++ tmpport.wwpn = port_name;
2014 ++ tmpport.d_id = port_id;
2015 ++ zfcp_dbf_rec_trig(id, adapter, &tmpport, NULL,
2016 ++ ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
2017 ++ ZFCP_ERP_ACTION_NONE);
2018 ++ write_unlock_irqrestore(&adapter->erp_lock, flags);
2019 ++}
2020 ++
2021 + static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
2022 + int clear_mask, char *id)
2023 + {
2024 + zfcp_erp_adapter_block(adapter, clear_mask);
2025 + zfcp_scsi_schedule_rports_block(adapter);
2026 +
2027 +- /* ensure propagation of failed status to new devices */
2028 +- if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
2029 +- zfcp_erp_set_adapter_status(adapter,
2030 +- ZFCP_STATUS_COMMON_ERP_FAILED);
2031 +- return -EIO;
2032 +- }
2033 + return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
2034 + adapter, NULL, NULL, id, 0);
2035 + }
2036 +@@ -298,12 +383,8 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id)
2037 + zfcp_scsi_schedule_rports_block(adapter);
2038 +
2039 + write_lock_irqsave(&adapter->erp_lock, flags);
2040 +- if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
2041 +- zfcp_erp_set_adapter_status(adapter,
2042 +- ZFCP_STATUS_COMMON_ERP_FAILED);
2043 +- else
2044 +- zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
2045 +- NULL, NULL, id, 0);
2046 ++ zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
2047 ++ NULL, NULL, id, 0);
2048 + write_unlock_irqrestore(&adapter->erp_lock, flags);
2049 + }
2050 +
2051 +@@ -344,9 +425,6 @@ static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear,
2052 + zfcp_erp_port_block(port, clear);
2053 + zfcp_scsi_schedule_rport_block(port);
2054 +
2055 +- if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
2056 +- return;
2057 +-
2058 + zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
2059 + port->adapter, port, NULL, id, 0);
2060 + }
2061 +@@ -372,12 +450,6 @@ static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
2062 + zfcp_erp_port_block(port, clear);
2063 + zfcp_scsi_schedule_rport_block(port);
2064 +
2065 +- if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
2066 +- /* ensure propagation of failed status to new devices */
2067 +- zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
2068 +- return -EIO;
2069 +- }
2070 +-
2071 + return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
2072 + port->adapter, port, NULL, id, 0);
2073 + }
2074 +@@ -417,9 +489,6 @@ static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
2075 +
2076 + zfcp_erp_lun_block(sdev, clear);
2077 +
2078 +- if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
2079 +- return;
2080 +-
2081 + zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter,
2082 + zfcp_sdev->port, sdev, id, act_status);
2083 + }
2084 +diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
2085 +index 7a7984a50683..b326f05c7f89 100644
2086 +--- a/drivers/s390/scsi/zfcp_ext.h
2087 ++++ b/drivers/s390/scsi/zfcp_ext.h
2088 +@@ -52,10 +52,15 @@ extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
2089 + extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
2090 + extern void zfcp_dbf_scsi(char *, int, struct scsi_cmnd *,
2091 + struct zfcp_fsf_req *);
2092 ++extern void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter,
2093 ++ unsigned int scsi_id, int ret);
2094 +
2095 + /* zfcp_erp.c */
2096 + extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
2097 + extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32);
2098 ++extern void zfcp_erp_port_forced_no_port_dbf(char *id,
2099 ++ struct zfcp_adapter *adapter,
2100 ++ u64 port_name, u32 port_id);
2101 + extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *);
2102 + extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *);
2103 + extern void zfcp_erp_set_port_status(struct zfcp_port *, u32);
2104 +diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
2105 +index bb99db2948ab..3afb200b2829 100644
2106 +--- a/drivers/s390/scsi/zfcp_scsi.c
2107 ++++ b/drivers/s390/scsi/zfcp_scsi.c
2108 +@@ -180,6 +180,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
2109 + if (abrt_req)
2110 + break;
2111 +
2112 ++ zfcp_dbf_scsi_abort("abrt_wt", scpnt, NULL);
2113 + zfcp_erp_wait(adapter);
2114 + ret = fc_block_scsi_eh(scpnt);
2115 + if (ret) {
2116 +@@ -276,6 +277,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
2117 + if (fsf_req)
2118 + break;
2119 +
2120 ++ zfcp_dbf_scsi_devreset("wait", scpnt, tm_flags, NULL);
2121 + zfcp_erp_wait(adapter);
2122 + ret = fc_block_scsi_eh(scpnt);
2123 + if (ret) {
2124 +@@ -322,15 +324,16 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
2125 + {
2126 + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
2127 + struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
2128 +- int ret;
2129 ++ int ret = SUCCESS, fc_ret;
2130 +
2131 + zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
2132 + zfcp_erp_wait(adapter);
2133 +- ret = fc_block_scsi_eh(scpnt);
2134 +- if (ret)
2135 +- return ret;
2136 ++ fc_ret = fc_block_scsi_eh(scpnt);
2137 ++ if (fc_ret)
2138 ++ ret = fc_ret;
2139 +
2140 +- return SUCCESS;
2141 ++ zfcp_dbf_scsi_eh("schrh_r", adapter, ~0, ret);
2142 ++ return ret;
2143 + }
2144 +
2145 + struct scsi_transport_template *zfcp_scsi_transport_template;
2146 +@@ -600,6 +603,11 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
2147 + if (port) {
2148 + zfcp_erp_port_forced_reopen(port, 0, "sctrpi1");
2149 + put_device(&port->dev);
2150 ++ } else {
2151 ++ zfcp_erp_port_forced_no_port_dbf(
2152 ++ "sctrpin", adapter,
2153 ++ rport->port_name /* zfcp_scsi_rport_register */,
2154 ++ rport->port_id /* zfcp_scsi_rport_register */);
2155 + }
2156 + }
2157 +
2158 +diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
2159 +index 4441a559f139..34bbcfcae67c 100644
2160 +--- a/drivers/scsi/qla2xxx/qla_init.c
2161 ++++ b/drivers/scsi/qla2xxx/qla_init.c
2162 +@@ -3319,7 +3319,8 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2163 + return;
2164 +
2165 + if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
2166 +- fcport->fp_speed > ha->link_data_rate)
2167 ++ fcport->fp_speed > ha->link_data_rate ||
2168 ++ !ha->flags.gpsc_supported)
2169 + return;
2170 +
2171 + rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
2172 +diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
2173 +index da46f0fba5da..6ff53b604ff6 100644
2174 +--- a/drivers/tty/serial/sh-sci.c
2175 ++++ b/drivers/tty/serial/sh-sci.c
2176 +@@ -2807,16 +2807,15 @@ static void serial_console_write(struct console *co, const char *s,
2177 + unsigned long flags;
2178 + int locked = 1;
2179 +
2180 +- local_irq_save(flags);
2181 + #if defined(SUPPORT_SYSRQ)
2182 + if (port->sysrq)
2183 + locked = 0;
2184 + else
2185 + #endif
2186 + if (oops_in_progress)
2187 +- locked = spin_trylock(&port->lock);
2188 ++ locked = spin_trylock_irqsave(&port->lock, flags);
2189 + else
2190 +- spin_lock(&port->lock);
2191 ++ spin_lock_irqsave(&port->lock, flags);
2192 +
2193 + /* first save SCSCR then disable interrupts, keep clock source */
2194 + ctrl = serial_port_in(port, SCSCR);
2195 +@@ -2835,8 +2834,7 @@ static void serial_console_write(struct console *co, const char *s,
2196 + serial_port_out(port, SCSCR, ctrl);
2197 +
2198 + if (locked)
2199 +- spin_unlock(&port->lock);
2200 +- local_irq_restore(flags);
2201 ++ spin_unlock_irqrestore(&port->lock, flags);
2202 + }
2203 +
2204 + static int serial_console_setup(struct console *co, char *options)
2205 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
2206 +index d8d992b73e88..8bf0090218dd 100644
2207 +--- a/drivers/usb/core/hub.c
2208 ++++ b/drivers/usb/core/hub.c
2209 +@@ -4509,7 +4509,9 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
2210 + * reset. But only on the first attempt,
2211 + * lest we get into a time out/reset loop
2212 + */
2213 +- if (r == 0 || (r == -ETIMEDOUT && retries == 0))
2214 ++ if (r == 0 || (r == -ETIMEDOUT &&
2215 ++ retries == 0 &&
2216 ++ udev->speed > USB_SPEED_FULL))
2217 + break;
2218 + }
2219 + udev->descriptor.bMaxPacketSize0 =
2220 +diff --git a/drivers/video/backlight/as3711_bl.c b/drivers/video/backlight/as3711_bl.c
2221 +index 734a9158946b..e55304d5cf07 100644
2222 +--- a/drivers/video/backlight/as3711_bl.c
2223 ++++ b/drivers/video/backlight/as3711_bl.c
2224 +@@ -262,10 +262,10 @@ static int as3711_bl_register(struct platform_device *pdev,
2225 + static int as3711_backlight_parse_dt(struct device *dev)
2226 + {
2227 + struct as3711_bl_pdata *pdata = dev_get_platdata(dev);
2228 +- struct device_node *bl =
2229 +- of_find_node_by_name(dev->parent->of_node, "backlight"), *fb;
2230 ++ struct device_node *bl, *fb;
2231 + int ret;
2232 +
2233 ++ bl = of_get_child_by_name(dev->parent->of_node, "backlight");
2234 + if (!bl) {
2235 + dev_dbg(dev, "backlight node not found\n");
2236 + return -ENODEV;
2237 +@@ -279,7 +279,7 @@ static int as3711_backlight_parse_dt(struct device *dev)
2238 + if (pdata->su1_max_uA <= 0)
2239 + ret = -EINVAL;
2240 + if (ret < 0)
2241 +- return ret;
2242 ++ goto err_put_bl;
2243 + }
2244 +
2245 + fb = of_parse_phandle(bl, "su2-dev", 0);
2246 +@@ -292,7 +292,7 @@ static int as3711_backlight_parse_dt(struct device *dev)
2247 + if (pdata->su2_max_uA <= 0)
2248 + ret = -EINVAL;
2249 + if (ret < 0)
2250 +- return ret;
2251 ++ goto err_put_bl;
2252 +
2253 + if (of_find_property(bl, "su2-feedback-voltage", NULL)) {
2254 + pdata->su2_feedback = AS3711_SU2_VOLTAGE;
2255 +@@ -314,8 +314,10 @@ static int as3711_backlight_parse_dt(struct device *dev)
2256 + pdata->su2_feedback = AS3711_SU2_CURR_AUTO;
2257 + count++;
2258 + }
2259 +- if (count != 1)
2260 +- return -EINVAL;
2261 ++ if (count != 1) {
2262 ++ ret = -EINVAL;
2263 ++ goto err_put_bl;
2264 ++ }
2265 +
2266 + count = 0;
2267 + if (of_find_property(bl, "su2-fbprot-lx-sd4", NULL)) {
2268 +@@ -334,8 +336,10 @@ static int as3711_backlight_parse_dt(struct device *dev)
2269 + pdata->su2_fbprot = AS3711_SU2_GPIO4;
2270 + count++;
2271 + }
2272 +- if (count != 1)
2273 +- return -EINVAL;
2274 ++ if (count != 1) {
2275 ++ ret = -EINVAL;
2276 ++ goto err_put_bl;
2277 ++ }
2278 +
2279 + count = 0;
2280 + if (of_find_property(bl, "su2-auto-curr1", NULL)) {
2281 +@@ -355,11 +359,20 @@ static int as3711_backlight_parse_dt(struct device *dev)
2282 + * At least one su2-auto-curr* must be specified iff
2283 + * AS3711_SU2_CURR_AUTO is used
2284 + */
2285 +- if (!count ^ (pdata->su2_feedback != AS3711_SU2_CURR_AUTO))
2286 +- return -EINVAL;
2287 ++ if (!count ^ (pdata->su2_feedback != AS3711_SU2_CURR_AUTO)) {
2288 ++ ret = -EINVAL;
2289 ++ goto err_put_bl;
2290 ++ }
2291 + }
2292 +
2293 ++ of_node_put(bl);
2294 ++
2295 + return 0;
2296 ++
2297 ++err_put_bl:
2298 ++ of_node_put(bl);
2299 ++
2300 ++ return ret;
2301 + }
2302 +
2303 + static int as3711_backlight_probe(struct platform_device *pdev)
2304 +diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c
2305 +index 7b738d60ecc2..f3aa6088f1d9 100644
2306 +--- a/drivers/video/backlight/max8925_bl.c
2307 ++++ b/drivers/video/backlight/max8925_bl.c
2308 +@@ -116,7 +116,7 @@ static void max8925_backlight_dt_init(struct platform_device *pdev)
2309 + if (!pdata)
2310 + return;
2311 +
2312 +- np = of_find_node_by_name(nproot, "backlight");
2313 ++ np = of_get_child_by_name(nproot, "backlight");
2314 + if (!np) {
2315 + dev_err(&pdev->dev, "failed to find backlight node\n");
2316 + return;
2317 +@@ -125,6 +125,8 @@ static void max8925_backlight_dt_init(struct platform_device *pdev)
2318 + if (!of_property_read_u32(np, "maxim,max8925-dual-string", &val))
2319 + pdata->dual_string = val;
2320 +
2321 ++ of_node_put(np);
2322 ++
2323 + pdev->dev.platform_data = pdata;
2324 + }
2325 +
2326 +diff --git a/drivers/video/backlight/tps65217_bl.c b/drivers/video/backlight/tps65217_bl.c
2327 +index fd524ad860a5..f45d0c9467db 100644
2328 +--- a/drivers/video/backlight/tps65217_bl.c
2329 ++++ b/drivers/video/backlight/tps65217_bl.c
2330 +@@ -184,11 +184,11 @@ static struct tps65217_bl_pdata *
2331 + tps65217_bl_parse_dt(struct platform_device *pdev)
2332 + {
2333 + struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent);
2334 +- struct device_node *node = of_node_get(tps->dev->of_node);
2335 ++ struct device_node *node;
2336 + struct tps65217_bl_pdata *pdata, *err;
2337 + u32 val;
2338 +
2339 +- node = of_find_node_by_name(node, "backlight");
2340 ++ node = of_get_child_by_name(tps->dev->of_node, "backlight");
2341 + if (!node)
2342 + return ERR_PTR(-ENODEV);
2343 +
2344 +diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
2345 +index 98af9e02959b..9fe0d0bcdf62 100644
2346 +--- a/drivers/video/fbdev/uvesafb.c
2347 ++++ b/drivers/video/fbdev/uvesafb.c
2348 +@@ -1059,7 +1059,8 @@ static int uvesafb_setcmap(struct fb_cmap *cmap, struct fb_info *info)
2349 + info->cmap.len || cmap->start < info->cmap.start)
2350 + return -EINVAL;
2351 +
2352 +- entries = kmalloc(sizeof(*entries) * cmap->len, GFP_KERNEL);
2353 ++ entries = kmalloc_array(cmap->len, sizeof(*entries),
2354 ++ GFP_KERNEL);
2355 + if (!entries)
2356 + return -ENOMEM;
2357 +
2358 +diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
2359 +index ab0931e7a9bb..aa458f2fced1 100644
2360 +--- a/drivers/w1/w1.c
2361 ++++ b/drivers/w1/w1.c
2362 +@@ -741,7 +741,7 @@ int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
2363 +
2364 + /* slave modules need to be loaded in a context with unlocked mutex */
2365 + mutex_unlock(&dev->mutex);
2366 +- request_module("w1-family-0x%02x", rn->family);
2367 ++ request_module("w1-family-0x%02X", rn->family);
2368 + mutex_lock(&dev->mutex);
2369 +
2370 + spin_lock(&w1_flock);
2371 +diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
2372 +index 6d3b32ccc2c4..1435d8c58ea0 100644
2373 +--- a/drivers/xen/events/events_base.c
2374 ++++ b/drivers/xen/events/events_base.c
2375 +@@ -637,8 +637,6 @@ static void __unbind_from_irq(unsigned int irq)
2376 + xen_irq_info_cleanup(info);
2377 + }
2378 +
2379 +- BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);
2380 +-
2381 + xen_free_irq(irq);
2382 + }
2383 +
2384 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
2385 +index f073de65e818..bd036557c6bc 100644
2386 +--- a/fs/btrfs/inode.c
2387 ++++ b/fs/btrfs/inode.c
2388 +@@ -1230,6 +1230,8 @@ static noinline int csum_exist_in_range(struct btrfs_root *root,
2389 + list_del(&sums->list);
2390 + kfree(sums);
2391 + }
2392 ++ if (ret < 0)
2393 ++ return ret;
2394 + return 1;
2395 + }
2396 +
2397 +@@ -1381,10 +1383,23 @@ static noinline int run_delalloc_nocow(struct inode *inode,
2398 + goto out_check;
2399 + if (btrfs_extent_readonly(root, disk_bytenr))
2400 + goto out_check;
2401 +- if (btrfs_cross_ref_exist(trans, root, ino,
2402 ++ ret = btrfs_cross_ref_exist(trans, root, ino,
2403 + found_key.offset -
2404 +- extent_offset, disk_bytenr))
2405 ++ extent_offset, disk_bytenr);
2406 ++ if (ret) {
2407 ++ /*
2408 ++ * ret could be -EIO if the above fails to read
2409 ++ * metadata.
2410 ++ */
2411 ++ if (ret < 0) {
2412 ++ if (cow_start != (u64)-1)
2413 ++ cur_offset = cow_start;
2414 ++ goto error;
2415 ++ }
2416 ++
2417 ++ WARN_ON_ONCE(nolock);
2418 + goto out_check;
2419 ++ }
2420 + disk_bytenr += extent_offset;
2421 + disk_bytenr += cur_offset - found_key.offset;
2422 + num_bytes = min(end + 1, extent_end) - cur_offset;
2423 +@@ -1402,8 +1417,20 @@ static noinline int run_delalloc_nocow(struct inode *inode,
2424 + * this ensure that csum for a given extent are
2425 + * either valid or do not exist.
2426 + */
2427 +- if (csum_exist_in_range(root, disk_bytenr, num_bytes))
2428 ++ ret = csum_exist_in_range(root, disk_bytenr, num_bytes);
2429 ++ if (ret) {
2430 ++ /*
2431 ++ * ret could be -EIO if the above fails to read
2432 ++ * metadata.
2433 ++ */
2434 ++ if (ret < 0) {
2435 ++ if (cow_start != (u64)-1)
2436 ++ cur_offset = cow_start;
2437 ++ goto error;
2438 ++ }
2439 ++ WARN_ON_ONCE(nolock);
2440 + goto out_check;
2441 ++ }
2442 + if (!btrfs_inc_nocow_writers(root->fs_info,
2443 + disk_bytenr))
2444 + goto out_check;
2445 +@@ -9561,6 +9588,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
2446 + u64 new_idx = 0;
2447 + u64 root_objectid;
2448 + int ret;
2449 ++ int ret2;
2450 + bool root_log_pinned = false;
2451 + bool dest_log_pinned = false;
2452 +
2453 +@@ -9751,7 +9779,8 @@ static int btrfs_rename_exchange(struct inode *old_dir,
2454 + dest_log_pinned = false;
2455 + }
2456 + }
2457 +- ret = btrfs_end_transaction(trans, root);
2458 ++ ret2 = btrfs_end_transaction(trans, root);
2459 ++ ret = ret ? ret : ret2;
2460 + out_notrans:
2461 + if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
2462 + up_read(&dest->fs_info->subvol_sem);
2463 +diff --git a/fs/fuse/control.c b/fs/fuse/control.c
2464 +index 6e22748b0704..e25c40c10f4f 100644
2465 +--- a/fs/fuse/control.c
2466 ++++ b/fs/fuse/control.c
2467 +@@ -211,10 +211,11 @@ static struct dentry *fuse_ctl_add_dentry(struct dentry *parent,
2468 + if (!dentry)
2469 + return NULL;
2470 +
2471 +- fc->ctl_dentry[fc->ctl_ndents++] = dentry;
2472 + inode = new_inode(fuse_control_sb);
2473 +- if (!inode)
2474 ++ if (!inode) {
2475 ++ dput(dentry);
2476 + return NULL;
2477 ++ }
2478 +
2479 + inode->i_ino = get_next_ino();
2480 + inode->i_mode = mode;
2481 +@@ -228,6 +229,9 @@ static struct dentry *fuse_ctl_add_dentry(struct dentry *parent,
2482 + set_nlink(inode, nlink);
2483 + inode->i_private = fc;
2484 + d_add(dentry, inode);
2485 ++
2486 ++ fc->ctl_dentry[fc->ctl_ndents++] = dentry;
2487 ++
2488 + return dentry;
2489 + }
2490 +
2491 +@@ -284,7 +288,10 @@ void fuse_ctl_remove_conn(struct fuse_conn *fc)
2492 + for (i = fc->ctl_ndents - 1; i >= 0; i--) {
2493 + struct dentry *dentry = fc->ctl_dentry[i];
2494 + d_inode(dentry)->i_private = NULL;
2495 +- d_drop(dentry);
2496 ++ if (!i) {
2497 ++ /* Get rid of submounts: */
2498 ++ d_invalidate(dentry);
2499 ++ }
2500 + dput(dentry);
2501 + }
2502 + drop_nlink(d_inode(fuse_control_sb->s_root));
2503 +diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
2504 +index 4bbad745415a..cca8dd3bda09 100644
2505 +--- a/fs/fuse/dir.c
2506 ++++ b/fs/fuse/dir.c
2507 +@@ -1633,8 +1633,19 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
2508 + return err;
2509 +
2510 + if (attr->ia_valid & ATTR_OPEN) {
2511 +- if (fc->atomic_o_trunc)
2512 ++ /* This is coming from open(..., ... | O_TRUNC); */
2513 ++ WARN_ON(!(attr->ia_valid & ATTR_SIZE));
2514 ++ WARN_ON(attr->ia_size != 0);
2515 ++ if (fc->atomic_o_trunc) {
2516 ++ /*
2517 ++ * No need to send request to userspace, since actual
2518 ++ * truncation has already been done by OPEN. But still
2519 ++ * need to truncate page cache.
2520 ++ */
2521 ++ i_size_write(inode, 0);
2522 ++ truncate_pagecache(inode, 0);
2523 + return 0;
2524 ++ }
2525 + file = NULL;
2526 + }
2527 +
2528 +diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
2529 +index 6fe6a88ecb4a..f95e1d49b048 100644
2530 +--- a/fs/fuse/inode.c
2531 ++++ b/fs/fuse/inode.c
2532 +@@ -1184,6 +1184,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
2533 + err_put_conn:
2534 + fuse_bdi_destroy(fc);
2535 + fuse_conn_put(fc);
2536 ++ sb->s_fs_info = NULL;
2537 + err_fput:
2538 + fput(file);
2539 + err:
2540 +diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
2541 +index e9aa235e9d10..2e7ebd9d7168 100644
2542 +--- a/fs/nfs/callback_proc.c
2543 ++++ b/fs/nfs/callback_proc.c
2544 +@@ -402,11 +402,8 @@ validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
2545 + return htonl(NFS4ERR_SEQ_FALSE_RETRY);
2546 + }
2547 +
2548 +- /* Wraparound */
2549 +- if (unlikely(slot->seq_nr == 0xFFFFFFFFU)) {
2550 +- if (args->csa_sequenceid == 1)
2551 +- return htonl(NFS4_OK);
2552 +- } else if (likely(args->csa_sequenceid == slot->seq_nr + 1))
2553 ++ /* Note: wraparound relies on seq_nr being of type u32 */
2554 ++ if (likely(args->csa_sequenceid == slot->seq_nr + 1))
2555 + return htonl(NFS4_OK);
2556 +
2557 + /* Misordered request */
2558 +diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c
2559 +index f1160cdd4682..3d4602d66068 100644
2560 +--- a/fs/nfs/nfs4idmap.c
2561 ++++ b/fs/nfs/nfs4idmap.c
2562 +@@ -343,7 +343,7 @@ static ssize_t nfs_idmap_lookup_name(__u32 id, const char *type, char *buf,
2563 + int id_len;
2564 + ssize_t ret;
2565 +
2566 +- id_len = snprintf(id_str, sizeof(id_str), "%u", id);
2567 ++ id_len = nfs_map_numeric_to_string(id, id_str, sizeof(id_str));
2568 + ret = nfs_idmap_get_key(id_str, id_len, type, buf, buflen, idmap);
2569 + if (ret < 0)
2570 + return -EINVAL;
2571 +@@ -626,7 +626,8 @@ static int nfs_idmap_read_and_verify_message(struct idmap_msg *im,
2572 + if (strcmp(upcall->im_name, im->im_name) != 0)
2573 + break;
2574 + /* Note: here we store the NUL terminator too */
2575 +- len = sprintf(id_str, "%d", im->im_id) + 1;
2576 ++ len = 1 + nfs_map_numeric_to_string(im->im_id, id_str,
2577 ++ sizeof(id_str));
2578 + ret = nfs_idmap_instantiate(key, authkey, id_str, len);
2579 + break;
2580 + case IDMAP_CONV_IDTONAME:
2581 +diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
2582 +index 2c4f7a22e128..bdbd9e6d1ace 100644
2583 +--- a/fs/nfsd/nfs4xdr.c
2584 ++++ b/fs/nfsd/nfs4xdr.c
2585 +@@ -3638,7 +3638,8 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
2586 + nfserr = nfserr_resource;
2587 + goto err_no_verf;
2588 + }
2589 +- maxcount = min_t(u32, readdir->rd_maxcount, INT_MAX);
2590 ++ maxcount = svc_max_payload(resp->rqstp);
2591 ++ maxcount = min_t(u32, readdir->rd_maxcount, maxcount);
2592 + /*
2593 + * Note the rfc defines rd_maxcount as the size of the
2594 + * READDIR4resok structure, which includes the verifier above
2595 +@@ -3652,7 +3653,7 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
2596 +
2597 + /* RFC 3530 14.2.24 allows us to ignore dircount when it's 0: */
2598 + if (!readdir->rd_dircount)
2599 +- readdir->rd_dircount = INT_MAX;
2600 ++ readdir->rd_dircount = svc_max_payload(resp->rqstp);
2601 +
2602 + readdir->xdr = xdr;
2603 + readdir->rd_maxcount = maxcount;
2604 +diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
2605 +index 7d764e3b6c79..504658fd0d08 100644
2606 +--- a/fs/ubifs/journal.c
2607 ++++ b/fs/ubifs/journal.c
2608 +@@ -1265,7 +1265,7 @@ static int recomp_data_node(const struct ubifs_info *c,
2609 + int err, len, compr_type, out_len;
2610 +
2611 + out_len = le32_to_cpu(dn->size);
2612 +- buf = kmalloc(out_len * WORST_COMPR_FACTOR, GFP_NOFS);
2613 ++ buf = kmalloc_array(out_len, WORST_COMPR_FACTOR, GFP_NOFS);
2614 + if (!buf)
2615 + return -ENOMEM;
2616 +
2617 +diff --git a/fs/udf/directory.c b/fs/udf/directory.c
2618 +index 988d5352bdb8..48ef184929ec 100644
2619 +--- a/fs/udf/directory.c
2620 ++++ b/fs/udf/directory.c
2621 +@@ -150,6 +150,9 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
2622 + sizeof(struct fileIdentDesc));
2623 + }
2624 + }
2625 ++ /* Got last entry outside of dir size - fs is corrupted! */
2626 ++ if (*nf_pos > dir->i_size)
2627 ++ return NULL;
2628 + return fi;
2629 + }
2630 +
2631 +diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
2632 +index f6a816129856..bd738aafd432 100644
2633 +--- a/include/linux/blkdev.h
2634 ++++ b/include/linux/blkdev.h
2635 +@@ -901,8 +901,8 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q,
2636 + if (!q->limits.chunk_sectors)
2637 + return q->limits.max_sectors;
2638 +
2639 +- return q->limits.chunk_sectors -
2640 +- (offset & (q->limits.chunk_sectors - 1));
2641 ++ return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors -
2642 ++ (offset & (q->limits.chunk_sectors - 1))));
2643 + }
2644 +
2645 + static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
2646 +diff --git a/include/linux/compiler.h b/include/linux/compiler.h
2647 +index 5ce911db7d88..4f3dfabb680f 100644
2648 +--- a/include/linux/compiler.h
2649 ++++ b/include/linux/compiler.h
2650 +@@ -113,7 +113,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
2651 + #define unlikely_notrace(x) __builtin_expect(!!(x), 0)
2652 +
2653 + #define __branch_check__(x, expect) ({ \
2654 +- int ______r; \
2655 ++ long ______r; \
2656 + static struct ftrace_branch_data \
2657 + __attribute__((__aligned__(4))) \
2658 + __attribute__((section("_ftrace_annotated_branch"))) \
2659 +diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h
2660 +index 70a5164f4728..821965c90070 100644
2661 +--- a/include/linux/iio/buffer.h
2662 ++++ b/include/linux/iio/buffer.h
2663 +@@ -61,7 +61,7 @@ struct iio_buffer_access_funcs {
2664 + int (*request_update)(struct iio_buffer *buffer);
2665 +
2666 + int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd);
2667 +- int (*set_length)(struct iio_buffer *buffer, int length);
2668 ++ int (*set_length)(struct iio_buffer *buffer, unsigned int length);
2669 +
2670 + int (*enable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
2671 + int (*disable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
2672 +@@ -96,8 +96,8 @@ struct iio_buffer_access_funcs {
2673 + * @watermark: [INTERN] number of datums to wait for poll/read.
2674 + */
2675 + struct iio_buffer {
2676 +- int length;
2677 +- int bytes_per_datum;
2678 ++ unsigned int length;
2679 ++ size_t bytes_per_datum;
2680 + struct attribute_group *scan_el_attrs;
2681 + long *scan_mask;
2682 + bool scan_timestamp;
2683 +diff --git a/kernel/printk/nmi.c b/kernel/printk/nmi.c
2684 +index 16bab471c7e2..5fa65aa904d3 100644
2685 +--- a/kernel/printk/nmi.c
2686 ++++ b/kernel/printk/nmi.c
2687 +@@ -63,6 +63,7 @@ static int vprintk_nmi(const char *fmt, va_list args)
2688 + struct nmi_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
2689 + int add = 0;
2690 + size_t len;
2691 ++ va_list ap;
2692 +
2693 + again:
2694 + len = atomic_read(&s->len);
2695 +@@ -79,7 +80,9 @@ static int vprintk_nmi(const char *fmt, va_list args)
2696 + if (!len)
2697 + smp_rmb();
2698 +
2699 +- add = vsnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, args);
2700 ++ va_copy(ap, args);
2701 ++ add = vsnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, ap);
2702 ++ va_end(ap);
2703 +
2704 + /*
2705 + * Do it once again if the buffer has been flushed in the meantime.
2706 +diff --git a/kernel/time/time.c b/kernel/time/time.c
2707 +index bd62fb8e8e77..39468651a064 100644
2708 +--- a/kernel/time/time.c
2709 ++++ b/kernel/time/time.c
2710 +@@ -28,6 +28,7 @@
2711 + */
2712 +
2713 + #include <linux/export.h>
2714 ++#include <linux/kernel.h>
2715 + #include <linux/timex.h>
2716 + #include <linux/capability.h>
2717 + #include <linux/timekeeper_internal.h>
2718 +@@ -258,9 +259,10 @@ unsigned int jiffies_to_msecs(const unsigned long j)
2719 + return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
2720 + #else
2721 + # if BITS_PER_LONG == 32
2722 +- return (HZ_TO_MSEC_MUL32 * j) >> HZ_TO_MSEC_SHR32;
2723 ++ return (HZ_TO_MSEC_MUL32 * j + (1ULL << HZ_TO_MSEC_SHR32) - 1) >>
2724 ++ HZ_TO_MSEC_SHR32;
2725 + # else
2726 +- return (j * HZ_TO_MSEC_NUM) / HZ_TO_MSEC_DEN;
2727 ++ return DIV_ROUND_UP(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN);
2728 + # endif
2729 + #endif
2730 + }
2731 +diff --git a/lib/vsprintf.c b/lib/vsprintf.c
2732 +index 0967771d8f7f..79ba3cc07026 100644
2733 +--- a/lib/vsprintf.c
2734 ++++ b/lib/vsprintf.c
2735 +@@ -1391,9 +1391,6 @@ char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec,
2736 + return string(buf, end, NULL, spec);
2737 +
2738 + switch (fmt[1]) {
2739 +- case 'r':
2740 +- return number(buf, end, clk_get_rate(clk), spec);
2741 +-
2742 + case 'n':
2743 + default:
2744 + #ifdef CONFIG_COMMON_CLK
2745 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2746 +index 183436e4a8c1..f03a1430a3cb 100644
2747 +--- a/sound/pci/hda/patch_realtek.c
2748 ++++ b/sound/pci/hda/patch_realtek.c
2749 +@@ -2448,6 +2448,7 @@ static const struct snd_pci_quirk alc262_fixup_tbl[] = {
2750 + SND_PCI_QUIRK(0x10cf, 0x1397, "Fujitsu Lifebook S7110", ALC262_FIXUP_FSC_S7110),
2751 + SND_PCI_QUIRK(0x10cf, 0x142d, "Fujitsu Lifebook E8410", ALC262_FIXUP_BENQ),
2752 + SND_PCI_QUIRK(0x10f1, 0x2915, "Tyan Thunder n6650W", ALC262_FIXUP_TYAN),
2753 ++ SND_PCI_QUIRK(0x1734, 0x1141, "FSC ESPRIMO U9210", ALC262_FIXUP_FSC_H270),
2754 + SND_PCI_QUIRK(0x1734, 0x1147, "FSC Celsius H270", ALC262_FIXUP_FSC_H270),
2755 + SND_PCI_QUIRK(0x17aa, 0x384e, "Lenovo 3000", ALC262_FIXUP_LENOVO_3000),
2756 + SND_PCI_QUIRK(0x17ff, 0x0560, "Benq ED8", ALC262_FIXUP_BENQ),
2757 +@@ -4473,7 +4474,6 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
2758 + struct alc_spec *spec = codec->spec;
2759 +
2760 + if (action == HDA_FIXUP_ACT_PRE_PROBE) {
2761 +- spec->shutup = alc_no_shutup; /* reduce click noise */
2762 + spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */
2763 + spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
2764 + codec->power_save_node = 0; /* avoid click noises */
2765 +@@ -4835,6 +4835,13 @@ static void alc280_fixup_hp_9480m(struct hda_codec *codec,
2766 + /* for hda_fixup_thinkpad_acpi() */
2767 + #include "thinkpad_helper.c"
2768 +
2769 ++static void alc_fixup_thinkpad_acpi(struct hda_codec *codec,
2770 ++ const struct hda_fixup *fix, int action)
2771 ++{
2772 ++ alc_fixup_no_shutup(codec, fix, action); /* reduce click noise */
2773 ++ hda_fixup_thinkpad_acpi(codec, fix, action);
2774 ++}
2775 ++
2776 + /* for dell wmi mic mute led */
2777 + #include "dell_wmi_helper.c"
2778 +
2779 +@@ -5350,7 +5357,7 @@ static const struct hda_fixup alc269_fixups[] = {
2780 + },
2781 + [ALC269_FIXUP_THINKPAD_ACPI] = {
2782 + .type = HDA_FIXUP_FUNC,
2783 +- .v.func = hda_fixup_thinkpad_acpi,
2784 ++ .v.func = alc_fixup_thinkpad_acpi,
2785 + .chained = true,
2786 + .chain_id = ALC269_FIXUP_SKU_IGNORE,
2787 + },
2788 +diff --git a/sound/soc/cirrus/edb93xx.c b/sound/soc/cirrus/edb93xx.c
2789 +index 85962657aabe..517963ef4847 100644
2790 +--- a/sound/soc/cirrus/edb93xx.c
2791 ++++ b/sound/soc/cirrus/edb93xx.c
2792 +@@ -67,7 +67,7 @@ static struct snd_soc_dai_link edb93xx_dai = {
2793 + .cpu_dai_name = "ep93xx-i2s",
2794 + .codec_name = "spi0.0",
2795 + .codec_dai_name = "cs4271-hifi",
2796 +- .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_IF |
2797 ++ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
2798 + SND_SOC_DAIFMT_CBS_CFS,
2799 + .ops = &edb93xx_ops,
2800 + };
2801 +diff --git a/sound/soc/cirrus/ep93xx-i2s.c b/sound/soc/cirrus/ep93xx-i2s.c
2802 +index 934f8aefdd90..0dc3852c4621 100644
2803 +--- a/sound/soc/cirrus/ep93xx-i2s.c
2804 ++++ b/sound/soc/cirrus/ep93xx-i2s.c
2805 +@@ -51,7 +51,9 @@
2806 + #define EP93XX_I2S_WRDLEN_24 (1 << 0)
2807 + #define EP93XX_I2S_WRDLEN_32 (2 << 0)
2808 +
2809 +-#define EP93XX_I2S_LINCTRLDATA_R_JUST (1 << 2) /* Right justify */
2810 ++#define EP93XX_I2S_RXLINCTRLDATA_R_JUST BIT(1) /* Right justify */
2811 ++
2812 ++#define EP93XX_I2S_TXLINCTRLDATA_R_JUST BIT(2) /* Right justify */
2813 +
2814 + #define EP93XX_I2S_CLKCFG_LRS (1 << 0) /* lrclk polarity */
2815 + #define EP93XX_I2S_CLKCFG_CKP (1 << 1) /* Bit clock polarity */
2816 +@@ -170,25 +172,25 @@ static int ep93xx_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
2817 + unsigned int fmt)
2818 + {
2819 + struct ep93xx_i2s_info *info = snd_soc_dai_get_drvdata(cpu_dai);
2820 +- unsigned int clk_cfg, lin_ctrl;
2821 ++ unsigned int clk_cfg;
2822 ++ unsigned int txlin_ctrl = 0;
2823 ++ unsigned int rxlin_ctrl = 0;
2824 +
2825 + clk_cfg = ep93xx_i2s_read_reg(info, EP93XX_I2S_RXCLKCFG);
2826 +- lin_ctrl = ep93xx_i2s_read_reg(info, EP93XX_I2S_RXLINCTRLDATA);
2827 +
2828 + switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
2829 + case SND_SOC_DAIFMT_I2S:
2830 + clk_cfg |= EP93XX_I2S_CLKCFG_REL;
2831 +- lin_ctrl &= ~EP93XX_I2S_LINCTRLDATA_R_JUST;
2832 + break;
2833 +
2834 + case SND_SOC_DAIFMT_LEFT_J:
2835 + clk_cfg &= ~EP93XX_I2S_CLKCFG_REL;
2836 +- lin_ctrl &= ~EP93XX_I2S_LINCTRLDATA_R_JUST;
2837 + break;
2838 +
2839 + case SND_SOC_DAIFMT_RIGHT_J:
2840 + clk_cfg &= ~EP93XX_I2S_CLKCFG_REL;
2841 +- lin_ctrl |= EP93XX_I2S_LINCTRLDATA_R_JUST;
2842 ++ rxlin_ctrl |= EP93XX_I2S_RXLINCTRLDATA_R_JUST;
2843 ++ txlin_ctrl |= EP93XX_I2S_TXLINCTRLDATA_R_JUST;
2844 + break;
2845 +
2846 + default:
2847 +@@ -213,32 +215,32 @@ static int ep93xx_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
2848 + switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
2849 + case SND_SOC_DAIFMT_NB_NF:
2850 + /* Negative bit clock, lrclk low on left word */
2851 +- clk_cfg &= ~(EP93XX_I2S_CLKCFG_CKP | EP93XX_I2S_CLKCFG_REL);
2852 ++ clk_cfg &= ~(EP93XX_I2S_CLKCFG_CKP | EP93XX_I2S_CLKCFG_LRS);
2853 + break;
2854 +
2855 + case SND_SOC_DAIFMT_NB_IF:
2856 + /* Negative bit clock, lrclk low on right word */
2857 + clk_cfg &= ~EP93XX_I2S_CLKCFG_CKP;
2858 +- clk_cfg |= EP93XX_I2S_CLKCFG_REL;
2859 ++ clk_cfg |= EP93XX_I2S_CLKCFG_LRS;
2860 + break;
2861 +
2862 + case SND_SOC_DAIFMT_IB_NF:
2863 + /* Positive bit clock, lrclk low on left word */
2864 + clk_cfg |= EP93XX_I2S_CLKCFG_CKP;
2865 +- clk_cfg &= ~EP93XX_I2S_CLKCFG_REL;
2866 ++ clk_cfg &= ~EP93XX_I2S_CLKCFG_LRS;
2867 + break;
2868 +
2869 + case SND_SOC_DAIFMT_IB_IF:
2870 + /* Positive bit clock, lrclk low on right word */
2871 +- clk_cfg |= EP93XX_I2S_CLKCFG_CKP | EP93XX_I2S_CLKCFG_REL;
2872 ++ clk_cfg |= EP93XX_I2S_CLKCFG_CKP | EP93XX_I2S_CLKCFG_LRS;
2873 + break;
2874 + }
2875 +
2876 + /* Write new register values */
2877 + ep93xx_i2s_write_reg(info, EP93XX_I2S_RXCLKCFG, clk_cfg);
2878 + ep93xx_i2s_write_reg(info, EP93XX_I2S_TXCLKCFG, clk_cfg);
2879 +- ep93xx_i2s_write_reg(info, EP93XX_I2S_RXLINCTRLDATA, lin_ctrl);
2880 +- ep93xx_i2s_write_reg(info, EP93XX_I2S_TXLINCTRLDATA, lin_ctrl);
2881 ++ ep93xx_i2s_write_reg(info, EP93XX_I2S_RXLINCTRLDATA, rxlin_ctrl);
2882 ++ ep93xx_i2s_write_reg(info, EP93XX_I2S_TXLINCTRLDATA, txlin_ctrl);
2883 + return 0;
2884 + }
2885 +
2886 +diff --git a/sound/soc/cirrus/snappercl15.c b/sound/soc/cirrus/snappercl15.c
2887 +index 98089df08df6..c6737a573bc0 100644
2888 +--- a/sound/soc/cirrus/snappercl15.c
2889 ++++ b/sound/soc/cirrus/snappercl15.c
2890 +@@ -72,7 +72,7 @@ static struct snd_soc_dai_link snappercl15_dai = {
2891 + .codec_dai_name = "tlv320aic23-hifi",
2892 + .codec_name = "tlv320aic23-codec.0-001a",
2893 + .platform_name = "ep93xx-i2s",
2894 +- .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_IF |
2895 ++ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
2896 + SND_SOC_DAIFMT_CBS_CFS,
2897 + .ops = &snappercl15_ops,
2898 + };
2899 +diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
2900 +index 6780eba55ec2..0b5d132bc3dd 100644
2901 +--- a/sound/soc/soc-dapm.c
2902 ++++ b/sound/soc/soc-dapm.c
2903 +@@ -425,6 +425,8 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
2904 + static void dapm_kcontrol_free(struct snd_kcontrol *kctl)
2905 + {
2906 + struct dapm_kcontrol_data *data = snd_kcontrol_chip(kctl);
2907 ++
2908 ++ list_del(&data->paths);
2909 + kfree(data->wlist);
2910 + kfree(data);
2911 + }
2912 +diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
2913 +index d2c6cdd9d42b..8bec05365aae 100644
2914 +--- a/tools/perf/util/dso.c
2915 ++++ b/tools/perf/util/dso.c
2916 +@@ -253,6 +253,8 @@ int __kmod_path__parse(struct kmod_path *m, const char *path,
2917 + if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
2918 + (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
2919 + (strncmp(name, "[vdso]", 6) == 0) ||
2920 ++ (strncmp(name, "[vdso32]", 8) == 0) ||
2921 ++ (strncmp(name, "[vdsox32]", 9) == 0) ||
2922 + (strncmp(name, "[vsyscall]", 10) == 0)) {
2923 + m->kmod = false;
2924 +
2925 +diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
2926 +index cac39532c057..d27715ff9a5f 100644
2927 +--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
2928 ++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
2929 +@@ -112,6 +112,7 @@ struct intel_pt_decoder {
2930 + bool have_cyc;
2931 + bool fixup_last_mtc;
2932 + bool have_last_ip;
2933 ++ enum intel_pt_param_flags flags;
2934 + uint64_t pos;
2935 + uint64_t last_ip;
2936 + uint64_t ip;
2937 +@@ -215,6 +216,8 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
2938 + decoder->data = params->data;
2939 + decoder->return_compression = params->return_compression;
2940 +
2941 ++ decoder->flags = params->flags;
2942 ++
2943 + decoder->period = params->period;
2944 + decoder->period_type = params->period_type;
2945 +
2946 +@@ -1012,6 +1015,15 @@ static int intel_pt_walk_insn(struct intel_pt_decoder *decoder,
2947 + return err;
2948 + }
2949 +
2950 ++static inline bool intel_pt_fup_with_nlip(struct intel_pt_decoder *decoder,
2951 ++ struct intel_pt_insn *intel_pt_insn,
2952 ++ uint64_t ip, int err)
2953 ++{
2954 ++ return decoder->flags & INTEL_PT_FUP_WITH_NLIP && !err &&
2955 ++ intel_pt_insn->branch == INTEL_PT_BR_INDIRECT &&
2956 ++ ip == decoder->ip + intel_pt_insn->length;
2957 ++}
2958 ++
2959 + static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
2960 + {
2961 + struct intel_pt_insn intel_pt_insn;
2962 +@@ -1024,7 +1036,8 @@ static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
2963 + err = intel_pt_walk_insn(decoder, &intel_pt_insn, ip);
2964 + if (err == INTEL_PT_RETURN)
2965 + return 0;
2966 +- if (err == -EAGAIN) {
2967 ++ if (err == -EAGAIN ||
2968 ++ intel_pt_fup_with_nlip(decoder, &intel_pt_insn, ip, err)) {
2969 + if (decoder->set_fup_tx_flags) {
2970 + decoder->set_fup_tx_flags = false;
2971 + decoder->tx_flags = decoder->fup_tx_flags;
2972 +@@ -1034,7 +1047,7 @@ static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
2973 + decoder->state.flags = decoder->fup_tx_flags;
2974 + return 0;
2975 + }
2976 +- return err;
2977 ++ return -EAGAIN;
2978 + }
2979 + decoder->set_fup_tx_flags = false;
2980 + if (err)
2981 +@@ -1298,7 +1311,6 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder)
2982 + {
2983 + intel_pt_log("ERROR: Buffer overflow\n");
2984 + intel_pt_clear_tx_flags(decoder);
2985 +- decoder->have_tma = false;
2986 + decoder->cbr = 0;
2987 + decoder->timestamp_insn_cnt = 0;
2988 + decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
2989 +@@ -1517,7 +1529,6 @@ static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
2990 + case INTEL_PT_PSB:
2991 + case INTEL_PT_TSC:
2992 + case INTEL_PT_TMA:
2993 +- case INTEL_PT_CBR:
2994 + case INTEL_PT_MODE_TSX:
2995 + case INTEL_PT_BAD:
2996 + case INTEL_PT_PSBEND:
2997 +@@ -1526,6 +1537,10 @@ static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
2998 + decoder->pkt_step = 0;
2999 + return -ENOENT;
3000 +
3001 ++ case INTEL_PT_CBR:
3002 ++ intel_pt_calc_cbr(decoder);
3003 ++ break;
3004 ++
3005 + case INTEL_PT_OVF:
3006 + return intel_pt_overflow(decoder);
3007 +
3008 +diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
3009 +index 9ae4df1dcedc..2fe8f4c5aeb5 100644
3010 +--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
3011 ++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
3012 +@@ -53,6 +53,14 @@ enum {
3013 + INTEL_PT_ERR_MAX,
3014 + };
3015 +
3016 ++enum intel_pt_param_flags {
3017 ++ /*
3018 ++ * FUP packet can contain next linear instruction pointer instead of
3019 ++ * current linear instruction pointer.
3020 ++ */
3021 ++ INTEL_PT_FUP_WITH_NLIP = 1 << 0,
3022 ++};
3023 ++
3024 + struct intel_pt_state {
3025 + enum intel_pt_sample_type type;
3026 + int err;
3027 +@@ -92,6 +100,7 @@ struct intel_pt_params {
3028 + unsigned int mtc_period;
3029 + uint32_t tsc_ctc_ratio_n;
3030 + uint32_t tsc_ctc_ratio_d;
3031 ++ enum intel_pt_param_flags flags;
3032 + };
3033 +
3034 + struct intel_pt_decoder;
3035 +diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
3036 +index 7528ae4f7e28..e5c6caf913f3 100644
3037 +--- a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
3038 ++++ b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
3039 +@@ -281,7 +281,7 @@ static int intel_pt_get_cyc(unsigned int byte, const unsigned char *buf,
3040 + if (len < offs)
3041 + return INTEL_PT_NEED_MORE_BYTES;
3042 + byte = buf[offs++];
3043 +- payload |= (byte >> 1) << shift;
3044 ++ payload |= ((uint64_t)byte >> 1) << shift;
3045 + }
3046 +
3047 + packet->type = INTEL_PT_CYC;
3048 +diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
3049 +index b1161d725ce9..d40ab4cf8932 100644
3050 +--- a/tools/perf/util/intel-pt.c
3051 ++++ b/tools/perf/util/intel-pt.c
3052 +@@ -752,6 +752,7 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
3053 + unsigned int queue_nr)
3054 + {
3055 + struct intel_pt_params params = { .get_trace = 0, };
3056 ++ struct perf_env *env = pt->machine->env;
3057 + struct intel_pt_queue *ptq;
3058 +
3059 + ptq = zalloc(sizeof(struct intel_pt_queue));
3060 +@@ -832,6 +833,9 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
3061 + }
3062 + }
3063 +
3064 ++ if (env->cpuid && !strncmp(env->cpuid, "GenuineIntel,6,92,", 18))
3065 ++ params.flags |= INTEL_PT_FUP_WITH_NLIP;
3066 ++
3067 + ptq->decoder = intel_pt_decoder_new(&params);
3068 + if (!ptq->decoder)
3069 + goto out_free;
3070 +@@ -1344,6 +1348,7 @@ static int intel_pt_sample(struct intel_pt_queue *ptq)
3071 +
3072 + if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
3073 + switch (ptq->switch_state) {
3074 ++ case INTEL_PT_SS_NOT_TRACING:
3075 + case INTEL_PT_SS_UNKNOWN:
3076 + case INTEL_PT_SS_EXPECTING_SWITCH_IP:
3077 + err = intel_pt_next_tid(pt, ptq);