Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Fri, 11 Aug 2017 17:41:59
Message-Id: 1502473304.9876d1f9f2910ed10d2b590547570755b6847dbc.mpagano@gentoo
commit:     9876d1f9f2910ed10d2b590547570755b6847dbc
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Aug 11 17:41:44 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Aug 11 17:41:44 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9876d1f9

Linux patch 4.9.42

 0000_README             |    4 +
 1041_linux-4.9.42.patch | 3113 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3117 insertions(+)

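For context only (not part of the commit): a minimal sketch of how an incremental genpatches diff such as 1041_linux-4.9.42.patch is typically applied on top of the previous kernel sources. The gentoo-sources ebuilds normally handle this automatically; the tree and patch paths below are illustrative assumptions.

    # Illustrative only: apply the incremental patch by hand (assumed paths).
    import subprocess

    kernel_tree = "/usr/src/linux-4.9.41"            # assumed location of the 4.9.41 sources
    patch_file = "/var/tmp/1041_linux-4.9.42.patch"  # assumed copy of the new patch

    with open(patch_file) as fh:
        # Equivalent to: cd <kernel_tree> && patch -p1 < <patch_file>
        subprocess.run(["patch", "-p1"], cwd=kernel_tree, stdin=fh, check=True)
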
14 diff --git a/0000_README b/0000_README
15 index eacc709..c5dce51 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -207,6 +207,10 @@ Patch: 1040_linux-4.9.41.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.9.41
21
22 +Patch: 1041_linux-4.9.42.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.9.42
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1041_linux-4.9.42.patch b/1041_linux-4.9.42.patch
31 new file mode 100644
32 index 0000000..7b92a09
33 --- /dev/null
34 +++ b/1041_linux-4.9.42.patch
35 @@ -0,0 +1,3113 @@
36 +diff --git a/Makefile b/Makefile
37 +index 82eb3d1ee801..34d4d9f8a4b2 100644
38 +--- a/Makefile
39 ++++ b/Makefile
40 +@@ -1,6 +1,6 @@
41 + VERSION = 4
42 + PATCHLEVEL = 9
43 +-SUBLEVEL = 41
44 ++SUBLEVEL = 42
45 + EXTRAVERSION =
46 + NAME = Roaring Lionus
47 +
48 +diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
49 +index 7037201c5e3a..f3baa896ce84 100644
50 +--- a/arch/arm/boot/dts/Makefile
51 ++++ b/arch/arm/boot/dts/Makefile
52 +@@ -820,6 +820,7 @@ dtb-$(CONFIG_MACH_SUN8I) += \
53 + sun8i-a83t-allwinner-h8homlet-v2.dtb \
54 + sun8i-a83t-cubietruck-plus.dtb \
55 + sun8i-h3-bananapi-m2-plus.dtb \
56 ++ sun8i-h3-nanopi-m1.dtb \
57 + sun8i-h3-nanopi-neo.dtb \
58 + sun8i-h3-orangepi-2.dtb \
59 + sun8i-h3-orangepi-lite.dtb \
60 +diff --git a/arch/arm/boot/dts/armada-388-gp.dts b/arch/arm/boot/dts/armada-388-gp.dts
61 +index 895fa6cfa15a..563901e0ec07 100644
62 +--- a/arch/arm/boot/dts/armada-388-gp.dts
63 ++++ b/arch/arm/boot/dts/armada-388-gp.dts
64 +@@ -75,7 +75,7 @@
65 + pinctrl-names = "default";
66 + pinctrl-0 = <&pca0_pins>;
67 + interrupt-parent = <&gpio0>;
68 +- interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
69 ++ interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
70 + gpio-controller;
71 + #gpio-cells = <2>;
72 + interrupt-controller;
73 +@@ -87,7 +87,7 @@
74 + compatible = "nxp,pca9555";
75 + pinctrl-names = "default";
76 + interrupt-parent = <&gpio0>;
77 +- interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
78 ++ interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
79 + gpio-controller;
80 + #gpio-cells = <2>;
81 + interrupt-controller;
82 +diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts
83 +index 5ea4915f6d75..10d307408f23 100644
84 +--- a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts
85 ++++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts
86 +@@ -56,7 +56,7 @@
87 + };
88 +
89 + &pio {
90 +- mmc2_pins_nrst: mmc2@0 {
91 ++ mmc2_pins_nrst: mmc2-rst-pin {
92 + allwinner,pins = "PC16";
93 + allwinner,function = "gpio_out";
94 + allwinner,drive = <SUN4I_PINCTRL_10_MA>;
95 +diff --git a/arch/arm/boot/dts/tango4-vantage-1172.dts b/arch/arm/boot/dts/tango4-vantage-1172.dts
96 +index 4cab64cb581e..e3a51e3538b7 100644
97 +--- a/arch/arm/boot/dts/tango4-vantage-1172.dts
98 ++++ b/arch/arm/boot/dts/tango4-vantage-1172.dts
99 +@@ -21,7 +21,7 @@
100 + };
101 +
102 + &eth0 {
103 +- phy-connection-type = "rgmii";
104 ++ phy-connection-type = "rgmii-id";
105 + phy-handle = <&eth0_phy>;
106 + #address-cells = <1>;
107 + #size-cells = <0>;
108 +diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
109 +index bfe2a2f5a644..22b73112b75f 100644
110 +--- a/arch/arm/include/asm/ftrace.h
111 ++++ b/arch/arm/include/asm/ftrace.h
112 +@@ -54,6 +54,24 @@ static inline void *return_address(unsigned int level)
113 +
114 + #define ftrace_return_address(n) return_address(n)
115 +
116 ++#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
117 ++
118 ++static inline bool arch_syscall_match_sym_name(const char *sym,
119 ++ const char *name)
120 ++{
121 ++ if (!strcmp(sym, "sys_mmap2"))
122 ++ sym = "sys_mmap_pgoff";
123 ++ else if (!strcmp(sym, "sys_statfs64_wrapper"))
124 ++ sym = "sys_statfs64";
125 ++ else if (!strcmp(sym, "sys_fstatfs64_wrapper"))
126 ++ sym = "sys_fstatfs64";
127 ++ else if (!strcmp(sym, "sys_arm_fadvise64_64"))
128 ++ sym = "sys_fadvise64_64";
129 ++
130 ++ /* Ignore case since sym may start with "SyS" instead of "sys" */
131 ++ return !strcasecmp(sym, name);
132 ++}
133 ++
134 + #endif /* ifndef __ASSEMBLY__ */
135 +
136 + #endif /* _ASM_ARM_FTRACE */
137 +diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
138 +index c721ea2fdbd8..df757c9675e6 100644
139 +--- a/arch/parisc/kernel/cache.c
140 ++++ b/arch/parisc/kernel/cache.c
141 +@@ -604,13 +604,12 @@ void flush_cache_range(struct vm_area_struct *vma,
142 + if (parisc_requires_coherency())
143 + flush_tlb_range(vma, start, end);
144 +
145 +- if ((end - start) >= parisc_cache_flush_threshold) {
146 ++ if ((end - start) >= parisc_cache_flush_threshold
147 ++ || vma->vm_mm->context != mfsp(3)) {
148 + flush_cache_all();
149 + return;
150 + }
151 +
152 +- BUG_ON(vma->vm_mm->context != mfsp(3));
153 +-
154 + flush_user_dcache_range_asm(start, end);
155 + if (vma->vm_flags & VM_EXEC)
156 + flush_user_icache_range_asm(start, end);
157 +diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
158 +index 3c05c311e35e..028a22bfa90c 100644
159 +--- a/arch/powerpc/kernel/irq.c
160 ++++ b/arch/powerpc/kernel/irq.c
161 +@@ -146,6 +146,19 @@ notrace unsigned int __check_irq_replay(void)
162 +
163 + /* Clear bit 0 which we wouldn't clear otherwise */
164 + local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
165 ++ if (happened & PACA_IRQ_HARD_DIS) {
166 ++ /*
167 ++ * We may have missed a decrementer interrupt if hard disabled.
168 ++ * Check the decrementer register in case we had a rollover
169 ++ * while hard disabled.
170 ++ */
171 ++ if (!(happened & PACA_IRQ_DEC)) {
172 ++ if (decrementer_check_overflow()) {
173 ++ local_paca->irq_happened |= PACA_IRQ_DEC;
174 ++ happened |= PACA_IRQ_DEC;
175 ++ }
176 ++ }
177 ++ }
178 +
179 + /*
180 + * Force the delivery of pending soft-disabled interrupts on PS3.
181 +@@ -171,7 +184,7 @@ notrace unsigned int __check_irq_replay(void)
182 + * in case we also had a rollover while hard disabled
183 + */
184 + local_paca->irq_happened &= ~PACA_IRQ_DEC;
185 +- if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow())
186 ++ if (happened & PACA_IRQ_DEC)
187 + return 0x900;
188 +
189 + /* Finally check if an external interrupt happened */
190 +diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
191 +index 5c8f12fe9721..dcbb9144c16d 100644
192 +--- a/arch/powerpc/kernel/ptrace.c
193 ++++ b/arch/powerpc/kernel/ptrace.c
194 +@@ -127,12 +127,19 @@ static void flush_tmregs_to_thread(struct task_struct *tsk)
195 + * If task is not current, it will have been flushed already to
196 + * it's thread_struct during __switch_to().
197 + *
198 +- * A reclaim flushes ALL the state.
199 ++ * A reclaim flushes ALL the state or if not in TM save TM SPRs
200 ++ * in the appropriate thread structures from live.
201 + */
202 +
203 +- if (tsk == current && MSR_TM_SUSPENDED(mfmsr()))
204 +- tm_reclaim_current(TM_CAUSE_SIGNAL);
205 ++ if (tsk != current)
206 ++ return;
207 +
208 ++ if (MSR_TM_SUSPENDED(mfmsr())) {
209 ++ tm_reclaim_current(TM_CAUSE_SIGNAL);
210 ++ } else {
211 ++ tm_enable();
212 ++ tm_save_sprs(&(tsk->thread));
213 ++ }
214 + }
215 + #else
216 + static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
217 +diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h
218 +index ec9c04de3664..ff05992dae7a 100644
219 +--- a/arch/sparc/include/asm/trap_block.h
220 ++++ b/arch/sparc/include/asm/trap_block.h
221 +@@ -54,6 +54,7 @@ extern struct trap_per_cpu trap_block[NR_CPUS];
222 + void init_cur_cpu_trap(struct thread_info *);
223 + void setup_tba(void);
224 + extern int ncpus_probed;
225 ++extern u64 cpu_mondo_counter[NR_CPUS];
226 +
227 + unsigned long real_hard_smp_processor_id(void);
228 +
229 +diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
230 +index d5807d24b98f..2deb89ef1d5f 100644
231 +--- a/arch/sparc/kernel/smp_64.c
232 ++++ b/arch/sparc/kernel/smp_64.c
233 +@@ -621,22 +621,48 @@ static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
234 + }
235 + }
236 +
237 +-/* Multi-cpu list version. */
238 ++#define CPU_MONDO_COUNTER(cpuid) (cpu_mondo_counter[cpuid])
239 ++#define MONDO_USEC_WAIT_MIN 2
240 ++#define MONDO_USEC_WAIT_MAX 100
241 ++#define MONDO_RETRY_LIMIT 500000
242 ++
243 ++/* Multi-cpu list version.
244 ++ *
245 ++ * Deliver xcalls to 'cnt' number of cpus in 'cpu_list'.
246 ++ * Sometimes not all cpus receive the mondo, requiring us to re-send
247 ++ * the mondo until all cpus have received, or cpus are truly stuck
248 ++ * unable to receive mondo, and we timeout.
249 ++ * Occasionally a target cpu strand is borrowed briefly by hypervisor to
250 ++ * perform guest service, such as PCIe error handling. Consider the
251 ++ * service time, 1 second overall wait is reasonable for 1 cpu.
252 ++ * Here two in-between mondo check wait time are defined: 2 usec for
253 ++ * single cpu quick turn around and up to 100usec for large cpu count.
254 ++ * Deliver mondo to large number of cpus could take longer, we adjusts
255 ++ * the retry count as long as target cpus are making forward progress.
256 ++ */
257 + static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
258 + {
259 +- int retries, this_cpu, prev_sent, i, saw_cpu_error;
260 ++ int this_cpu, tot_cpus, prev_sent, i, rem;
261 ++ int usec_wait, retries, tot_retries;
262 ++ u16 first_cpu = 0xffff;
263 ++ unsigned long xc_rcvd = 0;
264 + unsigned long status;
265 ++ int ecpuerror_id = 0;
266 ++ int enocpu_id = 0;
267 + u16 *cpu_list;
268 ++ u16 cpu;
269 +
270 + this_cpu = smp_processor_id();
271 +-
272 + cpu_list = __va(tb->cpu_list_pa);
273 +-
274 +- saw_cpu_error = 0;
275 +- retries = 0;
276 ++ usec_wait = cnt * MONDO_USEC_WAIT_MIN;
277 ++ if (usec_wait > MONDO_USEC_WAIT_MAX)
278 ++ usec_wait = MONDO_USEC_WAIT_MAX;
279 ++ retries = tot_retries = 0;
280 ++ tot_cpus = cnt;
281 + prev_sent = 0;
282 ++
283 + do {
284 +- int forward_progress, n_sent;
285 ++ int n_sent, mondo_delivered, target_cpu_busy;
286 +
287 + status = sun4v_cpu_mondo_send(cnt,
288 + tb->cpu_list_pa,
289 +@@ -644,94 +670,113 @@ static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
290 +
291 + /* HV_EOK means all cpus received the xcall, we're done. */
292 + if (likely(status == HV_EOK))
293 +- break;
294 ++ goto xcall_done;
295 ++
296 ++ /* If not these non-fatal errors, panic */
297 ++ if (unlikely((status != HV_EWOULDBLOCK) &&
298 ++ (status != HV_ECPUERROR) &&
299 ++ (status != HV_ENOCPU)))
300 ++ goto fatal_errors;
301 +
302 + /* First, see if we made any forward progress.
303 ++ *
304 ++ * Go through the cpu_list, count the target cpus that have
305 ++ * received our mondo (n_sent), and those that did not (rem).
306 ++ * Re-pack cpu_list with the cpus remain to be retried in the
307 ++ * front - this simplifies tracking the truly stalled cpus.
308 + *
309 + * The hypervisor indicates successful sends by setting
310 + * cpu list entries to the value 0xffff.
311 ++ *
312 ++ * EWOULDBLOCK means some target cpus did not receive the
313 ++ * mondo and retry usually helps.
314 ++ *
315 ++ * ECPUERROR means at least one target cpu is in error state,
316 ++ * it's usually safe to skip the faulty cpu and retry.
317 ++ *
318 ++ * ENOCPU means one of the target cpu doesn't belong to the
319 ++ * domain, perhaps offlined which is unexpected, but not
320 ++ * fatal and it's okay to skip the offlined cpu.
321 + */
322 ++ rem = 0;
323 + n_sent = 0;
324 + for (i = 0; i < cnt; i++) {
325 +- if (likely(cpu_list[i] == 0xffff))
326 ++ cpu = cpu_list[i];
327 ++ if (likely(cpu == 0xffff)) {
328 + n_sent++;
329 ++ } else if ((status == HV_ECPUERROR) &&
330 ++ (sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) {
331 ++ ecpuerror_id = cpu + 1;
332 ++ } else if (status == HV_ENOCPU && !cpu_online(cpu)) {
333 ++ enocpu_id = cpu + 1;
334 ++ } else {
335 ++ cpu_list[rem++] = cpu;
336 ++ }
337 + }
338 +
339 +- forward_progress = 0;
340 +- if (n_sent > prev_sent)
341 +- forward_progress = 1;
342 ++ /* No cpu remained, we're done. */
343 ++ if (rem == 0)
344 ++ break;
345 +
346 +- prev_sent = n_sent;
347 ++ /* Otherwise, update the cpu count for retry. */
348 ++ cnt = rem;
349 +
350 +- /* If we get a HV_ECPUERROR, then one or more of the cpus
351 +- * in the list are in error state. Use the cpu_state()
352 +- * hypervisor call to find out which cpus are in error state.
353 ++ /* Record the overall number of mondos received by the
354 ++ * first of the remaining cpus.
355 + */
356 +- if (unlikely(status == HV_ECPUERROR)) {
357 +- for (i = 0; i < cnt; i++) {
358 +- long err;
359 +- u16 cpu;
360 ++ if (first_cpu != cpu_list[0]) {
361 ++ first_cpu = cpu_list[0];
362 ++ xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
363 ++ }
364 +
365 +- cpu = cpu_list[i];
366 +- if (cpu == 0xffff)
367 +- continue;
368 ++ /* Was any mondo delivered successfully? */
369 ++ mondo_delivered = (n_sent > prev_sent);
370 ++ prev_sent = n_sent;
371 +
372 +- err = sun4v_cpu_state(cpu);
373 +- if (err == HV_CPU_STATE_ERROR) {
374 +- saw_cpu_error = (cpu + 1);
375 +- cpu_list[i] = 0xffff;
376 +- }
377 +- }
378 +- } else if (unlikely(status != HV_EWOULDBLOCK))
379 +- goto fatal_mondo_error;
380 ++ /* or, was any target cpu busy processing other mondos? */
381 ++ target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu));
382 ++ xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
383 +
384 +- /* Don't bother rewriting the CPU list, just leave the
385 +- * 0xffff and non-0xffff entries in there and the
386 +- * hypervisor will do the right thing.
387 +- *
388 +- * Only advance timeout state if we didn't make any
389 +- * forward progress.
390 ++ /* Retry count is for no progress. If we're making progress,
391 ++ * reset the retry count.
392 + */
393 +- if (unlikely(!forward_progress)) {
394 +- if (unlikely(++retries > 10000))
395 +- goto fatal_mondo_timeout;
396 +-
397 +- /* Delay a little bit to let other cpus catch up
398 +- * on their cpu mondo queue work.
399 +- */
400 +- udelay(2 * cnt);
401 ++ if (likely(mondo_delivered || target_cpu_busy)) {
402 ++ tot_retries += retries;
403 ++ retries = 0;
404 ++ } else if (unlikely(retries > MONDO_RETRY_LIMIT)) {
405 ++ goto fatal_mondo_timeout;
406 + }
407 +- } while (1);
408 +
409 +- if (unlikely(saw_cpu_error))
410 +- goto fatal_mondo_cpu_error;
411 ++ /* Delay a little bit to let other cpus catch up on
412 ++ * their cpu mondo queue work.
413 ++ */
414 ++ if (!mondo_delivered)
415 ++ udelay(usec_wait);
416 +
417 +- return;
418 ++ retries++;
419 ++ } while (1);
420 +
421 +-fatal_mondo_cpu_error:
422 +- printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
423 +- "(including %d) were in error state\n",
424 +- this_cpu, saw_cpu_error - 1);
425 ++xcall_done:
426 ++ if (unlikely(ecpuerror_id > 0)) {
427 ++ pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n",
428 ++ this_cpu, ecpuerror_id - 1);
429 ++ } else if (unlikely(enocpu_id > 0)) {
430 ++ pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n",
431 ++ this_cpu, enocpu_id - 1);
432 ++ }
433 + return;
434 +
435 ++fatal_errors:
436 ++ /* fatal errors include bad alignment, etc */
437 ++ pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n",
438 ++ this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
439 ++ panic("Unexpected SUN4V mondo error %lu\n", status);
440 ++
441 + fatal_mondo_timeout:
442 +- printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
443 +- " progress after %d retries.\n",
444 +- this_cpu, retries);
445 +- goto dump_cpu_list_and_out;
446 +-
447 +-fatal_mondo_error:
448 +- printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
449 +- this_cpu, status);
450 +- printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
451 +- "mondo_block_pa(%lx)\n",
452 +- this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
453 +-
454 +-dump_cpu_list_and_out:
455 +- printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
456 +- for (i = 0; i < cnt; i++)
457 +- printk("%u ", cpu_list[i]);
458 +- printk("]\n");
459 ++ /* some cpus being non-responsive to the cpu mondo */
460 ++ pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n",
461 ++ this_cpu, first_cpu, (tot_retries + retries), tot_cpus);
462 ++ panic("SUN4V mondo timeout panic\n");
463 + }
464 +
465 + static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);
466 +diff --git a/arch/sparc/kernel/sun4v_ivec.S b/arch/sparc/kernel/sun4v_ivec.S
467 +index 559bc5e9c199..34631995859a 100644
468 +--- a/arch/sparc/kernel/sun4v_ivec.S
469 ++++ b/arch/sparc/kernel/sun4v_ivec.S
470 +@@ -26,6 +26,21 @@ sun4v_cpu_mondo:
471 + ldxa [%g0] ASI_SCRATCHPAD, %g4
472 + sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4
473 +
474 ++ /* Get smp_processor_id() into %g3 */
475 ++ sethi %hi(trap_block), %g5
476 ++ or %g5, %lo(trap_block), %g5
477 ++ sub %g4, %g5, %g3
478 ++ srlx %g3, TRAP_BLOCK_SZ_SHIFT, %g3
479 ++
480 ++ /* Increment cpu_mondo_counter[smp_processor_id()] */
481 ++ sethi %hi(cpu_mondo_counter), %g5
482 ++ or %g5, %lo(cpu_mondo_counter), %g5
483 ++ sllx %g3, 3, %g3
484 ++ add %g5, %g3, %g5
485 ++ ldx [%g5], %g3
486 ++ add %g3, 1, %g3
487 ++ stx %g3, [%g5]
488 ++
489 + /* Get CPU mondo queue base phys address into %g7. */
490 + ldx [%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
491 +
492 +diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
493 +index d44fb806bbd7..32dafb920908 100644
494 +--- a/arch/sparc/kernel/traps_64.c
495 ++++ b/arch/sparc/kernel/traps_64.c
496 +@@ -2732,6 +2732,7 @@ void do_getpsr(struct pt_regs *regs)
497 + }
498 + }
499 +
500 ++u64 cpu_mondo_counter[NR_CPUS] = {0};
501 + struct trap_per_cpu trap_block[NR_CPUS];
502 + EXPORT_SYMBOL(trap_block);
503 +
504 +diff --git a/arch/sparc/lib/U3memcpy.S b/arch/sparc/lib/U3memcpy.S
505 +index 54f98706b03b..5a8cb37f0a3b 100644
506 +--- a/arch/sparc/lib/U3memcpy.S
507 ++++ b/arch/sparc/lib/U3memcpy.S
508 +@@ -145,13 +145,13 @@ ENDPROC(U3_retl_o2_plus_GS_plus_0x08)
509 + ENTRY(U3_retl_o2_and_7_plus_GS)
510 + and %o2, 7, %o2
511 + retl
512 +- add %o2, GLOBAL_SPARE, %o2
513 ++ add %o2, GLOBAL_SPARE, %o0
514 + ENDPROC(U3_retl_o2_and_7_plus_GS)
515 + ENTRY(U3_retl_o2_and_7_plus_GS_plus_8)
516 + add GLOBAL_SPARE, 8, GLOBAL_SPARE
517 + and %o2, 7, %o2
518 + retl
519 +- add %o2, GLOBAL_SPARE, %o2
520 ++ add %o2, GLOBAL_SPARE, %o0
521 + ENDPROC(U3_retl_o2_and_7_plus_GS_plus_8)
522 + #endif
523 +
524 +diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
525 +index cc3bd583dce1..9e240fcba784 100644
526 +--- a/arch/x86/boot/string.c
527 ++++ b/arch/x86/boot/string.c
528 +@@ -14,6 +14,7 @@
529 +
530 + #include <linux/types.h>
531 + #include "ctype.h"
532 ++#include "string.h"
533 +
534 + int memcmp(const void *s1, const void *s2, size_t len)
535 + {
536 +diff --git a/arch/x86/boot/string.h b/arch/x86/boot/string.h
537 +index 725e820602b1..113588ddb43f 100644
538 +--- a/arch/x86/boot/string.h
539 ++++ b/arch/x86/boot/string.h
540 +@@ -18,4 +18,13 @@ int memcmp(const void *s1, const void *s2, size_t len);
541 + #define memset(d,c,l) __builtin_memset(d,c,l)
542 + #define memcmp __builtin_memcmp
543 +
544 ++extern int strcmp(const char *str1, const char *str2);
545 ++extern int strncmp(const char *cs, const char *ct, size_t count);
546 ++extern size_t strlen(const char *s);
547 ++extern char *strstr(const char *s1, const char *s2);
548 ++extern size_t strnlen(const char *s, size_t maxlen);
549 ++extern unsigned int atou(const char *s);
550 ++extern unsigned long long simple_strtoull(const char *cp, char **endp,
551 ++ unsigned int base);
552 ++
553 + #endif /* BOOT_STRING_H */
554 +diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
555 +index 9cf697ceedbf..55ffd9dc2258 100644
556 +--- a/arch/x86/kernel/kvm.c
557 ++++ b/arch/x86/kernel/kvm.c
558 +@@ -152,6 +152,8 @@ void kvm_async_pf_task_wait(u32 token)
559 + if (hlist_unhashed(&n.link))
560 + break;
561 +
562 ++ rcu_irq_exit();
563 ++
564 + if (!n.halted) {
565 + local_irq_enable();
566 + schedule();
567 +@@ -160,11 +162,11 @@ void kvm_async_pf_task_wait(u32 token)
568 + /*
569 + * We cannot reschedule. So halt.
570 + */
571 +- rcu_irq_exit();
572 + native_safe_halt();
573 + local_irq_disable();
574 +- rcu_irq_enter();
575 + }
576 ++
577 ++ rcu_irq_enter();
578 + }
579 + if (!n.halted)
580 + finish_swait(&n.wq, &wait);
581 +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
582 +index 8e575fbdf31d..e3e10e8f6f6a 100644
583 +--- a/drivers/ata/libata-scsi.c
584 ++++ b/drivers/ata/libata-scsi.c
585 +@@ -2971,10 +2971,12 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
586 + static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
587 + {
588 + if (!sata_pmp_attached(ap)) {
589 +- if (likely(devno < ata_link_max_devices(&ap->link)))
590 ++ if (likely(devno >= 0 &&
591 ++ devno < ata_link_max_devices(&ap->link)))
592 + return &ap->link.device[devno];
593 + } else {
594 +- if (likely(devno < ap->nr_pmp_links))
595 ++ if (likely(devno >= 0 &&
596 ++ devno < ap->nr_pmp_links))
597 + return &ap->pmp_link[devno].device[0];
598 + }
599 +
600 +diff --git a/drivers/base/property.c b/drivers/base/property.c
601 +index 43a36d68c3fd..06f66687fe0b 100644
602 +--- a/drivers/base/property.c
603 ++++ b/drivers/base/property.c
604 +@@ -182,11 +182,12 @@ static int pset_prop_read_string(struct property_set *pset,
605 + return 0;
606 + }
607 +
608 +-static inline struct fwnode_handle *dev_fwnode(struct device *dev)
609 ++struct fwnode_handle *dev_fwnode(struct device *dev)
610 + {
611 + return IS_ENABLED(CONFIG_OF) && dev->of_node ?
612 + &dev->of_node->fwnode : dev->fwnode;
613 + }
614 ++EXPORT_SYMBOL_GPL(dev_fwnode);
615 +
616 + /**
617 + * device_property_present - check if a property of a device is present
618 +diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
619 +index c9441f9d4585..98b767d3171e 100644
620 +--- a/drivers/block/nbd.c
621 ++++ b/drivers/block/nbd.c
622 +@@ -929,6 +929,7 @@ static int __init nbd_init(void)
623 + return -ENOMEM;
624 +
625 + for (i = 0; i < nbds_max; i++) {
626 ++ struct request_queue *q;
627 + struct gendisk *disk = alloc_disk(1 << part_shift);
628 + if (!disk)
629 + goto out;
630 +@@ -954,12 +955,13 @@ static int __init nbd_init(void)
631 + * every gendisk to have its very own request_queue struct.
632 + * These structs are big so we dynamically allocate them.
633 + */
634 +- disk->queue = blk_mq_init_queue(&nbd_dev[i].tag_set);
635 +- if (!disk->queue) {
636 ++ q = blk_mq_init_queue(&nbd_dev[i].tag_set);
637 ++ if (IS_ERR(q)) {
638 + blk_mq_free_tag_set(&nbd_dev[i].tag_set);
639 + put_disk(disk);
640 + goto out;
641 + }
642 ++ disk->queue = q;
643 +
644 + /*
645 + * Tell the block layer that we are not a rotational device
646 +diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
647 +index 3c3b8f601469..10332c24f961 100644
648 +--- a/drivers/block/virtio_blk.c
649 ++++ b/drivers/block/virtio_blk.c
650 +@@ -630,11 +630,12 @@ static int virtblk_probe(struct virtio_device *vdev)
651 + if (err)
652 + goto out_put_disk;
653 +
654 +- q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
655 ++ q = blk_mq_init_queue(&vblk->tag_set);
656 + if (IS_ERR(q)) {
657 + err = -ENOMEM;
658 + goto out_free_tags;
659 + }
660 ++ vblk->disk->queue = q;
661 +
662 + q->queuedata = vblk;
663 +
664 +diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
665 +index 8c8b495cbf0d..cdc092a1d9ef 100644
666 +--- a/drivers/clk/samsung/clk-exynos5420.c
667 ++++ b/drivers/clk/samsung/clk-exynos5420.c
668 +@@ -586,7 +586,7 @@ static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = {
669 + GATE(CLK_ACLK550_CAM, "aclk550_cam", "mout_user_aclk550_cam",
670 + GATE_BUS_TOP, 24, 0, 0),
671 + GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler",
672 +- GATE_BUS_TOP, 27, 0, 0),
673 ++ GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0),
674 + };
675 +
676 + static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = {
677 +@@ -956,20 +956,20 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
678 + GATE(CLK_SMMU_G2D, "smmu_g2d", "aclk333_g2d", GATE_IP_G2D, 7, 0, 0),
679 +
680 + GATE(0, "aclk200_fsys", "mout_user_aclk200_fsys",
681 +- GATE_BUS_FSYS0, 9, CLK_IGNORE_UNUSED, 0),
682 ++ GATE_BUS_FSYS0, 9, CLK_IS_CRITICAL, 0),
683 + GATE(0, "aclk200_fsys2", "mout_user_aclk200_fsys2",
684 + GATE_BUS_FSYS0, 10, CLK_IGNORE_UNUSED, 0),
685 +
686 + GATE(0, "aclk333_g2d", "mout_user_aclk333_g2d",
687 + GATE_BUS_TOP, 0, CLK_IGNORE_UNUSED, 0),
688 + GATE(0, "aclk266_g2d", "mout_user_aclk266_g2d",
689 +- GATE_BUS_TOP, 1, CLK_IGNORE_UNUSED, 0),
690 ++ GATE_BUS_TOP, 1, CLK_IS_CRITICAL, 0),
691 + GATE(0, "aclk300_jpeg", "mout_user_aclk300_jpeg",
692 + GATE_BUS_TOP, 4, CLK_IGNORE_UNUSED, 0),
693 + GATE(0, "aclk333_432_isp0", "mout_user_aclk333_432_isp0",
694 + GATE_BUS_TOP, 5, 0, 0),
695 + GATE(0, "aclk300_gscl", "mout_user_aclk300_gscl",
696 +- GATE_BUS_TOP, 6, CLK_IGNORE_UNUSED, 0),
697 ++ GATE_BUS_TOP, 6, CLK_IS_CRITICAL, 0),
698 + GATE(0, "aclk333_432_gscl", "mout_user_aclk333_432_gscl",
699 + GATE_BUS_TOP, 7, CLK_IGNORE_UNUSED, 0),
700 + GATE(0, "aclk333_432_isp", "mout_user_aclk333_432_isp",
701 +@@ -983,20 +983,20 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
702 + GATE(0, "aclk166", "mout_user_aclk166",
703 + GATE_BUS_TOP, 14, CLK_IGNORE_UNUSED, 0),
704 + GATE(CLK_ACLK333, "aclk333", "mout_user_aclk333",
705 +- GATE_BUS_TOP, 15, CLK_IGNORE_UNUSED, 0),
706 ++ GATE_BUS_TOP, 15, CLK_IS_CRITICAL, 0),
707 + GATE(0, "aclk400_isp", "mout_user_aclk400_isp",
708 + GATE_BUS_TOP, 16, 0, 0),
709 + GATE(0, "aclk400_mscl", "mout_user_aclk400_mscl",
710 + GATE_BUS_TOP, 17, 0, 0),
711 + GATE(0, "aclk200_disp1", "mout_user_aclk200_disp1",
712 +- GATE_BUS_TOP, 18, 0, 0),
713 ++ GATE_BUS_TOP, 18, CLK_IS_CRITICAL, 0),
714 + GATE(CLK_SCLK_MPHY_IXTAL24, "sclk_mphy_ixtal24", "mphy_refclk_ixtal24",
715 + GATE_BUS_TOP, 28, 0, 0),
716 + GATE(CLK_SCLK_HSIC_12M, "sclk_hsic_12m", "ff_hsic_12m",
717 + GATE_BUS_TOP, 29, 0, 0),
718 +
719 + GATE(0, "aclk300_disp1", "mout_user_aclk300_disp1",
720 +- SRC_MASK_TOP2, 24, 0, 0),
721 ++ SRC_MASK_TOP2, 24, CLK_IS_CRITICAL, 0),
722 +
723 + GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk",
724 + SRC_MASK_TOP7, 20, 0, 0),
725 +diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
726 +index f2bb5122d2c2..063d176baa24 100644
727 +--- a/drivers/gpio/gpiolib.c
728 ++++ b/drivers/gpio/gpiolib.c
729 +@@ -703,24 +703,23 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
730 + {
731 + struct lineevent_state *le = p;
732 + struct gpioevent_data ge;
733 +- int ret;
734 ++ int ret, level;
735 +
736 + ge.timestamp = ktime_get_real_ns();
737 ++ level = gpiod_get_value_cansleep(le->desc);
738 +
739 + if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
740 + && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
741 +- int level = gpiod_get_value_cansleep(le->desc);
742 +-
743 + if (level)
744 + /* Emit low-to-high event */
745 + ge.id = GPIOEVENT_EVENT_RISING_EDGE;
746 + else
747 + /* Emit high-to-low event */
748 + ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
749 +- } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) {
750 ++ } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE && level) {
751 + /* Emit low-to-high event */
752 + ge.id = GPIOEVENT_EVENT_RISING_EDGE;
753 +- } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
754 ++ } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE && !level) {
755 + /* Emit high-to-low event */
756 + ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
757 + } else {
758 +diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
759 +index dc9511c5ecb8..327bdf13e8bc 100644
760 +--- a/drivers/gpu/drm/amd/amdgpu/si.c
761 ++++ b/drivers/gpu/drm/amd/amdgpu/si.c
762 +@@ -1301,6 +1301,7 @@ static void si_init_golden_registers(struct amdgpu_device *adev)
763 + amdgpu_program_register_sequence(adev,
764 + pitcairn_mgcg_cgcg_init,
765 + (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
766 ++ break;
767 + case CHIP_VERDE:
768 + amdgpu_program_register_sequence(adev,
769 + verde_golden_registers,
770 +@@ -1325,6 +1326,7 @@ static void si_init_golden_registers(struct amdgpu_device *adev)
771 + amdgpu_program_register_sequence(adev,
772 + oland_mgcg_cgcg_init,
773 + (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
774 ++ break;
775 + case CHIP_HAINAN:
776 + amdgpu_program_register_sequence(adev,
777 + hainan_golden_registers,
778 +diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
779 +index 2242a80866a9..dc2976c2bed3 100644
780 +--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
781 ++++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
782 +@@ -337,7 +337,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
783 + info->fbops = &virtio_gpufb_ops;
784 + info->pixmap.flags = FB_PIXMAP_SYSTEM;
785 +
786 +- info->screen_base = obj->vmap;
787 ++ info->screen_buffer = obj->vmap;
788 + info->screen_size = obj->gem_base.size;
789 + drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
790 + drm_fb_helper_fill_var(info, &vfbdev->helper,
791 +diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
792 +index f1510cc76d2d..9398143d7c5e 100644
793 +--- a/drivers/infiniband/hw/cxgb4/cm.c
794 ++++ b/drivers/infiniband/hw/cxgb4/cm.c
795 +@@ -1804,20 +1804,21 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
796 + skb_trim(skb, dlen);
797 + mutex_lock(&ep->com.mutex);
798 +
799 +- /* update RX credits */
800 +- update_rx_credits(ep, dlen);
801 +-
802 + switch (ep->com.state) {
803 + case MPA_REQ_SENT:
804 ++ update_rx_credits(ep, dlen);
805 + ep->rcv_seq += dlen;
806 + disconnect = process_mpa_reply(ep, skb);
807 + break;
808 + case MPA_REQ_WAIT:
809 ++ update_rx_credits(ep, dlen);
810 + ep->rcv_seq += dlen;
811 + disconnect = process_mpa_request(ep, skb);
812 + break;
813 + case FPDU_MODE: {
814 + struct c4iw_qp_attributes attrs;
815 ++
816 ++ update_rx_credits(ep, dlen);
817 + BUG_ON(!ep->com.qp);
818 + if (status)
819 + pr_err("%s Unexpected streaming data." \
820 +diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
821 +index 41800b6d492e..c380b7e8f1c6 100644
822 +--- a/drivers/iommu/amd_iommu.c
823 ++++ b/drivers/iommu/amd_iommu.c
824 +@@ -4294,6 +4294,7 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
825 + /* Setting */
826 + irte->hi.fields.ga_root_ptr = (pi_data->base >> 12);
827 + irte->hi.fields.vector = vcpu_pi_info->vector;
828 ++ irte->lo.fields_vapic.ga_log_intr = 1;
829 + irte->lo.fields_vapic.guest_mode = 1;
830 + irte->lo.fields_vapic.ga_tag = pi_data->ga_tag;
831 +
832 +diff --git a/drivers/media/pci/saa7164/saa7164-bus.c b/drivers/media/pci/saa7164/saa7164-bus.c
833 +index a18fe5d47238..b4857cd7069e 100644
834 +--- a/drivers/media/pci/saa7164/saa7164-bus.c
835 ++++ b/drivers/media/pci/saa7164/saa7164-bus.c
836 +@@ -393,11 +393,11 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
837 + msg_tmp.size = le16_to_cpu((__force __le16)msg_tmp.size);
838 + msg_tmp.command = le32_to_cpu((__force __le32)msg_tmp.command);
839 + msg_tmp.controlselector = le16_to_cpu((__force __le16)msg_tmp.controlselector);
840 ++ memcpy(msg, &msg_tmp, sizeof(*msg));
841 +
842 + /* No need to update the read positions, because this was a peek */
843 + /* If the caller specifically want to peek, return */
844 + if (peekonly) {
845 +- memcpy(msg, &msg_tmp, sizeof(*msg));
846 + goto peekout;
847 + }
848 +
849 +@@ -442,21 +442,15 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
850 + space_rem = bus->m_dwSizeGetRing - curr_grp;
851 +
852 + if (space_rem < sizeof(*msg)) {
853 +- /* msg wraps around the ring */
854 +- memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, space_rem);
855 +- memcpy_fromio((u8 *)msg + space_rem, bus->m_pdwGetRing,
856 +- sizeof(*msg) - space_rem);
857 + if (buf)
858 + memcpy_fromio(buf, bus->m_pdwGetRing + sizeof(*msg) -
859 + space_rem, buf_size);
860 +
861 + } else if (space_rem == sizeof(*msg)) {
862 +- memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
863 + if (buf)
864 + memcpy_fromio(buf, bus->m_pdwGetRing, buf_size);
865 + } else {
866 + /* Additional data wraps around the ring */
867 +- memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
868 + if (buf) {
869 + memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp +
870 + sizeof(*msg), space_rem - sizeof(*msg));
871 +@@ -469,15 +463,10 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
872 +
873 + } else {
874 + /* No wrapping */
875 +- memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
876 + if (buf)
877 + memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp + sizeof(*msg),
878 + buf_size);
879 + }
880 +- /* Convert from little endian to CPU */
881 +- msg->size = le16_to_cpu((__force __le16)msg->size);
882 +- msg->command = le32_to_cpu((__force __le32)msg->command);
883 +- msg->controlselector = le16_to_cpu((__force __le16)msg->controlselector);
884 +
885 + /* Update the read positions, adjusting the ring */
886 + saa7164_writel(bus->m_dwGetReadPos, new_grp);
887 +diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
888 +index 6efb2f1631c4..bdb7a0a00932 100644
889 +--- a/drivers/media/platform/davinci/vpfe_capture.c
890 ++++ b/drivers/media/platform/davinci/vpfe_capture.c
891 +@@ -1725,27 +1725,9 @@ static long vpfe_param_handler(struct file *file, void *priv,
892 +
893 + switch (cmd) {
894 + case VPFE_CMD_S_CCDC_RAW_PARAMS:
895 ++ ret = -EINVAL;
896 + v4l2_warn(&vpfe_dev->v4l2_dev,
897 +- "VPFE_CMD_S_CCDC_RAW_PARAMS: experimental ioctl\n");
898 +- if (ccdc_dev->hw_ops.set_params) {
899 +- ret = ccdc_dev->hw_ops.set_params(param);
900 +- if (ret) {
901 +- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
902 +- "Error setting parameters in CCDC\n");
903 +- goto unlock_out;
904 +- }
905 +- ret = vpfe_get_ccdc_image_format(vpfe_dev,
906 +- &vpfe_dev->fmt);
907 +- if (ret < 0) {
908 +- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
909 +- "Invalid image format at CCDC\n");
910 +- goto unlock_out;
911 +- }
912 +- } else {
913 +- ret = -EINVAL;
914 +- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
915 +- "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
916 +- }
917 ++ "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
918 + break;
919 + default:
920 + ret = -ENOTTY;
921 +diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
922 +index c3277308a70b..b49f80cb49c9 100644
923 +--- a/drivers/media/rc/ir-lirc-codec.c
924 ++++ b/drivers/media/rc/ir-lirc-codec.c
925 +@@ -254,7 +254,7 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
926 + return 0;
927 +
928 + case LIRC_GET_REC_RESOLUTION:
929 +- val = dev->rx_resolution;
930 ++ val = dev->rx_resolution / 1000;
931 + break;
932 +
933 + case LIRC_SET_WIDEBAND_RECEIVER:
934 +diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
935 +index 98f25ffb4258..848b3453517e 100644
936 +--- a/drivers/mmc/core/host.c
937 ++++ b/drivers/mmc/core/host.c
938 +@@ -179,19 +179,17 @@ static void mmc_retune_timer(unsigned long data)
939 + */
940 + int mmc_of_parse(struct mmc_host *host)
941 + {
942 +- struct device_node *np;
943 ++ struct device *dev = host->parent;
944 + u32 bus_width;
945 + int ret;
946 + bool cd_cap_invert, cd_gpio_invert = false;
947 + bool ro_cap_invert, ro_gpio_invert = false;
948 +
949 +- if (!host->parent || !host->parent->of_node)
950 ++ if (!dev || !dev_fwnode(dev))
951 + return 0;
952 +
953 +- np = host->parent->of_node;
954 +-
955 + /* "bus-width" is translated to MMC_CAP_*_BIT_DATA flags */
956 +- if (of_property_read_u32(np, "bus-width", &bus_width) < 0) {
957 ++ if (device_property_read_u32(dev, "bus-width", &bus_width) < 0) {
958 + dev_dbg(host->parent,
959 + "\"bus-width\" property is missing, assuming 1 bit.\n");
960 + bus_width = 1;
961 +@@ -213,7 +211,7 @@ int mmc_of_parse(struct mmc_host *host)
962 + }
963 +
964 + /* f_max is obtained from the optional "max-frequency" property */
965 +- of_property_read_u32(np, "max-frequency", &host->f_max);
966 ++ device_property_read_u32(dev, "max-frequency", &host->f_max);
967 +
968 + /*
969 + * Configure CD and WP pins. They are both by default active low to
970 +@@ -228,12 +226,12 @@ int mmc_of_parse(struct mmc_host *host)
971 + */
972 +
973 + /* Parse Card Detection */
974 +- if (of_property_read_bool(np, "non-removable")) {
975 ++ if (device_property_read_bool(dev, "non-removable")) {
976 + host->caps |= MMC_CAP_NONREMOVABLE;
977 + } else {
978 +- cd_cap_invert = of_property_read_bool(np, "cd-inverted");
979 ++ cd_cap_invert = device_property_read_bool(dev, "cd-inverted");
980 +
981 +- if (of_property_read_bool(np, "broken-cd"))
982 ++ if (device_property_read_bool(dev, "broken-cd"))
983 + host->caps |= MMC_CAP_NEEDS_POLL;
984 +
985 + ret = mmc_gpiod_request_cd(host, "cd", 0, true,
986 +@@ -259,7 +257,7 @@ int mmc_of_parse(struct mmc_host *host)
987 + }
988 +
989 + /* Parse Write Protection */
990 +- ro_cap_invert = of_property_read_bool(np, "wp-inverted");
991 ++ ro_cap_invert = device_property_read_bool(dev, "wp-inverted");
992 +
993 + ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert);
994 + if (!ret)
995 +@@ -267,62 +265,62 @@ int mmc_of_parse(struct mmc_host *host)
996 + else if (ret != -ENOENT && ret != -ENOSYS)
997 + return ret;
998 +
999 +- if (of_property_read_bool(np, "disable-wp"))
1000 ++ if (device_property_read_bool(dev, "disable-wp"))
1001 + host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
1002 +
1003 + /* See the comment on CD inversion above */
1004 + if (ro_cap_invert ^ ro_gpio_invert)
1005 + host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
1006 +
1007 +- if (of_property_read_bool(np, "cap-sd-highspeed"))
1008 ++ if (device_property_read_bool(dev, "cap-sd-highspeed"))
1009 + host->caps |= MMC_CAP_SD_HIGHSPEED;
1010 +- if (of_property_read_bool(np, "cap-mmc-highspeed"))
1011 ++ if (device_property_read_bool(dev, "cap-mmc-highspeed"))
1012 + host->caps |= MMC_CAP_MMC_HIGHSPEED;
1013 +- if (of_property_read_bool(np, "sd-uhs-sdr12"))
1014 ++ if (device_property_read_bool(dev, "sd-uhs-sdr12"))
1015 + host->caps |= MMC_CAP_UHS_SDR12;
1016 +- if (of_property_read_bool(np, "sd-uhs-sdr25"))
1017 ++ if (device_property_read_bool(dev, "sd-uhs-sdr25"))
1018 + host->caps |= MMC_CAP_UHS_SDR25;
1019 +- if (of_property_read_bool(np, "sd-uhs-sdr50"))
1020 ++ if (device_property_read_bool(dev, "sd-uhs-sdr50"))
1021 + host->caps |= MMC_CAP_UHS_SDR50;
1022 +- if (of_property_read_bool(np, "sd-uhs-sdr104"))
1023 ++ if (device_property_read_bool(dev, "sd-uhs-sdr104"))
1024 + host->caps |= MMC_CAP_UHS_SDR104;
1025 +- if (of_property_read_bool(np, "sd-uhs-ddr50"))
1026 ++ if (device_property_read_bool(dev, "sd-uhs-ddr50"))
1027 + host->caps |= MMC_CAP_UHS_DDR50;
1028 +- if (of_property_read_bool(np, "cap-power-off-card"))
1029 ++ if (device_property_read_bool(dev, "cap-power-off-card"))
1030 + host->caps |= MMC_CAP_POWER_OFF_CARD;
1031 +- if (of_property_read_bool(np, "cap-mmc-hw-reset"))
1032 ++ if (device_property_read_bool(dev, "cap-mmc-hw-reset"))
1033 + host->caps |= MMC_CAP_HW_RESET;
1034 +- if (of_property_read_bool(np, "cap-sdio-irq"))
1035 ++ if (device_property_read_bool(dev, "cap-sdio-irq"))
1036 + host->caps |= MMC_CAP_SDIO_IRQ;
1037 +- if (of_property_read_bool(np, "full-pwr-cycle"))
1038 ++ if (device_property_read_bool(dev, "full-pwr-cycle"))
1039 + host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
1040 +- if (of_property_read_bool(np, "keep-power-in-suspend"))
1041 ++ if (device_property_read_bool(dev, "keep-power-in-suspend"))
1042 + host->pm_caps |= MMC_PM_KEEP_POWER;
1043 +- if (of_property_read_bool(np, "wakeup-source") ||
1044 +- of_property_read_bool(np, "enable-sdio-wakeup")) /* legacy */
1045 ++ if (device_property_read_bool(dev, "wakeup-source") ||
1046 ++ device_property_read_bool(dev, "enable-sdio-wakeup")) /* legacy */
1047 + host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
1048 +- if (of_property_read_bool(np, "mmc-ddr-1_8v"))
1049 ++ if (device_property_read_bool(dev, "mmc-ddr-1_8v"))
1050 + host->caps |= MMC_CAP_1_8V_DDR;
1051 +- if (of_property_read_bool(np, "mmc-ddr-1_2v"))
1052 ++ if (device_property_read_bool(dev, "mmc-ddr-1_2v"))
1053 + host->caps |= MMC_CAP_1_2V_DDR;
1054 +- if (of_property_read_bool(np, "mmc-hs200-1_8v"))
1055 ++ if (device_property_read_bool(dev, "mmc-hs200-1_8v"))
1056 + host->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
1057 +- if (of_property_read_bool(np, "mmc-hs200-1_2v"))
1058 ++ if (device_property_read_bool(dev, "mmc-hs200-1_2v"))
1059 + host->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
1060 +- if (of_property_read_bool(np, "mmc-hs400-1_8v"))
1061 ++ if (device_property_read_bool(dev, "mmc-hs400-1_8v"))
1062 + host->caps2 |= MMC_CAP2_HS400_1_8V | MMC_CAP2_HS200_1_8V_SDR;
1063 +- if (of_property_read_bool(np, "mmc-hs400-1_2v"))
1064 ++ if (device_property_read_bool(dev, "mmc-hs400-1_2v"))
1065 + host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR;
1066 +- if (of_property_read_bool(np, "mmc-hs400-enhanced-strobe"))
1067 ++ if (device_property_read_bool(dev, "mmc-hs400-enhanced-strobe"))
1068 + host->caps2 |= MMC_CAP2_HS400_ES;
1069 +- if (of_property_read_bool(np, "no-sdio"))
1070 ++ if (device_property_read_bool(dev, "no-sdio"))
1071 + host->caps2 |= MMC_CAP2_NO_SDIO;
1072 +- if (of_property_read_bool(np, "no-sd"))
1073 ++ if (device_property_read_bool(dev, "no-sd"))
1074 + host->caps2 |= MMC_CAP2_NO_SD;
1075 +- if (of_property_read_bool(np, "no-mmc"))
1076 ++ if (device_property_read_bool(dev, "no-mmc"))
1077 + host->caps2 |= MMC_CAP2_NO_MMC;
1078 +
1079 +- host->dsr_req = !of_property_read_u32(np, "dsr", &host->dsr);
1080 ++ host->dsr_req = !device_property_read_u32(dev, "dsr", &host->dsr);
1081 + if (host->dsr_req && (host->dsr & ~0xffff)) {
1082 + dev_err(host->parent,
1083 + "device tree specified broken value for DSR: 0x%x, ignoring\n",
1084 +diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
1085 +index f57700c4b8f0..323dba35bc9a 100644
1086 +--- a/drivers/mmc/core/mmc.c
1087 ++++ b/drivers/mmc/core/mmc.c
1088 +@@ -1690,7 +1690,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
1089 + err = mmc_select_hs400(card);
1090 + if (err)
1091 + goto free_card;
1092 +- } else {
1093 ++ } else if (!mmc_card_hs400es(card)) {
1094 + /* Select the desired bus width optionally */
1095 + err = mmc_select_bus_width(card);
1096 + if (err > 0 && mmc_card_hs(card)) {
1097 +diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
1098 +index df478ae72e23..f81f4175f49a 100644
1099 +--- a/drivers/mmc/host/dw_mmc.c
1100 ++++ b/drivers/mmc/host/dw_mmc.c
1101 +@@ -2610,8 +2610,8 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
1102 + host->slot[id] = slot;
1103 +
1104 + mmc->ops = &dw_mci_ops;
1105 +- if (of_property_read_u32_array(host->dev->of_node,
1106 +- "clock-freq-min-max", freq, 2)) {
1107 ++ if (device_property_read_u32_array(host->dev, "clock-freq-min-max",
1108 ++ freq, 2)) {
1109 + mmc->f_min = DW_MCI_FREQ_MIN;
1110 + mmc->f_max = DW_MCI_FREQ_MAX;
1111 + } else {
1112 +@@ -2709,7 +2709,6 @@ static void dw_mci_init_dma(struct dw_mci *host)
1113 + {
1114 + int addr_config;
1115 + struct device *dev = host->dev;
1116 +- struct device_node *np = dev->of_node;
1117 +
1118 + /*
1119 + * Check tansfer mode from HCON[17:16]
1120 +@@ -2770,8 +2769,9 @@ static void dw_mci_init_dma(struct dw_mci *host)
1121 + dev_info(host->dev, "Using internal DMA controller.\n");
1122 + } else {
1123 + /* TRANS_MODE_EDMAC: check dma bindings again */
1124 +- if ((of_property_count_strings(np, "dma-names") < 0) ||
1125 +- (!of_find_property(np, "dmas", NULL))) {
1126 ++ if ((device_property_read_string_array(dev, "dma-names",
1127 ++ NULL, 0) < 0) ||
1128 ++ !device_property_present(dev, "dmas")) {
1129 + goto no_dma;
1130 + }
1131 + host->dma_ops = &dw_mci_edmac_ops;
1132 +@@ -2931,7 +2931,6 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
1133 + {
1134 + struct dw_mci_board *pdata;
1135 + struct device *dev = host->dev;
1136 +- struct device_node *np = dev->of_node;
1137 + const struct dw_mci_drv_data *drv_data = host->drv_data;
1138 + int ret;
1139 + u32 clock_frequency;
1140 +@@ -2948,15 +2947,16 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
1141 + }
1142 +
1143 + /* find out number of slots supported */
1144 +- of_property_read_u32(np, "num-slots", &pdata->num_slots);
1145 ++ device_property_read_u32(dev, "num-slots", &pdata->num_slots);
1146 +
1147 +- if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
1148 ++ if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
1149 + dev_info(dev,
1150 + "fifo-depth property not found, using value of FIFOTH register as default\n");
1151 +
1152 +- of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
1153 ++ device_property_read_u32(dev, "card-detect-delay",
1154 ++ &pdata->detect_delay_ms);
1155 +
1156 +- if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
1157 ++ if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency))
1158 + pdata->bus_hz = clock_frequency;
1159 +
1160 + if (drv_data && drv_data->parse_dt) {
1161 +diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
1162 +index a8b430ff117b..83b84ffec27d 100644
1163 +--- a/drivers/mmc/host/sdhci-of-at91.c
1164 ++++ b/drivers/mmc/host/sdhci-of-at91.c
1165 +@@ -31,6 +31,7 @@
1166 +
1167 + #define SDMMC_MC1R 0x204
1168 + #define SDMMC_MC1R_DDR BIT(3)
1169 ++#define SDMMC_MC1R_FCD BIT(7)
1170 + #define SDMMC_CACR 0x230
1171 + #define SDMMC_CACR_CAPWREN BIT(0)
1172 + #define SDMMC_CACR_KEY (0x46 << 8)
1173 +@@ -43,6 +44,15 @@ struct sdhci_at91_priv {
1174 + struct clk *mainck;
1175 + };
1176 +
1177 ++static void sdhci_at91_set_force_card_detect(struct sdhci_host *host)
1178 ++{
1179 ++ u8 mc1r;
1180 ++
1181 ++ mc1r = readb(host->ioaddr + SDMMC_MC1R);
1182 ++ mc1r |= SDMMC_MC1R_FCD;
1183 ++ writeb(mc1r, host->ioaddr + SDMMC_MC1R);
1184 ++}
1185 ++
1186 + static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
1187 + {
1188 + u16 clk;
1189 +@@ -112,10 +122,18 @@ void sdhci_at91_set_uhs_signaling(struct sdhci_host *host, unsigned int timing)
1190 + sdhci_set_uhs_signaling(host, timing);
1191 + }
1192 +
1193 ++static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
1194 ++{
1195 ++ sdhci_reset(host, mask);
1196 ++
1197 ++ if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
1198 ++ sdhci_at91_set_force_card_detect(host);
1199 ++}
1200 ++
1201 + static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
1202 + .set_clock = sdhci_at91_set_clock,
1203 + .set_bus_width = sdhci_set_bus_width,
1204 +- .reset = sdhci_reset,
1205 ++ .reset = sdhci_at91_reset,
1206 + .set_uhs_signaling = sdhci_at91_set_uhs_signaling,
1207 + .set_power = sdhci_at91_set_power,
1208 + };
1209 +@@ -322,6 +340,21 @@ static int sdhci_at91_probe(struct platform_device *pdev)
1210 + host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
1211 + }
1212 +
1213 ++ /*
1214 ++ * If the device attached to the MMC bus is not removable, it is safer
1215 ++ * to set the Force Card Detect bit. People often don't connect the
1216 ++ * card detect signal and use this pin for another purpose. If the card
1217 ++ * detect pin is not muxed to SDHCI controller, a default value is
1218 ++ * used. This value can be different from a SoC revision to another
1219 ++ * one. Problems come when this default value is not card present. To
1220 ++ * avoid this case, if the device is non removable then the card
1221 ++ * detection procedure using the SDMCC_CD signal is bypassed.
1222 ++ * This bit is reset when a software reset for all command is performed
1223 ++ * so we need to implement our own reset function to set back this bit.
1224 ++ */
1225 ++ if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
1226 ++ sdhci_at91_set_force_card_detect(host);
1227 ++
1228 + pm_runtime_put_autosuspend(&pdev->dev);
1229 +
1230 + return 0;
1231 +diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
1232 +index 947adda3397d..3ec573c13dac 100644
1233 +--- a/drivers/net/dsa/b53/b53_common.c
1234 ++++ b/drivers/net/dsa/b53/b53_common.c
1235 +@@ -1558,6 +1558,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
1236 + .dev_name = "BCM53125",
1237 + .vlans = 4096,
1238 + .enabled_ports = 0xff,
1239 ++ .arl_entries = 4,
1240 + .cpu_port = B53_CPU_PORT,
1241 + .vta_regs = B53_VTA_REGS,
1242 + .duplex_reg = B53_DUPLEX_STAT_GE,
1243 +diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
1244 +index e078d8da978c..29d29af612d1 100644
1245 +--- a/drivers/net/ethernet/aurora/nb8800.c
1246 ++++ b/drivers/net/ethernet/aurora/nb8800.c
1247 +@@ -609,7 +609,7 @@ static void nb8800_mac_config(struct net_device *dev)
1248 + mac_mode |= HALF_DUPLEX;
1249 +
1250 + if (gigabit) {
1251 +- if (priv->phy_mode == PHY_INTERFACE_MODE_RGMII)
1252 ++ if (phy_interface_is_rgmii(dev->phydev))
1253 + mac_mode |= RGMII_MODE;
1254 +
1255 + mac_mode |= GMAC_MODE;
1256 +@@ -1277,11 +1277,10 @@ static int nb8800_tangox_init(struct net_device *dev)
1257 + break;
1258 +
1259 + case PHY_INTERFACE_MODE_RGMII:
1260 +- pad_mode = PAD_MODE_RGMII;
1261 +- break;
1262 +-
1263 ++ case PHY_INTERFACE_MODE_RGMII_ID:
1264 ++ case PHY_INTERFACE_MODE_RGMII_RXID:
1265 + case PHY_INTERFACE_MODE_RGMII_TXID:
1266 +- pad_mode = PAD_MODE_RGMII | PAD_MODE_GTX_CLK_DELAY;
1267 ++ pad_mode = PAD_MODE_RGMII;
1268 + break;
1269 +
1270 + default:
1271 +diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
1272 +index a927a730da10..edae2dcc4927 100644
1273 +--- a/drivers/net/ethernet/broadcom/tg3.c
1274 ++++ b/drivers/net/ethernet/broadcom/tg3.c
1275 +@@ -8720,11 +8720,14 @@ static void tg3_free_consistent(struct tg3 *tp)
1276 + tg3_mem_rx_release(tp);
1277 + tg3_mem_tx_release(tp);
1278 +
1279 ++ /* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
1280 ++ tg3_full_lock(tp, 0);
1281 + if (tp->hw_stats) {
1282 + dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
1283 + tp->hw_stats, tp->stats_mapping);
1284 + tp->hw_stats = NULL;
1285 + }
1286 ++ tg3_full_unlock(tp);
1287 + }
1288 +
1289 + /*
1290 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
1291 +index cb45390c7623..f7fabecc104f 100644
1292 +--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
1293 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
1294 +@@ -770,6 +770,10 @@ static void cb_timeout_handler(struct work_struct *work)
1295 + mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
1296 + }
1297 +
1298 ++static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
1299 ++static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
1300 ++ struct mlx5_cmd_msg *msg);
1301 ++
1302 + static void cmd_work_handler(struct work_struct *work)
1303 + {
1304 + struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
1305 +@@ -779,16 +783,27 @@ static void cmd_work_handler(struct work_struct *work)
1306 + struct mlx5_cmd_layout *lay;
1307 + struct semaphore *sem;
1308 + unsigned long flags;
1309 ++ int alloc_ret;
1310 +
1311 + sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
1312 + down(sem);
1313 + if (!ent->page_queue) {
1314 +- ent->idx = alloc_ent(cmd);
1315 +- if (ent->idx < 0) {
1316 ++ alloc_ret = alloc_ent(cmd);
1317 ++ if (alloc_ret < 0) {
1318 ++ if (ent->callback) {
1319 ++ ent->callback(-EAGAIN, ent->context);
1320 ++ mlx5_free_cmd_msg(dev, ent->out);
1321 ++ free_msg(dev, ent->in);
1322 ++ free_cmd(ent);
1323 ++ } else {
1324 ++ ent->ret = -EAGAIN;
1325 ++ complete(&ent->done);
1326 ++ }
1327 + mlx5_core_err(dev, "failed to allocate command entry\n");
1328 + up(sem);
1329 + return;
1330 + }
1331 ++ ent->idx = alloc_ret;
1332 + } else {
1333 + ent->idx = cmd->max_reg_cmds;
1334 + spin_lock_irqsave(&cmd->alloc_lock, flags);
1335 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
1336 +index 13dc388667b6..1612ec0d9103 100644
1337 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
1338 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
1339 +@@ -62,12 +62,14 @@ static void mlx5e_timestamp_overflow(struct work_struct *work)
1340 + struct delayed_work *dwork = to_delayed_work(work);
1341 + struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp,
1342 + overflow_work);
1343 ++ struct mlx5e_priv *priv = container_of(tstamp, struct mlx5e_priv, tstamp);
1344 + unsigned long flags;
1345 +
1346 + write_lock_irqsave(&tstamp->lock, flags);
1347 + timecounter_read(&tstamp->clock);
1348 + write_unlock_irqrestore(&tstamp->lock, flags);
1349 +- schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period);
1350 ++ queue_delayed_work(priv->wq, &tstamp->overflow_work,
1351 ++ msecs_to_jiffies(tstamp->overflow_period * 1000));
1352 + }
1353 +
1354 + int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
1355 +@@ -263,7 +265,7 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
1356 +
1357 + INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow);
1358 + if (tstamp->overflow_period)
1359 +- schedule_delayed_work(&tstamp->overflow_work, 0);
1360 ++ queue_delayed_work(priv->wq, &tstamp->overflow_work, 0);
1361 + else
1362 + mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n");
1363 +
1364 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
1365 +index e034dbc4913d..cf070fc0fb6b 100644
1366 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
1367 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
1368 +@@ -276,7 +276,7 @@ static void add_rule_to_list(struct mlx5e_priv *priv,
1369 +
1370 + static bool outer_header_zero(u32 *match_criteria)
1371 + {
1372 +- int size = MLX5_ST_SZ_BYTES(fte_match_param);
1373 ++ int size = MLX5_FLD_SZ_BYTES(fte_match_param, outer_headers);
1374 + char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
1375 + outer_headers);
1376 +
1377 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
1378 +index 6ffd5d2a70aa..52a38106448e 100644
1379 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
1380 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
1381 +@@ -651,9 +651,14 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
1382 + int vport;
1383 + int err;
1384 +
1385 ++ /* disable PF RoCE so missed packets don't go through RoCE steering */
1386 ++ mlx5_dev_list_lock();
1387 ++ mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
1388 ++ mlx5_dev_list_unlock();
1389 ++
1390 + err = esw_create_offloads_fdb_table(esw, nvports);
1391 + if (err)
1392 +- return err;
1393 ++ goto create_fdb_err;
1394 +
1395 + err = esw_create_offloads_table(esw);
1396 + if (err)
1397 +@@ -673,11 +678,6 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
1398 + goto err_reps;
1399 + }
1400 +
1401 +- /* disable PF RoCE so missed packets don't go through RoCE steering */
1402 +- mlx5_dev_list_lock();
1403 +- mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
1404 +- mlx5_dev_list_unlock();
1405 +-
1406 + return 0;
1407 +
1408 + err_reps:
1409 +@@ -694,6 +694,13 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
1410 +
1411 + create_ft_err:
1412 + esw_destroy_offloads_fdb_table(esw);
1413 ++
1414 ++create_fdb_err:
1415 ++ /* enable back PF RoCE */
1416 ++ mlx5_dev_list_lock();
1417 ++ mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
1418 ++ mlx5_dev_list_unlock();
1419 ++
1420 + return err;
1421 + }
1422 +
1423 +@@ -701,11 +708,6 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
1424 + {
1425 + int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
1426 +
1427 +- /* enable back PF RoCE */
1428 +- mlx5_dev_list_lock();
1429 +- mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
1430 +- mlx5_dev_list_unlock();
1431 +-
1432 + mlx5_eswitch_disable_sriov(esw);
1433 + err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
1434 + if (err) {
1435 +@@ -715,6 +717,11 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
1436 + esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
1437 + }
1438 +
1439 ++ /* enable back PF RoCE */
1440 ++ mlx5_dev_list_lock();
1441 ++ mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
1442 ++ mlx5_dev_list_unlock();
1443 ++
1444 + return err;
1445 + }
1446 +
1447 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
1448 +index b5d5519542e8..0ca4623bda6b 100644
1449 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
1450 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
1451 +@@ -157,22 +157,17 @@ static bool mlx5_lag_is_bonded(struct mlx5_lag *ldev)
1452 + static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
1453 + u8 *port1, u8 *port2)
1454 + {
1455 +- if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
1456 +- if (tracker->netdev_state[0].tx_enabled) {
1457 +- *port1 = 1;
1458 +- *port2 = 1;
1459 +- } else {
1460 +- *port1 = 2;
1461 +- *port2 = 2;
1462 +- }
1463 +- } else {
1464 +- *port1 = 1;
1465 +- *port2 = 2;
1466 +- if (!tracker->netdev_state[0].link_up)
1467 +- *port1 = 2;
1468 +- else if (!tracker->netdev_state[1].link_up)
1469 +- *port2 = 1;
1470 ++ *port1 = 1;
1471 ++ *port2 = 2;
1472 ++ if (!tracker->netdev_state[0].tx_enabled ||
1473 ++ !tracker->netdev_state[0].link_up) {
1474 ++ *port1 = 2;
1475 ++ return;
1476 + }
1477 ++
1478 ++ if (!tracker->netdev_state[1].tx_enabled ||
1479 ++ !tracker->netdev_state[1].link_up)
1480 ++ *port2 = 1;
1481 + }
1482 +
1483 + static void mlx5_activate_lag(struct mlx5_lag *ldev,
1484 +diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
1485 +index 12be259394c6..2140dedab712 100644
1486 +--- a/drivers/net/ethernet/renesas/sh_eth.c
1487 ++++ b/drivers/net/ethernet/renesas/sh_eth.c
1488 +@@ -574,6 +574,7 @@ static struct sh_eth_cpu_data r8a7740_data = {
1489 + .rpadir_value = 2 << 16,
1490 + .no_trimd = 1,
1491 + .no_ade = 1,
1492 ++ .hw_crc = 1,
1493 + .tsu = 1,
1494 + .select_mii = 1,
1495 + .shift_rd0 = 1,
1496 +@@ -802,7 +803,7 @@ static struct sh_eth_cpu_data sh7734_data = {
1497 +
1498 + .ecsr_value = ECSR_ICD | ECSR_MPD,
1499 + .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
1500 +- .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
1501 ++ .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff,
1502 +
1503 + .tx_check = EESR_TC1 | EESR_FTC,
1504 + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
1505 +@@ -832,7 +833,7 @@ static struct sh_eth_cpu_data sh7763_data = {
1506 +
1507 + .ecsr_value = ECSR_ICD | ECSR_MPD,
1508 + .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
1509 +- .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
1510 ++ .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff,
1511 +
1512 + .tx_check = EESR_TC1 | EESR_FTC,
1513 + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
1514 +diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
1515 +index bca6a1e72d1d..e1bb802d4a4d 100644
1516 +--- a/drivers/net/irda/mcs7780.c
1517 ++++ b/drivers/net/irda/mcs7780.c
1518 +@@ -141,9 +141,19 @@ static int mcs_set_reg(struct mcs_cb *mcs, __u16 reg, __u16 val)
1519 + static int mcs_get_reg(struct mcs_cb *mcs, __u16 reg, __u16 * val)
1520 + {
1521 + struct usb_device *dev = mcs->usbdev;
1522 +- int ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
1523 +- MCS_RD_RTYPE, 0, reg, val, 2,
1524 +- msecs_to_jiffies(MCS_CTRL_TIMEOUT));
1525 ++ void *dmabuf;
1526 ++ int ret;
1527 ++
1528 ++ dmabuf = kmalloc(sizeof(__u16), GFP_KERNEL);
1529 ++ if (!dmabuf)
1530 ++ return -ENOMEM;
1531 ++
1532 ++ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
1533 ++ MCS_RD_RTYPE, 0, reg, dmabuf, 2,
1534 ++ msecs_to_jiffies(MCS_CTRL_TIMEOUT));
1535 ++
1536 ++ memcpy(val, dmabuf, sizeof(__u16));
1537 ++ kfree(dmabuf);
1538 +
1539 + return ret;
1540 + }
1541 +diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
1542 +index 4cad95552cf1..01cf094bee18 100644
1543 +--- a/drivers/net/phy/dp83867.c
1544 ++++ b/drivers/net/phy/dp83867.c
1545 +@@ -29,6 +29,7 @@
1546 + #define MII_DP83867_MICR 0x12
1547 + #define MII_DP83867_ISR 0x13
1548 + #define DP83867_CTRL 0x1f
1549 ++#define DP83867_CFG3 0x1e
1550 +
1551 + /* Extended Registers */
1552 + #define DP83867_RGMIICTL 0x0032
1553 +@@ -90,6 +91,8 @@ static int dp83867_config_intr(struct phy_device *phydev)
1554 + micr_status |=
1555 + (MII_DP83867_MICR_AN_ERR_INT_EN |
1556 + MII_DP83867_MICR_SPEED_CHNG_INT_EN |
1557 ++ MII_DP83867_MICR_AUTONEG_COMP_INT_EN |
1558 ++ MII_DP83867_MICR_LINK_STS_CHNG_INT_EN |
1559 + MII_DP83867_MICR_DUP_MODE_CHNG_INT_EN |
1560 + MII_DP83867_MICR_SLEEP_MODE_CHNG_INT_EN);
1561 +
1562 +@@ -190,6 +193,13 @@ static int dp83867_config_init(struct phy_device *phydev)
1563 + DP83867_DEVADDR, delay);
1564 + }
1565 +
1566 ++ /* Enable Interrupt output INT_OE in CFG3 register */
1567 ++ if (phy_interrupt_is_valid(phydev)) {
1568 ++ val = phy_read(phydev, DP83867_CFG3);
1569 ++ val |= BIT(7);
1570 ++ phy_write(phydev, DP83867_CFG3, val);
1571 ++ }
1572 ++
1573 + return 0;
1574 + }
1575 +
1576 +diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
1577 +index edd30ebbf275..775a6e1fdef9 100644
1578 +--- a/drivers/net/phy/phy.c
1579 ++++ b/drivers/net/phy/phy.c
1580 +@@ -674,6 +674,9 @@ void phy_stop_machine(struct phy_device *phydev)
1581 + if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
1582 + phydev->state = PHY_UP;
1583 + mutex_unlock(&phydev->lock);
1584 ++
1585 ++ /* Now we can run the state machine synchronously */
1586 ++ phy_state_machine(&phydev->state_queue.work);
1587 + }
1588 +
1589 + /**
1590 +@@ -1060,6 +1063,15 @@ void phy_state_machine(struct work_struct *work)
1591 + if (old_link != phydev->link)
1592 + phydev->state = PHY_CHANGELINK;
1593 + }
1594 ++ /*
1595 ++ * Failsafe: check that nobody set phydev->link=0 between two
1596 ++ * poll cycles, otherwise we won't leave RUNNING state as long
1597 ++ * as link remains down.
1598 ++ */
1599 ++ if (!phydev->link && phydev->state == PHY_RUNNING) {
1600 ++ phydev->state = PHY_CHANGELINK;
1601 ++ phydev_err(phydev, "no link in PHY_RUNNING\n");
1602 ++ }
1603 + break;
1604 + case PHY_CHANGELINK:
1605 + err = phy_read_status(phydev);
1606 +diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
1607 +index 9e7b7836774f..bf02f8e4648a 100644
1608 +--- a/drivers/net/phy/phy_device.c
1609 ++++ b/drivers/net/phy/phy_device.c
1610 +@@ -1714,6 +1714,8 @@ static int phy_remove(struct device *dev)
1611 + {
1612 + struct phy_device *phydev = to_phy_device(dev);
1613 +
1614 ++ cancel_delayed_work_sync(&phydev->state_queue);
1615 ++
1616 + mutex_lock(&phydev->lock);
1617 + phydev->state = PHY_DOWN;
1618 + mutex_unlock(&phydev->lock);
1619 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
1620 +index 8744b9beda33..8e3c6f4bdaa0 100644
1621 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
1622 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
1623 +@@ -4161,11 +4161,6 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
1624 + goto fail;
1625 + }
1626 +
1627 +- /* allocate scatter-gather table. sg support
1628 +- * will be disabled upon allocation failure.
1629 +- */
1630 +- brcmf_sdiod_sgtable_alloc(bus->sdiodev);
1631 +-
1632 + /* Query the F2 block size, set roundup accordingly */
1633 + bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
1634 + bus->roundup = min(max_roundup, bus->blocksize);
1635 +diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
1636 +index 4b97371c3b42..838946d17b59 100644
1637 +--- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
1638 ++++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
1639 +@@ -1190,11 +1190,11 @@ void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
1640 + next_reclaimed;
1641 + IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
1642 + next_reclaimed);
1643 ++ iwlagn_check_ratid_empty(priv, sta_id, tid);
1644 + }
1645 +
1646 + iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
1647 +
1648 +- iwlagn_check_ratid_empty(priv, sta_id, tid);
1649 + freed = 0;
1650 +
1651 + /* process frames */
1652 +diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
1653 +index 3ce1f7da8647..cb7365bdf6e0 100644
1654 +--- a/drivers/net/xen-netback/common.h
1655 ++++ b/drivers/net/xen-netback/common.h
1656 +@@ -199,6 +199,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
1657 + unsigned long remaining_credit;
1658 + struct timer_list credit_timeout;
1659 + u64 credit_window_start;
1660 ++ bool rate_limited;
1661 +
1662 + /* Statistics */
1663 + struct xenvif_stats stats;
1664 +diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
1665 +index b009d7966b46..5bfaf5578810 100644
1666 +--- a/drivers/net/xen-netback/interface.c
1667 ++++ b/drivers/net/xen-netback/interface.c
1668 +@@ -105,7 +105,11 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
1669 +
1670 + if (work_done < budget) {
1671 + napi_complete(napi);
1672 +- xenvif_napi_schedule_or_enable_events(queue);
1673 ++ /* If the queue is rate-limited, it shall be
1674 ++ * rescheduled in the timer callback.
1675 ++ */
1676 ++ if (likely(!queue->rate_limited))
1677 ++ xenvif_napi_schedule_or_enable_events(queue);
1678 + }
1679 +
1680 + return work_done;
1681 +diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
1682 +index 47b481095d77..d9b5b73c35a0 100644
1683 +--- a/drivers/net/xen-netback/netback.c
1684 ++++ b/drivers/net/xen-netback/netback.c
1685 +@@ -179,6 +179,7 @@ static void tx_add_credit(struct xenvif_queue *queue)
1686 + max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
1687 +
1688 + queue->remaining_credit = min(max_credit, max_burst);
1689 ++ queue->rate_limited = false;
1690 + }
1691 +
1692 + void xenvif_tx_credit_callback(unsigned long data)
1693 +@@ -685,8 +686,10 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
1694 + msecs_to_jiffies(queue->credit_usec / 1000);
1695 +
1696 + /* Timer could already be pending in rare cases. */
1697 +- if (timer_pending(&queue->credit_timeout))
1698 ++ if (timer_pending(&queue->credit_timeout)) {
1699 ++ queue->rate_limited = true;
1700 + return true;
1701 ++ }
1702 +
1703 + /* Passed the point where we can replenish credit? */
1704 + if (time_after_eq64(now, next_credit)) {
1705 +@@ -701,6 +704,7 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
1706 + mod_timer(&queue->credit_timeout,
1707 + next_credit);
1708 + queue->credit_window_start = next_credit;
1709 ++ queue->rate_limited = true;
1710 +
1711 + return true;
1712 + }
1713 +diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
1714 +index ad33238cef17..8c4641b518b5 100644
1715 +--- a/drivers/scsi/qla2xxx/qla_attr.c
1716 ++++ b/drivers/scsi/qla2xxx/qla_attr.c
1717 +@@ -243,12 +243,15 @@ qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
1718 + struct qla_hw_data *ha = vha->hw;
1719 + ssize_t rval = 0;
1720 +
1721 ++ mutex_lock(&ha->optrom_mutex);
1722 ++
1723 + if (ha->optrom_state != QLA_SREADING)
1724 +- return 0;
1725 ++ goto out;
1726 +
1727 +- mutex_lock(&ha->optrom_mutex);
1728 + rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
1729 + ha->optrom_region_size);
1730 ++
1731 ++out:
1732 + mutex_unlock(&ha->optrom_mutex);
1733 +
1734 + return rval;
1735 +@@ -263,14 +266,19 @@ qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
1736 + struct device, kobj)));
1737 + struct qla_hw_data *ha = vha->hw;
1738 +
1739 +- if (ha->optrom_state != QLA_SWRITING)
1740 ++ mutex_lock(&ha->optrom_mutex);
1741 ++
1742 ++ if (ha->optrom_state != QLA_SWRITING) {
1743 ++ mutex_unlock(&ha->optrom_mutex);
1744 + return -EINVAL;
1745 +- if (off > ha->optrom_region_size)
1746 ++ }
1747 ++ if (off > ha->optrom_region_size) {
1748 ++ mutex_unlock(&ha->optrom_mutex);
1749 + return -ERANGE;
1750 ++ }
1751 + if (off + count > ha->optrom_region_size)
1752 + count = ha->optrom_region_size - off;
1753 +
1754 +- mutex_lock(&ha->optrom_mutex);
1755 + memcpy(&ha->optrom_buffer[off], buf, count);
1756 + mutex_unlock(&ha->optrom_mutex);
1757 +
1758 +diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
1759 +index 2b1456e5e221..c1eafbd7610a 100644
1760 +--- a/drivers/spi/spi-axi-spi-engine.c
1761 ++++ b/drivers/spi/spi-axi-spi-engine.c
1762 +@@ -494,7 +494,8 @@ static int spi_engine_probe(struct platform_device *pdev)
1763 + SPI_ENGINE_VERSION_MAJOR(version),
1764 + SPI_ENGINE_VERSION_MINOR(version),
1765 + SPI_ENGINE_VERSION_PATCH(version));
1766 +- return -ENODEV;
1767 ++ ret = -ENODEV;
1768 ++ goto err_put_master;
1769 + }
1770 +
1771 + spi_engine->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
1772 +diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
1773 +index 6693d7c69f97..e8efb4299a95 100644
1774 +--- a/drivers/target/iscsi/iscsi_target_nego.c
1775 ++++ b/drivers/target/iscsi/iscsi_target_nego.c
1776 +@@ -490,14 +490,60 @@ static void iscsi_target_restore_sock_callbacks(struct iscsi_conn *conn)
1777 +
1778 + static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *);
1779 +
1780 +-static bool iscsi_target_sk_state_check(struct sock *sk)
1781 ++static bool __iscsi_target_sk_check_close(struct sock *sk)
1782 + {
1783 + if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) {
1784 +- pr_debug("iscsi_target_sk_state_check: TCP_CLOSE_WAIT|TCP_CLOSE,"
1785 ++ pr_debug("__iscsi_target_sk_check_close: TCP_CLOSE_WAIT|TCP_CLOSE,"
1786 + "returning FALSE\n");
1787 +- return false;
1788 ++ return true;
1789 + }
1790 +- return true;
1791 ++ return false;
1792 ++}
1793 ++
1794 ++static bool iscsi_target_sk_check_close(struct iscsi_conn *conn)
1795 ++{
1796 ++ bool state = false;
1797 ++
1798 ++ if (conn->sock) {
1799 ++ struct sock *sk = conn->sock->sk;
1800 ++
1801 ++ read_lock_bh(&sk->sk_callback_lock);
1802 ++ state = (__iscsi_target_sk_check_close(sk) ||
1803 ++ test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
1804 ++ read_unlock_bh(&sk->sk_callback_lock);
1805 ++ }
1806 ++ return state;
1807 ++}
1808 ++
1809 ++static bool iscsi_target_sk_check_flag(struct iscsi_conn *conn, unsigned int flag)
1810 ++{
1811 ++ bool state = false;
1812 ++
1813 ++ if (conn->sock) {
1814 ++ struct sock *sk = conn->sock->sk;
1815 ++
1816 ++ read_lock_bh(&sk->sk_callback_lock);
1817 ++ state = test_bit(flag, &conn->login_flags);
1818 ++ read_unlock_bh(&sk->sk_callback_lock);
1819 ++ }
1820 ++ return state;
1821 ++}
1822 ++
1823 ++static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned int flag)
1824 ++{
1825 ++ bool state = false;
1826 ++
1827 ++ if (conn->sock) {
1828 ++ struct sock *sk = conn->sock->sk;
1829 ++
1830 ++ write_lock_bh(&sk->sk_callback_lock);
1831 ++ state = (__iscsi_target_sk_check_close(sk) ||
1832 ++ test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
1833 ++ if (!state)
1834 ++ clear_bit(flag, &conn->login_flags);
1835 ++ write_unlock_bh(&sk->sk_callback_lock);
1836 ++ }
1837 ++ return state;
1838 + }
1839 +
1840 + static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login)
1841 +@@ -537,6 +583,20 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
1842 +
1843 + pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n",
1844 + conn, current->comm, current->pid);
1845 ++ /*
1846 ++ * If iscsi_target_do_login_rx() has been invoked by ->sk_data_ready()
1847 ++ * before initial PDU processing in iscsi_target_start_negotiation()
1848 ++ * has completed, go ahead and retry until it's cleared.
1849 ++ *
1850 ++	 * Otherwise if the TCP connection drops while this is occurring,
1851 ++ * iscsi_target_start_negotiation() will detect the failure, call
1852 ++ * cancel_delayed_work_sync(&conn->login_work), and cleanup the
1853 ++ * remaining iscsi connection resources from iscsi_np process context.
1854 ++ */
1855 ++ if (iscsi_target_sk_check_flag(conn, LOGIN_FLAGS_INITIAL_PDU)) {
1856 ++ schedule_delayed_work(&conn->login_work, msecs_to_jiffies(10));
1857 ++ return;
1858 ++ }
1859 +
1860 + spin_lock(&tpg->tpg_state_lock);
1861 + state = (tpg->tpg_state == TPG_STATE_ACTIVE);
1862 +@@ -544,26 +604,12 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
1863 +
1864 + if (!state) {
1865 + pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n");
1866 +- iscsi_target_restore_sock_callbacks(conn);
1867 +- iscsi_target_login_drop(conn, login);
1868 +- iscsit_deaccess_np(np, tpg, tpg_np);
1869 +- return;
1870 ++ goto err;
1871 + }
1872 +
1873 +- if (conn->sock) {
1874 +- struct sock *sk = conn->sock->sk;
1875 +-
1876 +- read_lock_bh(&sk->sk_callback_lock);
1877 +- state = iscsi_target_sk_state_check(sk);
1878 +- read_unlock_bh(&sk->sk_callback_lock);
1879 +-
1880 +- if (!state) {
1881 +- pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
1882 +- iscsi_target_restore_sock_callbacks(conn);
1883 +- iscsi_target_login_drop(conn, login);
1884 +- iscsit_deaccess_np(np, tpg, tpg_np);
1885 +- return;
1886 +- }
1887 ++ if (iscsi_target_sk_check_close(conn)) {
1888 ++ pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
1889 ++ goto err;
1890 + }
1891 +
1892 + conn->login_kworker = current;
1893 +@@ -581,34 +627,29 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
1894 + flush_signals(current);
1895 + conn->login_kworker = NULL;
1896 +
1897 +- if (rc < 0) {
1898 +- iscsi_target_restore_sock_callbacks(conn);
1899 +- iscsi_target_login_drop(conn, login);
1900 +- iscsit_deaccess_np(np, tpg, tpg_np);
1901 +- return;
1902 +- }
1903 ++ if (rc < 0)
1904 ++ goto err;
1905 +
1906 + pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n",
1907 + conn, current->comm, current->pid);
1908 +
1909 + rc = iscsi_target_do_login(conn, login);
1910 + if (rc < 0) {
1911 +- iscsi_target_restore_sock_callbacks(conn);
1912 +- iscsi_target_login_drop(conn, login);
1913 +- iscsit_deaccess_np(np, tpg, tpg_np);
1914 ++ goto err;
1915 + } else if (!rc) {
1916 +- if (conn->sock) {
1917 +- struct sock *sk = conn->sock->sk;
1918 +-
1919 +- write_lock_bh(&sk->sk_callback_lock);
1920 +- clear_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags);
1921 +- write_unlock_bh(&sk->sk_callback_lock);
1922 +- }
1923 ++ if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_READ_ACTIVE))
1924 ++ goto err;
1925 + } else if (rc == 1) {
1926 + iscsi_target_nego_release(conn);
1927 + iscsi_post_login_handler(np, conn, zero_tsih);
1928 + iscsit_deaccess_np(np, tpg, tpg_np);
1929 + }
1930 ++ return;
1931 ++
1932 ++err:
1933 ++ iscsi_target_restore_sock_callbacks(conn);
1934 ++ iscsi_target_login_drop(conn, login);
1935 ++ iscsit_deaccess_np(np, tpg, tpg_np);
1936 + }
1937 +
1938 + static void iscsi_target_do_cleanup(struct work_struct *work)
1939 +@@ -656,31 +697,54 @@ static void iscsi_target_sk_state_change(struct sock *sk)
1940 + orig_state_change(sk);
1941 + return;
1942 + }
1943 ++ state = __iscsi_target_sk_check_close(sk);
1944 ++ pr_debug("__iscsi_target_sk_close_change: state: %d\n", state);
1945 ++
1946 + if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) {
1947 + pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1 sk_state_change"
1948 + " conn: %p\n", conn);
1949 ++ if (state)
1950 ++ set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
1951 + write_unlock_bh(&sk->sk_callback_lock);
1952 + orig_state_change(sk);
1953 + return;
1954 + }
1955 +- if (test_and_set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
1956 ++ if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
1957 + pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n",
1958 + conn);
1959 + write_unlock_bh(&sk->sk_callback_lock);
1960 + orig_state_change(sk);
1961 + return;
1962 + }
1963 ++ /*
1964 ++ * If the TCP connection has dropped, go ahead and set LOGIN_FLAGS_CLOSED,
1965 ++ * but only queue conn->login_work -> iscsi_target_do_login_rx()
1966 ++ * processing if LOGIN_FLAGS_INITIAL_PDU has already been cleared.
1967 ++ *
1968 ++ * When iscsi_target_do_login_rx() runs, iscsi_target_sk_check_close()
1969 ++ * will detect the dropped TCP connection from delayed workqueue context.
1970 ++ *
1971 ++ * If LOGIN_FLAGS_INITIAL_PDU is still set, which means the initial
1972 ++ * iscsi_target_start_negotiation() is running, iscsi_target_do_login()
1973 ++ * via iscsi_target_sk_check_close() or iscsi_target_start_negotiation()
1974 ++ * via iscsi_target_sk_check_and_clear() is responsible for detecting the
1975 ++ * dropped TCP connection in iscsi_np process context, and cleaning up
1976 ++ * the remaining iscsi connection resources.
1977 ++ */
1978 ++ if (state) {
1979 ++ pr_debug("iscsi_target_sk_state_change got failed state\n");
1980 ++ set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
1981 ++ state = test_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
1982 ++ write_unlock_bh(&sk->sk_callback_lock);
1983 +
1984 +- state = iscsi_target_sk_state_check(sk);
1985 +- write_unlock_bh(&sk->sk_callback_lock);
1986 +-
1987 +- pr_debug("iscsi_target_sk_state_change: state: %d\n", state);
1988 ++ orig_state_change(sk);
1989 +
1990 +- if (!state) {
1991 +- pr_debug("iscsi_target_sk_state_change got failed state\n");
1992 +- schedule_delayed_work(&conn->login_cleanup_work, 0);
1993 ++ if (!state)
1994 ++ schedule_delayed_work(&conn->login_work, 0);
1995 + return;
1996 + }
1997 ++ write_unlock_bh(&sk->sk_callback_lock);
1998 ++
1999 + orig_state_change(sk);
2000 + }
2001 +
2002 +@@ -945,6 +1009,15 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
2003 + if (iscsi_target_handle_csg_one(conn, login) < 0)
2004 + return -1;
2005 + if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
2006 ++ /*
2007 ++ * Check to make sure the TCP connection has not
2008 ++ * dropped asynchronously while session reinstatement
2009 ++ * was occuring in this kthread context, before
2010 ++ * transitioning to full feature phase operation.
2011 ++ */
2012 ++ if (iscsi_target_sk_check_close(conn))
2013 ++ return -1;
2014 ++
2015 + login->tsih = conn->sess->tsih;
2016 + login->login_complete = 1;
2017 + iscsi_target_restore_sock_callbacks(conn);
2018 +@@ -971,21 +1044,6 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
2019 + break;
2020 + }
2021 +
2022 +- if (conn->sock) {
2023 +- struct sock *sk = conn->sock->sk;
2024 +- bool state;
2025 +-
2026 +- read_lock_bh(&sk->sk_callback_lock);
2027 +- state = iscsi_target_sk_state_check(sk);
2028 +- read_unlock_bh(&sk->sk_callback_lock);
2029 +-
2030 +- if (!state) {
2031 +- pr_debug("iscsi_target_do_login() failed state for"
2032 +- " conn: %p\n", conn);
2033 +- return -1;
2034 +- }
2035 +- }
2036 +-
2037 + return 0;
2038 + }
2039 +
2040 +@@ -1252,13 +1310,25 @@ int iscsi_target_start_negotiation(
2041 + if (conn->sock) {
2042 + struct sock *sk = conn->sock->sk;
2043 +
2044 +- write_lock_bh(&sk->sk_callback_lock);
2045 +- set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
2046 +- write_unlock_bh(&sk->sk_callback_lock);
2047 +- }
2048 ++ write_lock_bh(&sk->sk_callback_lock);
2049 ++ set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
2050 ++ set_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
2051 ++ write_unlock_bh(&sk->sk_callback_lock);
2052 ++ }
2053 ++ /*
2054 ++ * If iscsi_target_do_login returns zero to signal more PDU
2055 ++ * exchanges are required to complete the login, go ahead and
2056 ++ * clear LOGIN_FLAGS_INITIAL_PDU but only if the TCP connection
2057 ++ * is still active.
2058 ++ *
2059 ++ * Otherwise if TCP connection dropped asynchronously, go ahead
2060 ++ * and perform connection cleanup now.
2061 ++ */
2062 ++ ret = iscsi_target_do_login(conn, login);
2063 ++ if (!ret && iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU))
2064 ++ ret = -1;
2065 +
2066 +- ret = iscsi_target_do_login(conn, login);
2067 +- if (ret < 0) {
2068 ++ if (ret < 0) {
2069 + cancel_delayed_work_sync(&conn->login_work);
2070 + cancel_delayed_work_sync(&conn->login_cleanup_work);
2071 + iscsi_target_restore_sock_callbacks(conn);
2072 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
2073 +index 14a37ff0b9e3..705bb5f5a87f 100644
2074 +--- a/fs/btrfs/extent-tree.c
2075 ++++ b/fs/btrfs/extent-tree.c
2076 +@@ -4759,10 +4759,6 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
2077 + else
2078 + flush = BTRFS_RESERVE_NO_FLUSH;
2079 + spin_lock(&space_info->lock);
2080 +- if (can_overcommit(root, space_info, orig, flush)) {
2081 +- spin_unlock(&space_info->lock);
2082 +- break;
2083 +- }
2084 + if (list_empty(&space_info->tickets) &&
2085 + list_empty(&space_info->priority_tickets)) {
2086 + spin_unlock(&space_info->lock);
2087 +diff --git a/fs/ext4/file.c b/fs/ext4/file.c
2088 +index 9e77c089e8cb..d17d12ed6f73 100644
2089 +--- a/fs/ext4/file.c
2090 ++++ b/fs/ext4/file.c
2091 +@@ -469,6 +469,8 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
2092 + lastoff = page_offset(page);
2093 + bh = head = page_buffers(page);
2094 + do {
2095 ++ if (lastoff + bh->b_size <= startoff)
2096 ++ goto next;
2097 + if (buffer_uptodate(bh) ||
2098 + buffer_unwritten(bh)) {
2099 + if (whence == SEEK_DATA)
2100 +@@ -483,6 +485,7 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
2101 + unlock_page(page);
2102 + goto out;
2103 + }
2104 ++next:
2105 + lastoff += bh->b_size;
2106 + bh = bh->b_this_page;
2107 + } while (bh != head);
2108 +diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
2109 +index cf681004b196..95bf46654153 100644
2110 +--- a/fs/ext4/resize.c
2111 ++++ b/fs/ext4/resize.c
2112 +@@ -1926,7 +1926,8 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
2113 + n_desc_blocks = o_desc_blocks +
2114 + le16_to_cpu(es->s_reserved_gdt_blocks);
2115 + n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
2116 +- n_blocks_count = n_group * EXT4_BLOCKS_PER_GROUP(sb);
2117 ++ n_blocks_count = (ext4_fsblk_t)n_group *
2118 ++ EXT4_BLOCKS_PER_GROUP(sb);
2119 + n_group--; /* set to last group number */
2120 + }
2121 +
2122 +diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
2123 +index 7e0c002c12e9..eb20b8767f3c 100644
2124 +--- a/fs/f2fs/super.c
2125 ++++ b/fs/f2fs/super.c
2126 +@@ -1424,6 +1424,8 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
2127 + unsigned int total, fsmeta;
2128 + struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
2129 + struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2130 ++ unsigned int main_segs, blocks_per_seg;
2131 ++ int i;
2132 +
2133 + total = le32_to_cpu(raw_super->segment_count);
2134 + fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
2135 +@@ -1435,6 +1437,20 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
2136 + if (unlikely(fsmeta >= total))
2137 + return 1;
2138 +
2139 ++ main_segs = le32_to_cpu(raw_super->segment_count_main);
2140 ++ blocks_per_seg = sbi->blocks_per_seg;
2141 ++
2142 ++ for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
2143 ++ if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
2144 ++ le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
2145 ++ return 1;
2146 ++ }
2147 ++ for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
2148 ++ if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
2149 ++ le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
2150 ++ return 1;
2151 ++ }
2152 ++
2153 + if (unlikely(f2fs_cp_error(sbi))) {
2154 + f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
2155 + return 1;
2156 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
2157 +index 46ca7881d80d..a53b8e0c896a 100644
2158 +--- a/fs/nfs/nfs4proc.c
2159 ++++ b/fs/nfs/nfs4proc.c
2160 +@@ -7410,7 +7410,7 @@ static void nfs4_exchange_id_done(struct rpc_task *task, void *data)
2161 + cdata->res.server_scope = NULL;
2162 + }
2163 + /* Save the EXCHANGE_ID verifier session trunk tests */
2164 +- memcpy(clp->cl_confirm.data, cdata->args.verifier->data,
2165 ++ memcpy(clp->cl_confirm.data, cdata->args.verifier.data,
2166 + sizeof(clp->cl_confirm.data));
2167 + }
2168 + out:
2169 +@@ -7447,7 +7447,6 @@ static const struct rpc_call_ops nfs4_exchange_id_call_ops = {
2170 + static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
2171 + u32 sp4_how, struct rpc_xprt *xprt)
2172 + {
2173 +- nfs4_verifier verifier;
2174 + struct rpc_message msg = {
2175 + .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
2176 + .rpc_cred = cred,
2177 +@@ -7470,8 +7469,7 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
2178 + if (!calldata)
2179 + goto out;
2180 +
2181 +- if (!xprt)
2182 +- nfs4_init_boot_verifier(clp, &verifier);
2183 ++ nfs4_init_boot_verifier(clp, &calldata->args.verifier);
2184 +
2185 + status = nfs4_init_uniform_client_string(clp);
2186 + if (status)
2187 +@@ -7516,9 +7514,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
2188 + task_setup_data.rpc_xprt = xprt;
2189 + task_setup_data.flags =
2190 + RPC_TASK_SOFT|RPC_TASK_SOFTCONN|RPC_TASK_ASYNC;
2191 +- calldata->args.verifier = &clp->cl_confirm;
2192 +- } else {
2193 +- calldata->args.verifier = &verifier;
2194 ++ memcpy(calldata->args.verifier.data, clp->cl_confirm.data,
2195 ++ sizeof(calldata->args.verifier.data));
2196 + }
2197 + calldata->args.client = clp;
2198 + #ifdef CONFIG_NFS_V4_1_MIGRATION
2199 +diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
2200 +index c9c4d9855976..5e2724a928ed 100644
2201 +--- a/fs/nfs/nfs4xdr.c
2202 ++++ b/fs/nfs/nfs4xdr.c
2203 +@@ -1761,7 +1761,7 @@ static void encode_exchange_id(struct xdr_stream *xdr,
2204 + int len = 0;
2205 +
2206 + encode_op_hdr(xdr, OP_EXCHANGE_ID, decode_exchange_id_maxsz, hdr);
2207 +- encode_nfs4_verifier(xdr, args->verifier);
2208 ++ encode_nfs4_verifier(xdr, &args->verifier);
2209 +
2210 + encode_string(xdr, strlen(args->client->cl_owner_id),
2211 + args->client->cl_owner_id);
2212 +diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
2213 +index bfc204e70338..cd32a49ae81e 100644
2214 +--- a/include/linux/cpuset.h
2215 ++++ b/include/linux/cpuset.h
2216 +@@ -16,6 +16,19 @@
2217 +
2218 + #ifdef CONFIG_CPUSETS
2219 +
2220 ++/*
2221 ++ * Static branch rewrites can happen in an arbitrary order for a given
2222 ++ * key. In code paths where we need to loop with read_mems_allowed_begin() and
2223 ++ * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
2224 ++ * to ensure that begin() always gets rewritten before retry() in the
2225 ++ * disabled -> enabled transition. If not, then if local irqs are disabled
2226 ++ * around the loop, we can deadlock since retry() would always be
2227 ++ * comparing the latest value of the mems_allowed seqcount against 0 as
2228 ++ * begin() still would see cpusets_enabled() as false. The enabled -> disabled
2229 ++ * transition should happen in reverse order for the same reasons (want to stop
2230 ++ * looking at real value of mems_allowed.sequence in retry() first).
2231 ++ */
2232 ++extern struct static_key_false cpusets_pre_enable_key;
2233 + extern struct static_key_false cpusets_enabled_key;
2234 + static inline bool cpusets_enabled(void)
2235 + {
2236 +@@ -30,12 +43,14 @@ static inline int nr_cpusets(void)
2237 +
2238 + static inline void cpuset_inc(void)
2239 + {
2240 ++ static_branch_inc(&cpusets_pre_enable_key);
2241 + static_branch_inc(&cpusets_enabled_key);
2242 + }
2243 +
2244 + static inline void cpuset_dec(void)
2245 + {
2246 + static_branch_dec(&cpusets_enabled_key);
2247 ++ static_branch_dec(&cpusets_pre_enable_key);
2248 + }
2249 +
2250 + extern int cpuset_init(void);
2251 +@@ -113,7 +128,7 @@ extern void cpuset_print_current_mems_allowed(void);
2252 + */
2253 + static inline unsigned int read_mems_allowed_begin(void)
2254 + {
2255 +- if (!cpusets_enabled())
2256 ++ if (!static_branch_unlikely(&cpusets_pre_enable_key))
2257 + return 0;
2258 +
2259 + return read_seqcount_begin(&current->mems_allowed_seq);
2260 +@@ -127,7 +142,7 @@ static inline unsigned int read_mems_allowed_begin(void)
2261 + */
2262 + static inline bool read_mems_allowed_retry(unsigned int seq)
2263 + {
2264 +- if (!cpusets_enabled())
2265 ++ if (!static_branch_unlikely(&cpusets_enabled_key))
2266 + return false;
2267 +
2268 + return read_seqcount_retry(&current->mems_allowed_seq, seq);
2269 +diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
2270 +index 08d947fc4c59..e8471c2ca83a 100644
2271 +--- a/include/linux/mm_types.h
2272 ++++ b/include/linux/mm_types.h
2273 +@@ -507,6 +507,10 @@ struct mm_struct {
2274 + * PROT_NONE or PROT_NUMA mapped page.
2275 + */
2276 + bool tlb_flush_pending;
2277 ++#endif
2278 ++#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
2279 ++ /* See flush_tlb_batched_pending() */
2280 ++ bool tlb_flush_batched;
2281 + #endif
2282 + struct uprobes_state uprobes_state;
2283 + #ifdef CONFIG_X86_INTEL_MPX
2284 +diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
2285 +index beb1e10f446e..3bf867a0c3b3 100644
2286 +--- a/include/linux/nfs_xdr.h
2287 ++++ b/include/linux/nfs_xdr.h
2288 +@@ -1199,7 +1199,7 @@ struct nfs41_state_protection {
2289 +
2290 + struct nfs41_exchange_id_args {
2291 + struct nfs_client *client;
2292 +- nfs4_verifier *verifier;
2293 ++ nfs4_verifier verifier;
2294 + u32 flags;
2295 + struct nfs41_state_protection state_protect;
2296 + };
2297 +diff --git a/include/linux/property.h b/include/linux/property.h
2298 +index 856e50b2140c..338f9b76914b 100644
2299 +--- a/include/linux/property.h
2300 ++++ b/include/linux/property.h
2301 +@@ -33,6 +33,8 @@ enum dev_dma_attr {
2302 + DEV_DMA_COHERENT,
2303 + };
2304 +
2305 ++struct fwnode_handle *dev_fwnode(struct device *dev);
2306 ++
2307 + bool device_property_present(struct device *dev, const char *propname);
2308 + int device_property_read_u8_array(struct device *dev, const char *propname,
2309 + u8 *val, size_t nval);
2310 +diff --git a/include/linux/sched.h b/include/linux/sched.h
2311 +index f425eb3318ab..14f58cf06054 100644
2312 +--- a/include/linux/sched.h
2313 ++++ b/include/linux/sched.h
2314 +@@ -830,6 +830,16 @@ struct signal_struct {
2315 +
2316 + #define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */
2317 +
2318 ++#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
2319 ++ SIGNAL_STOP_CONTINUED)
2320 ++
2321 ++static inline void signal_set_stop_flags(struct signal_struct *sig,
2322 ++ unsigned int flags)
2323 ++{
2324 ++ WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
2325 ++ sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
2326 ++}
2327 ++
2328 + /* If true, all threads except ->group_exit_task have pending SIGKILL */
2329 + static inline int signal_group_exit(const struct signal_struct *sig)
2330 + {
2331 +diff --git a/include/linux/slab.h b/include/linux/slab.h
2332 +index 084b12bad198..4c5363566815 100644
2333 +--- a/include/linux/slab.h
2334 ++++ b/include/linux/slab.h
2335 +@@ -226,7 +226,7 @@ static inline const char *__check_heap_object(const void *ptr,
2336 + * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
2337 + */
2338 + #define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
2339 +-#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
2340 ++#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
2341 + #ifndef KMALLOC_SHIFT_LOW
2342 + #define KMALLOC_SHIFT_LOW 3
2343 + #endif
2344 +@@ -239,7 +239,7 @@ static inline const char *__check_heap_object(const void *ptr,
2345 + * be allocated from the same page.
2346 + */
2347 + #define KMALLOC_SHIFT_HIGH PAGE_SHIFT
2348 +-#define KMALLOC_SHIFT_MAX 30
2349 ++#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
2350 + #ifndef KMALLOC_SHIFT_LOW
2351 + #define KMALLOC_SHIFT_LOW 3
2352 + #endif
2353 +diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
2354 +index fc6e22186405..733a21ef8da4 100644
2355 +--- a/include/linux/workqueue.h
2356 ++++ b/include/linux/workqueue.h
2357 +@@ -311,6 +311,7 @@ enum {
2358 +
2359 + __WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */
2360 + __WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */
2361 ++ __WQ_ORDERED_EXPLICIT = 1 << 18, /* internal: alloc_ordered_workqueue() */
2362 + __WQ_LEGACY = 1 << 18, /* internal: create*_workqueue() */
2363 +
2364 + WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
2365 +@@ -409,7 +410,8 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
2366 + * Pointer to the allocated workqueue on success, %NULL on failure.
2367 + */
2368 + #define alloc_ordered_workqueue(fmt, flags, args...) \
2369 +- alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
2370 ++ alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | \
2371 ++ __WQ_ORDERED_EXPLICIT | (flags), 1, ##args)
2372 +
2373 + #define create_workqueue(name) \
2374 + alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
2375 +diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h
2376 +index e0f4109e64c6..c2aa73e5e6bb 100644
2377 +--- a/include/net/iw_handler.h
2378 ++++ b/include/net/iw_handler.h
2379 +@@ -556,7 +556,8 @@ iwe_stream_add_point(struct iw_request_info *info, char *stream, char *ends,
2380 + memcpy(stream + lcp_len,
2381 + ((char *) &iwe->u) + IW_EV_POINT_OFF,
2382 + IW_EV_POINT_PK_LEN - IW_EV_LCP_PK_LEN);
2383 +- memcpy(stream + point_len, extra, iwe->u.data.length);
2384 ++ if (iwe->u.data.length && extra)
2385 ++ memcpy(stream + point_len, extra, iwe->u.data.length);
2386 + stream += event_len;
2387 + }
2388 + return stream;
2389 +diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
2390 +index 31acc3f4f132..61d9ce89d10d 100644
2391 +--- a/include/net/sctp/sctp.h
2392 ++++ b/include/net/sctp/sctp.h
2393 +@@ -460,6 +460,8 @@ _sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member)
2394 +
2395 + #define _sctp_walk_params(pos, chunk, end, member)\
2396 + for (pos.v = chunk->member;\
2397 ++ (pos.v + offsetof(struct sctp_paramhdr, length) + sizeof(pos.p->length) <=\
2398 ++ (void *)chunk + end) &&\
2399 + pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
2400 + ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\
2401 + pos.v += SCTP_PAD4(ntohs(pos.p->length)))
2402 +@@ -470,6 +472,8 @@ _sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length))
2403 + #define _sctp_walk_errors(err, chunk_hdr, end)\
2404 + for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
2405 + sizeof(sctp_chunkhdr_t));\
2406 ++ ((void *)err + offsetof(sctp_errhdr_t, length) + sizeof(err->length) <=\
2407 ++ (void *)chunk_hdr + end) &&\
2408 + (void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\
2409 + ntohs(err->length) >= sizeof(sctp_errhdr_t); \
2410 + err = (sctp_errhdr_t *)((void *)err + SCTP_PAD4(ntohs(err->length))))
2411 +diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
2412 +index 33b2e75bf2eb..c8132b419148 100644
2413 +--- a/include/target/iscsi/iscsi_target_core.h
2414 ++++ b/include/target/iscsi/iscsi_target_core.h
2415 +@@ -563,6 +563,7 @@ struct iscsi_conn {
2416 + #define LOGIN_FLAGS_READ_ACTIVE 1
2417 + #define LOGIN_FLAGS_CLOSED 2
2418 + #define LOGIN_FLAGS_READY 4
2419 ++#define LOGIN_FLAGS_INITIAL_PDU 8
2420 + unsigned long login_flags;
2421 + struct delayed_work login_work;
2422 + struct delayed_work login_cleanup_work;
2423 +diff --git a/kernel/cgroup.c b/kernel/cgroup.c
2424 +index 1fde8eec9529..4c233437ee1a 100644
2425 +--- a/kernel/cgroup.c
2426 ++++ b/kernel/cgroup.c
2427 +@@ -3487,11 +3487,11 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
2428 + cgrp->subtree_control &= ~disable;
2429 +
2430 + ret = cgroup_apply_control(cgrp);
2431 +-
2432 + cgroup_finalize_control(cgrp, ret);
2433 ++ if (ret)
2434 ++ goto out_unlock;
2435 +
2436 + kernfs_activate(cgrp->kn);
2437 +- ret = 0;
2438 + out_unlock:
2439 + cgroup_kn_unlock(of->kn);
2440 + return ret ?: nbytes;
2441 +@@ -5718,6 +5718,10 @@ int __init cgroup_init(void)
2442 +
2443 + if (ss->bind)
2444 + ss->bind(init_css_set.subsys[ssid]);
2445 ++
2446 ++ mutex_lock(&cgroup_mutex);
2447 ++ css_populate_dir(init_css_set.subsys[ssid]);
2448 ++ mutex_unlock(&cgroup_mutex);
2449 + }
2450 +
2451 + /* init_css_set.subsys[] has been updated, re-hash */
2452 +diff --git a/kernel/cpuset.c b/kernel/cpuset.c
2453 +index 24d175d2b62d..247afb108343 100644
2454 +--- a/kernel/cpuset.c
2455 ++++ b/kernel/cpuset.c
2456 +@@ -61,6 +61,7 @@
2457 + #include <linux/cgroup.h>
2458 + #include <linux/wait.h>
2459 +
2460 ++DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
2461 + DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
2462 +
2463 + /* See "Frequency meter" comments, below. */
2464 +diff --git a/kernel/signal.c b/kernel/signal.c
2465 +index deb04d5983ed..e48668c3c972 100644
2466 +--- a/kernel/signal.c
2467 ++++ b/kernel/signal.c
2468 +@@ -346,7 +346,7 @@ static bool task_participate_group_stop(struct task_struct *task)
2469 + * fresh group stop. Read comment in do_signal_stop() for details.
2470 + */
2471 + if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
2472 +- sig->flags = SIGNAL_STOP_STOPPED;
2473 ++ signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
2474 + return true;
2475 + }
2476 + return false;
2477 +@@ -845,7 +845,7 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force)
2478 + * will take ->siglock, notice SIGNAL_CLD_MASK, and
2479 + * notify its parent. See get_signal_to_deliver().
2480 + */
2481 +- signal->flags = why | SIGNAL_STOP_CONTINUED;
2482 ++ signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
2483 + signal->group_stop_count = 0;
2484 + signal->group_exit_code = 0;
2485 + }
2486 +diff --git a/kernel/time/timer.c b/kernel/time/timer.c
2487 +index c611c47de884..944ad64277a6 100644
2488 +--- a/kernel/time/timer.c
2489 ++++ b/kernel/time/timer.c
2490 +@@ -1536,7 +1536,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
2491 + base->is_idle = false;
2492 + } else {
2493 + if (!is_max_delta)
2494 +- expires = basem + (nextevt - basej) * TICK_NSEC;
2495 ++ expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
2496 + /*
2497 + * If we expect to sleep more than a tick, mark the base idle:
2498 + */
2499 +diff --git a/kernel/workqueue.c b/kernel/workqueue.c
2500 +index 479d840db286..776dda02e751 100644
2501 +--- a/kernel/workqueue.c
2502 ++++ b/kernel/workqueue.c
2503 +@@ -3730,8 +3730,12 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
2504 + return -EINVAL;
2505 +
2506 + /* creating multiple pwqs breaks ordering guarantee */
2507 +- if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
2508 +- return -EINVAL;
2509 ++ if (!list_empty(&wq->pwqs)) {
2510 ++ if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
2511 ++ return -EINVAL;
2512 ++
2513 ++ wq->flags &= ~__WQ_ORDERED;
2514 ++ }
2515 +
2516 + ctx = apply_wqattrs_prepare(wq, attrs);
2517 + if (!ctx)
2518 +@@ -3915,6 +3919,16 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
2519 + struct workqueue_struct *wq;
2520 + struct pool_workqueue *pwq;
2521 +
2522 ++ /*
2523 ++ * Unbound && max_active == 1 used to imply ordered, which is no
2524 ++ * longer the case on NUMA machines due to per-node pools. While
2525 ++ * alloc_ordered_workqueue() is the right way to create an ordered
2526 ++ * workqueue, keep the previous behavior to avoid subtle breakages
2527 ++ * on NUMA.
2528 ++ */
2529 ++ if ((flags & WQ_UNBOUND) && max_active == 1)
2530 ++ flags |= __WQ_ORDERED;
2531 ++
2532 + /* see the comment above the definition of WQ_POWER_EFFICIENT */
2533 + if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
2534 + flags |= WQ_UNBOUND;
2535 +@@ -4103,13 +4117,14 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
2536 + struct pool_workqueue *pwq;
2537 +
2538 + /* disallow meddling with max_active for ordered workqueues */
2539 +- if (WARN_ON(wq->flags & __WQ_ORDERED))
2540 ++ if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
2541 + return;
2542 +
2543 + max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
2544 +
2545 + mutex_lock(&wq->mutex);
2546 +
2547 ++ wq->flags &= ~__WQ_ORDERED;
2548 + wq->saved_max_active = max_active;
2549 +
2550 + for_each_pwq(pwq, wq)
2551 +@@ -5214,7 +5229,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
2552 + * attributes breaks ordering guarantee. Disallow exposing ordered
2553 + * workqueues.
2554 + */
2555 +- if (WARN_ON(wq->flags & __WQ_ORDERED))
2556 ++ if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
2557 + return -EINVAL;
2558 +
2559 + wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
2560 +diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
2561 +index a6c8db1d62f6..f60e67217f18 100644
2562 +--- a/lib/Kconfig.debug
2563 ++++ b/lib/Kconfig.debug
2564 +@@ -145,7 +145,7 @@ config DEBUG_INFO_REDUCED
2565 +
2566 + config DEBUG_INFO_SPLIT
2567 + bool "Produce split debuginfo in .dwo files"
2568 +- depends on DEBUG_INFO
2569 ++ depends on DEBUG_INFO && !FRV
2570 + help
2571 + Generate debug info into separate .dwo files. This significantly
2572 + reduces the build directory size for builds with DEBUG_INFO,
2573 +diff --git a/mm/internal.h b/mm/internal.h
2574 +index 537ac9951f5f..34a5459e5989 100644
2575 +--- a/mm/internal.h
2576 ++++ b/mm/internal.h
2577 +@@ -472,6 +472,7 @@ struct tlbflush_unmap_batch;
2578 + #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
2579 + void try_to_unmap_flush(void);
2580 + void try_to_unmap_flush_dirty(void);
2581 ++void flush_tlb_batched_pending(struct mm_struct *mm);
2582 + #else
2583 + static inline void try_to_unmap_flush(void)
2584 + {
2585 +@@ -479,7 +480,9 @@ static inline void try_to_unmap_flush(void)
2586 + static inline void try_to_unmap_flush_dirty(void)
2587 + {
2588 + }
2589 +-
2590 ++static inline void flush_tlb_batched_pending(struct mm_struct *mm)
2591 ++{
2592 ++}
2593 + #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
2594 +
2595 + extern const struct trace_print_flags pageflag_names[];
2596 +diff --git a/mm/madvise.c b/mm/madvise.c
2597 +index 93fb63e88b5e..253b1533fba5 100644
2598 +--- a/mm/madvise.c
2599 ++++ b/mm/madvise.c
2600 +@@ -21,6 +21,7 @@
2601 + #include <linux/swap.h>
2602 + #include <linux/swapops.h>
2603 + #include <linux/mmu_notifier.h>
2604 ++#include "internal.h"
2605 +
2606 + #include <asm/tlb.h>
2607 +
2608 +@@ -282,6 +283,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
2609 + return 0;
2610 +
2611 + orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
2612 ++ flush_tlb_batched_pending(mm);
2613 + arch_enter_lazy_mmu_mode();
2614 + for (; addr != end; pte++, addr += PAGE_SIZE) {
2615 + ptent = *pte;
2616 +diff --git a/mm/memory.c b/mm/memory.c
2617 +index e6a5a1f20492..9bf3da0d0e14 100644
2618 +--- a/mm/memory.c
2619 ++++ b/mm/memory.c
2620 +@@ -1124,6 +1124,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
2621 + init_rss_vec(rss);
2622 + start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
2623 + pte = start_pte;
2624 ++ flush_tlb_batched_pending(mm);
2625 + arch_enter_lazy_mmu_mode();
2626 + do {
2627 + pte_t ptent = *pte;
2628 +diff --git a/mm/mprotect.c b/mm/mprotect.c
2629 +index 11936526b08b..ae740c9b1f9b 100644
2630 +--- a/mm/mprotect.c
2631 ++++ b/mm/mprotect.c
2632 +@@ -74,6 +74,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
2633 + if (!pte)
2634 + return 0;
2635 +
2636 ++ flush_tlb_batched_pending(vma->vm_mm);
2637 + arch_enter_lazy_mmu_mode();
2638 + do {
2639 + oldpte = *pte;
2640 +diff --git a/mm/mremap.c b/mm/mremap.c
2641 +index 30d7d2482eea..15976716dd40 100644
2642 +--- a/mm/mremap.c
2643 ++++ b/mm/mremap.c
2644 +@@ -142,6 +142,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
2645 + new_ptl = pte_lockptr(mm, new_pmd);
2646 + if (new_ptl != old_ptl)
2647 + spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
2648 ++ flush_tlb_batched_pending(vma->vm_mm);
2649 + arch_enter_lazy_mmu_mode();
2650 +
2651 + for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
2652 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
2653 +index 56df8c24689d..77b797c2d094 100644
2654 +--- a/mm/page_alloc.c
2655 ++++ b/mm/page_alloc.c
2656 +@@ -1875,14 +1875,14 @@ int move_freepages(struct zone *zone,
2657 + #endif
2658 +
2659 + for (page = start_page; page <= end_page;) {
2660 +- /* Make sure we are not inadvertently changing nodes */
2661 +- VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
2662 +-
2663 + if (!pfn_valid_within(page_to_pfn(page))) {
2664 + page++;
2665 + continue;
2666 + }
2667 +
2668 ++ /* Make sure we are not inadvertently changing nodes */
2669 ++ VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
2670 ++
2671 + if (!PageBuddy(page)) {
2672 + page++;
2673 + continue;
2674 +@@ -6445,8 +6445,8 @@ unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
2675 + }
2676 +
2677 + if (pages && s)
2678 +- pr_info("Freeing %s memory: %ldK (%p - %p)\n",
2679 +- s, pages << (PAGE_SHIFT - 10), start, end);
2680 ++ pr_info("Freeing %s memory: %ldK\n",
2681 ++ s, pages << (PAGE_SHIFT - 10));
2682 +
2683 + return pages;
2684 + }
2685 +diff --git a/mm/rmap.c b/mm/rmap.c
2686 +index cd37c1c7e21b..94488b0362f8 100644
2687 +--- a/mm/rmap.c
2688 ++++ b/mm/rmap.c
2689 +@@ -616,6 +616,13 @@ static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
2690 + cpumask_or(&tlb_ubc->cpumask, &tlb_ubc->cpumask, mm_cpumask(mm));
2691 + tlb_ubc->flush_required = true;
2692 +
2693 ++ /*
2694 ++ * Ensure compiler does not re-order the setting of tlb_flush_batched
2695 ++ * before the PTE is cleared.
2696 ++ */
2697 ++ barrier();
2698 ++ mm->tlb_flush_batched = true;
2699 ++
2700 + /*
2701 + * If the PTE was dirty then it's best to assume it's writable. The
2702 + * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
2703 +@@ -643,6 +650,35 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
2704 +
2705 + return should_defer;
2706 + }
2707 ++
2708 ++/*
2709 ++ * Reclaim unmaps pages under the PTL but does not flush the TLB prior to
2710 ++ * releasing the PTL if TLB flushes are batched. It's possible for a parallel
2711 ++ * operation such as mprotect or munmap to race between reclaim unmapping
2712 ++ * the page and flushing the page. If this race occurs, it potentially allows
2713 ++ * access to data via a stale TLB entry. Tracking all mm's that have TLB
2714 ++ * batching in flight would be expensive during reclaim so instead track
2715 ++ * whether TLB batching occurred in the past and if so then do a flush here
2716 ++ * if required. This will cost one additional flush per reclaim cycle paid
2717 ++ * by the first operation at risk such as mprotect and munmap.
2718 ++ *
2719 ++ * This must be called under the PTL so that an access to tlb_flush_batched
2720 ++ * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
2721 ++ * via the PTL.
2722 ++ */
2723 ++void flush_tlb_batched_pending(struct mm_struct *mm)
2724 ++{
2725 ++ if (mm->tlb_flush_batched) {
2726 ++ flush_tlb_mm(mm);
2727 ++
2728 ++ /*
2729 ++ * Do not allow the compiler to re-order the clearing of
2730 ++ * tlb_flush_batched before the tlb is flushed.
2731 ++ */
2732 ++ barrier();
2733 ++ mm->tlb_flush_batched = false;
2734 ++ }
2735 ++}
2736 + #else
2737 + static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
2738 + struct page *page, bool writable)
2739 +diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
2740 +index b94b1d293506..151e047ce072 100644
2741 +--- a/net/core/dev_ioctl.c
2742 ++++ b/net/core/dev_ioctl.c
2743 +@@ -28,6 +28,7 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg)
2744 +
2745 + if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2746 + return -EFAULT;
2747 ++ ifr.ifr_name[IFNAMSIZ-1] = 0;
2748 +
2749 + error = netdev_get_name(net, ifr.ifr_name, ifr.ifr_ifindex);
2750 + if (error)
2751 +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
2752 +index 9c6fd7f83a4a..4d2629781e8b 100644
2753 +--- a/net/core/rtnetlink.c
2754 ++++ b/net/core/rtnetlink.c
2755 +@@ -1965,7 +1965,8 @@ static int do_setlink(const struct sk_buff *skb,
2756 + struct sockaddr *sa;
2757 + int len;
2758 +
2759 +- len = sizeof(sa_family_t) + dev->addr_len;
2760 ++ len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
2761 ++ sizeof(*sa));
2762 + sa = kmalloc(len, GFP_KERNEL);
2763 + if (!sa) {
2764 + err = -ENOMEM;
2765 +diff --git a/net/dccp/feat.c b/net/dccp/feat.c
2766 +index 1704948e6a12..f227f002c73d 100644
2767 +--- a/net/dccp/feat.c
2768 ++++ b/net/dccp/feat.c
2769 +@@ -1471,9 +1471,12 @@ int dccp_feat_init(struct sock *sk)
2770 + * singleton values (which always leads to failure).
2771 + * These settings can still (later) be overridden via sockopts.
2772 + */
2773 +- if (ccid_get_builtin_ccids(&tx.val, &tx.len) ||
2774 +- ccid_get_builtin_ccids(&rx.val, &rx.len))
2775 ++ if (ccid_get_builtin_ccids(&tx.val, &tx.len))
2776 + return -ENOBUFS;
2777 ++ if (ccid_get_builtin_ccids(&rx.val, &rx.len)) {
2778 ++ kfree(tx.val);
2779 ++ return -ENOBUFS;
2780 ++ }
2781 +
2782 + if (!dccp_feat_prefer(sysctl_dccp_tx_ccid, tx.val, tx.len) ||
2783 + !dccp_feat_prefer(sysctl_dccp_rx_ccid, rx.val, rx.len))
2784 +diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
2785 +index 86b0933ecd45..8fc160098e11 100644
2786 +--- a/net/dccp/ipv4.c
2787 ++++ b/net/dccp/ipv4.c
2788 +@@ -637,6 +637,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
2789 + goto drop_and_free;
2790 +
2791 + inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
2792 ++ reqsk_put(req);
2793 + return 0;
2794 +
2795 + drop_and_free:
2796 +diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
2797 +index 2ac9d2a1aaab..28e8252cc5ea 100644
2798 +--- a/net/dccp/ipv6.c
2799 ++++ b/net/dccp/ipv6.c
2800 +@@ -380,6 +380,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
2801 + goto drop_and_free;
2802 +
2803 + inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
2804 ++ reqsk_put(req);
2805 + return 0;
2806 +
2807 + drop_and_free:
2808 +diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
2809 +index 3d92534c4450..968d8e165e3d 100644
2810 +--- a/net/ipv4/fib_frontend.c
2811 ++++ b/net/ipv4/fib_frontend.c
2812 +@@ -1319,13 +1319,14 @@ static struct pernet_operations fib_net_ops = {
2813 +
2814 + void __init ip_fib_init(void)
2815 + {
2816 +- rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
2817 +- rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
2818 +- rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
2819 ++ fib_trie_init();
2820 +
2821 + register_pernet_subsys(&fib_net_ops);
2822 ++
2823 + register_netdevice_notifier(&fib_netdev_notifier);
2824 + register_inetaddr_notifier(&fib_inetaddr_notifier);
2825 +
2826 +- fib_trie_init();
2827 ++ rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
2828 ++ rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
2829 ++ rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
2830 + }
2831 +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
2832 +index e5c1dbef3626..06215ba88b93 100644
2833 +--- a/net/ipv4/ip_output.c
2834 ++++ b/net/ipv4/ip_output.c
2835 +@@ -936,7 +936,8 @@ static int __ip_append_data(struct sock *sk,
2836 + csummode = CHECKSUM_PARTIAL;
2837 +
2838 + cork->length += length;
2839 +- if (((length > mtu) || (skb && skb_is_gso(skb))) &&
2840 ++ if ((((length + (skb ? skb->len : fragheaderlen)) > mtu) ||
2841 ++ (skb && skb_is_gso(skb))) &&
2842 + (sk->sk_protocol == IPPROTO_UDP) &&
2843 + (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
2844 + (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
2845 +diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
2846 +index fd8220213afc..146d86105183 100644
2847 +--- a/net/ipv4/netfilter/nf_reject_ipv4.c
2848 ++++ b/net/ipv4/netfilter/nf_reject_ipv4.c
2849 +@@ -126,6 +126,8 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
2850 + /* ip_route_me_harder expects skb->dst to be set */
2851 + skb_dst_set_noref(nskb, skb_dst(oldskb));
2852 +
2853 ++ nskb->mark = IP4_REPLY_MARK(net, oldskb->mark);
2854 ++
2855 + skb_reserve(nskb, LL_MAX_HEADER);
2856 + niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
2857 + ip4_dst_hoplimit(skb_dst(nskb)));
2858 +diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
2859 +index e3c4043c27de..b6f710d515d0 100644
2860 +--- a/net/ipv4/syncookies.c
2861 ++++ b/net/ipv4/syncookies.c
2862 +@@ -334,6 +334,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
2863 + treq = tcp_rsk(req);
2864 + treq->rcv_isn = ntohl(th->seq) - 1;
2865 + treq->snt_isn = cookie;
2866 ++ treq->txhash = net_tx_rndhash();
2867 + req->mss = mss;
2868 + ireq->ir_num = ntohs(th->dest);
2869 + ireq->ir_rmt_port = th->source;
2870 +diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
2871 +index 80bc36b25de2..566cfc50f7cf 100644
2872 +--- a/net/ipv4/sysctl_net_ipv4.c
2873 ++++ b/net/ipv4/sysctl_net_ipv4.c
2874 +@@ -958,7 +958,7 @@ static struct ctl_table ipv4_net_table[] = {
2875 + .data = &init_net.ipv4.sysctl_tcp_notsent_lowat,
2876 + .maxlen = sizeof(unsigned int),
2877 + .mode = 0644,
2878 +- .proc_handler = proc_dointvec,
2879 ++ .proc_handler = proc_douintvec,
2880 + },
2881 + #ifdef CONFIG_IP_ROUTE_MULTIPATH
2882 + {
2883 +diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
2884 +index 0ea66c2c9344..cb8db347c680 100644
2885 +--- a/net/ipv4/tcp_bbr.c
2886 ++++ b/net/ipv4/tcp_bbr.c
2887 +@@ -83,7 +83,8 @@ struct bbr {
2888 + cwnd_gain:10, /* current gain for setting cwnd */
2889 + full_bw_cnt:3, /* number of rounds without large bw gains */
2890 + cycle_idx:3, /* current index in pacing_gain cycle array */
2891 +- unused_b:6;
2892 ++ has_seen_rtt:1, /* have we seen an RTT sample yet? */
2893 ++ unused_b:5;
2894 + u32 prior_cwnd; /* prior cwnd upon entering loss recovery */
2895 + u32 full_bw; /* recent bw, to estimate if pipe is full */
2896 + };
2897 +@@ -182,6 +183,35 @@ static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
2898 + return rate >> BW_SCALE;
2899 + }
2900 +
2901 ++/* Convert a BBR bw and gain factor to a pacing rate in bytes per second. */
2902 ++static u32 bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain)
2903 ++{
2904 ++ u64 rate = bw;
2905 ++
2906 ++ rate = bbr_rate_bytes_per_sec(sk, rate, gain);
2907 ++ rate = min_t(u64, rate, sk->sk_max_pacing_rate);
2908 ++ return rate;
2909 ++}
2910 ++
2911 ++/* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
2912 ++static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
2913 ++{
2914 ++ struct tcp_sock *tp = tcp_sk(sk);
2915 ++ struct bbr *bbr = inet_csk_ca(sk);
2916 ++ u64 bw;
2917 ++ u32 rtt_us;
2918 ++
2919 ++ if (tp->srtt_us) { /* any RTT sample yet? */
2920 ++ rtt_us = max(tp->srtt_us >> 3, 1U);
2921 ++ bbr->has_seen_rtt = 1;
2922 ++ } else { /* no RTT sample yet */
2923 ++ rtt_us = USEC_PER_MSEC; /* use nominal default RTT */
2924 ++ }
2925 ++ bw = (u64)tp->snd_cwnd * BW_UNIT;
2926 ++ do_div(bw, rtt_us);
2927 ++ sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain);
2928 ++}
2929 ++
2930 + /* Pace using current bw estimate and a gain factor. In order to help drive the
2931 + * network toward lower queues while maintaining high utilization and low
2932 + * latency, the average pacing rate aims to be slightly (~1%) lower than the
2933 +@@ -191,12 +221,13 @@ static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
2934 + */
2935 + static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
2936 + {
2937 ++ struct tcp_sock *tp = tcp_sk(sk);
2938 + struct bbr *bbr = inet_csk_ca(sk);
2939 +- u64 rate = bw;
2940 ++ u32 rate = bbr_bw_to_pacing_rate(sk, bw, gain);
2941 +
2942 +- rate = bbr_rate_bytes_per_sec(sk, rate, gain);
2943 +- rate = min_t(u64, rate, sk->sk_max_pacing_rate);
2944 +- if (bbr->mode != BBR_STARTUP || rate > sk->sk_pacing_rate)
2945 ++ if (unlikely(!bbr->has_seen_rtt && tp->srtt_us))
2946 ++ bbr_init_pacing_rate_from_rtt(sk);
2947 ++ if (bbr_full_bw_reached(sk) || rate > sk->sk_pacing_rate)
2948 + sk->sk_pacing_rate = rate;
2949 + }
2950 +
2951 +@@ -769,7 +800,6 @@ static void bbr_init(struct sock *sk)
2952 + {
2953 + struct tcp_sock *tp = tcp_sk(sk);
2954 + struct bbr *bbr = inet_csk_ca(sk);
2955 +- u64 bw;
2956 +
2957 + bbr->prior_cwnd = 0;
2958 + bbr->tso_segs_goal = 0; /* default segs per skb until first ACK */
2959 +@@ -785,11 +815,8 @@ static void bbr_init(struct sock *sk)
2960 +
2961 + minmax_reset(&bbr->bw, bbr->rtt_cnt, 0); /* init max bw to 0 */
2962 +
2963 +- /* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
2964 +- bw = (u64)tp->snd_cwnd * BW_UNIT;
2965 +- do_div(bw, (tp->srtt_us >> 3) ? : USEC_PER_MSEC);
2966 +- sk->sk_pacing_rate = 0; /* force an update of sk_pacing_rate */
2967 +- bbr_set_pacing_rate(sk, bw, bbr_high_gain);
2968 ++ bbr->has_seen_rtt = 0;
2969 ++ bbr_init_pacing_rate_from_rtt(sk);
2970 +
2971 + bbr->restore_cwnd = 0;
2972 + bbr->round_start = 0;
2973 +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
2974 +index 5a4b8e7bcedd..a5cdf2a23609 100644
2975 +--- a/net/ipv6/ip6_output.c
2976 ++++ b/net/ipv6/ip6_output.c
2977 +@@ -662,8 +662,6 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
2978 + *prevhdr = NEXTHDR_FRAGMENT;
2979 + tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
2980 + if (!tmp_hdr) {
2981 +- IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
2982 +- IPSTATS_MIB_FRAGFAILS);
2983 + err = -ENOMEM;
2984 + goto fail;
2985 + }
2986 +@@ -782,8 +780,6 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
2987 + frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
2988 + hroom + troom, GFP_ATOMIC);
2989 + if (!frag) {
2990 +- IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
2991 +- IPSTATS_MIB_FRAGFAILS);
2992 + err = -ENOMEM;
2993 + goto fail;
2994 + }
2995 +@@ -1376,7 +1372,7 @@ static int __ip6_append_data(struct sock *sk,
2996 + */
2997 +
2998 + cork->length += length;
2999 +- if ((((length + fragheaderlen) > mtu) ||
3000 ++ if ((((length + (skb ? skb->len : headersize)) > mtu) ||
3001 + (skb && skb_is_gso(skb))) &&
3002 + (sk->sk_protocol == IPPROTO_UDP) &&
3003 + (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
3004 +diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
3005 +index 10090400c72f..eedee5d108d9 100644
3006 +--- a/net/ipv6/netfilter/nf_reject_ipv6.c
3007 ++++ b/net/ipv6/netfilter/nf_reject_ipv6.c
3008 +@@ -157,6 +157,7 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
3009 + fl6.fl6_sport = otcph->dest;
3010 + fl6.fl6_dport = otcph->source;
3011 + fl6.flowi6_oif = l3mdev_master_ifindex(skb_dst(oldskb)->dev);
3012 ++ fl6.flowi6_mark = IP6_REPLY_MARK(net, oldskb->mark);
3013 + security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
3014 + dst = ip6_route_output(net, NULL, &fl6);
3015 + if (dst->error) {
3016 +@@ -180,6 +181,8 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
3017 +
3018 + skb_dst_set(nskb, dst);
3019 +
3020 ++ nskb->mark = fl6.flowi6_mark;
3021 ++
3022 + skb_reserve(nskb, hh_len + dst->header_len);
3023 + ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
3024 + ip6_dst_hoplimit(dst));
3025 +diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
3026 +index e9065b8d3af8..abb2c307fbe8 100644
3027 +--- a/net/ipv6/output_core.c
3028 ++++ b/net/ipv6/output_core.c
3029 +@@ -78,7 +78,7 @@ EXPORT_SYMBOL(ipv6_select_ident);
3030 +
3031 + int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
3032 + {
3033 +- u16 offset = sizeof(struct ipv6hdr);
3034 ++ unsigned int offset = sizeof(struct ipv6hdr);
3035 + unsigned int packet_len = skb_tail_pointer(skb) -
3036 + skb_network_header(skb);
3037 + int found_rhdr = 0;
3038 +@@ -86,6 +86,7 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
3039 +
3040 + while (offset <= packet_len) {
3041 + struct ipv6_opt_hdr *exthdr;
3042 ++ unsigned int len;
3043 +
3044 + switch (**nexthdr) {
3045 +
3046 +@@ -111,7 +112,10 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
3047 +
3048 + exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
3049 + offset);
3050 +- offset += ipv6_optlen(exthdr);
3051 ++ len = ipv6_optlen(exthdr);
3052 ++ if (len + offset >= IPV6_MAXPLEN)
3053 ++ return -EINVAL;
3054 ++ offset += len;
3055 + *nexthdr = &exthdr->nexthdr;
3056 + }
3057 +
3058 +diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
3059 +index 59c483937aec..7a86433d8896 100644
3060 +--- a/net/ipv6/syncookies.c
3061 ++++ b/net/ipv6/syncookies.c
3062 +@@ -209,6 +209,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
3063 + treq->snt_synack.v64 = 0;
3064 + treq->rcv_isn = ntohl(th->seq) - 1;
3065 + treq->snt_isn = cookie;
3066 ++ treq->txhash = net_tx_rndhash();
3067 +
3068 + /*
3069 + * We need to lookup the dst_entry to get the correct window size.
3070 +diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
3071 +index 48386bff8b4e..b28e45b691de 100644
3072 +--- a/net/openvswitch/conntrack.c
3073 ++++ b/net/openvswitch/conntrack.c
3074 +@@ -1088,8 +1088,8 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
3075 +
3076 + nla_for_each_nested(a, attr, rem) {
3077 + int type = nla_type(a);
3078 +- int maxlen = ovs_ct_attr_lens[type].maxlen;
3079 +- int minlen = ovs_ct_attr_lens[type].minlen;
3080 ++ int maxlen;
3081 ++ int minlen;
3082 +
3083 + if (type > OVS_CT_ATTR_MAX) {
3084 + OVS_NLERR(log,
3085 +@@ -1097,6 +1097,9 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
3086 + type, OVS_CT_ATTR_MAX);
3087 + return -EINVAL;
3088 + }
3089 ++
3090 ++ maxlen = ovs_ct_attr_lens[type].maxlen;
3091 ++ minlen = ovs_ct_attr_lens[type].minlen;
3092 + if (nla_len(a) < minlen || nla_len(a) > maxlen) {
3093 + OVS_NLERR(log,
3094 + "Conntrack attr type has unexpected length (type=%d, length=%d, expected=%d)",
3095 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
3096 +index 6a563e6e24de..365c83fcee02 100644
3097 +--- a/net/packet/af_packet.c
3098 ++++ b/net/packet/af_packet.c
3099 +@@ -4322,7 +4322,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
3100 + register_prot_hook(sk);
3101 + }
3102 + spin_unlock(&po->bind_lock);
3103 +- if (closing && (po->tp_version > TPACKET_V2)) {
3104 ++ if (pg_vec && (po->tp_version > TPACKET_V2)) {
3105 + /* Because we don't support block-based V3 on tx-ring */
3106 + if (!tx_ring)
3107 + prb_shutdown_retire_blk_timer(po, rb_queue);
3108 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3109 +index bb1aad39d987..6f337f00ba58 100644
3110 +--- a/sound/pci/hda/patch_realtek.c
3111 ++++ b/sound/pci/hda/patch_realtek.c
3112 +@@ -2233,6 +2233,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
3113 + SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
3114 + SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
3115 + SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
3116 ++ SND_PCI_QUIRK(0x104d, 0x9060, "Sony Vaio VPCL14M1R", ALC882_FIXUP_NO_PRIMARY_HP),
3117 + SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
3118 + SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
3119 +
3120 +diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
3121 +index 10c2a564a715..1ac96ef9ee20 100644
3122 +--- a/sound/soc/codecs/rt5645.c
3123 ++++ b/sound/soc/codecs/rt5645.c
3124 +@@ -3833,6 +3833,9 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
3125 + }
3126 + }
3127 +
3128 ++ regmap_update_bits(rt5645->regmap, RT5645_ADDA_CLK1,
3129 ++ RT5645_I2S_PD1_MASK, RT5645_I2S_PD1_2);
3130 ++
3131 + if (rt5645->pdata.jd_invert) {
3132 + regmap_update_bits(rt5645->regmap, RT5645_IRQ_CTRL2,
3133 + RT5645_JD_1_1_MASK, RT5645_JD_1_1_INV);
3134 +diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
3135 +index 21c3ef01c438..80088c98ce27 100644
3136 +--- a/sound/soc/soc-pcm.c
3137 ++++ b/sound/soc/soc-pcm.c
3138 +@@ -181,6 +181,10 @@ int dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir,
3139 + dev_dbg(be->dev, "ASoC: BE %s event %d dir %d\n",
3140 + be->dai_link->name, event, dir);
3141 +
3142 ++ if ((event == SND_SOC_DAPM_STREAM_STOP) &&
3143 ++ (be->dpcm[dir].users >= 1))
3144 ++ continue;
3145 ++
3146 + snd_soc_dapm_stream_event(be, dir, event);
3147 + }
3148 +