From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.12 commit in: /
Date: Fri, 11 Aug 2017 17:40:11
Message-Id: 1502473191.c20ad5072b7e3f69f4ff535dd534453ad3d7b8ec.mpagano@gentoo
commit:     c20ad5072b7e3f69f4ff535dd534453ad3d7b8ec
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Aug 11 17:39:51 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Aug 11 17:39:51 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c20ad507

Linux patch 4.12.6

 0000_README             |    4 +
 1005_linux-4.12.6.patch | 3935 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3939 insertions(+)

14 diff --git a/0000_README b/0000_README
15 index 29e1ca2..b88e1e0 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -63,6 +63,10 @@ Patch: 1004_linux-4.12.5.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.12.5
21
22 +Patch: 1005_linux-4.12.6.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.12.6
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1005_linux-4.12.6.patch b/1005_linux-4.12.6.patch
31 new file mode 100644
32 index 0000000..461714c
33 --- /dev/null
34 +++ b/1005_linux-4.12.6.patch
35 @@ -0,0 +1,3935 @@
36 +diff --git a/Makefile b/Makefile
37 +index 382e967b0792..c8d80b50495a 100644
38 +--- a/Makefile
39 ++++ b/Makefile
40 +@@ -1,6 +1,6 @@
41 + VERSION = 4
42 + PATCHLEVEL = 12
43 +-SUBLEVEL = 5
44 ++SUBLEVEL = 6
45 + EXTRAVERSION =
46 + NAME = Fearless Coyote
47 +
48 +diff --git a/arch/arm/boot/dts/armada-388-gp.dts b/arch/arm/boot/dts/armada-388-gp.dts
49 +index 895fa6cfa15a..563901e0ec07 100644
50 +--- a/arch/arm/boot/dts/armada-388-gp.dts
51 ++++ b/arch/arm/boot/dts/armada-388-gp.dts
52 +@@ -75,7 +75,7 @@
53 + pinctrl-names = "default";
54 + pinctrl-0 = <&pca0_pins>;
55 + interrupt-parent = <&gpio0>;
56 +- interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
57 ++ interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
58 + gpio-controller;
59 + #gpio-cells = <2>;
60 + interrupt-controller;
61 +@@ -87,7 +87,7 @@
62 + compatible = "nxp,pca9555";
63 + pinctrl-names = "default";
64 + interrupt-parent = <&gpio0>;
65 +- interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
66 ++ interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
67 + gpio-controller;
68 + #gpio-cells = <2>;
69 + interrupt-controller;
70 +diff --git a/arch/arm/boot/dts/tango4-vantage-1172.dts b/arch/arm/boot/dts/tango4-vantage-1172.dts
71 +index 86d8df98802f..13bcc460bcb2 100644
72 +--- a/arch/arm/boot/dts/tango4-vantage-1172.dts
73 ++++ b/arch/arm/boot/dts/tango4-vantage-1172.dts
74 +@@ -22,7 +22,7 @@
75 + };
76 +
77 + &eth0 {
78 +- phy-connection-type = "rgmii";
79 ++ phy-connection-type = "rgmii-id";
80 + phy-handle = <&eth0_phy>;
81 + #address-cells = <1>;
82 + #size-cells = <0>;
83 +diff --git a/arch/arm/mach-mvebu/platsmp.c b/arch/arm/mach-mvebu/platsmp.c
84 +index e62273aacb43..4ffbbd217e82 100644
85 +--- a/arch/arm/mach-mvebu/platsmp.c
86 ++++ b/arch/arm/mach-mvebu/platsmp.c
87 +@@ -211,7 +211,7 @@ static int mv98dx3236_resume_set_cpu_boot_addr(int hw_cpu, void *boot_addr)
88 + return PTR_ERR(base);
89 +
90 + writel(0, base + MV98DX3236_CPU_RESUME_CTRL_REG);
91 +- writel(virt_to_phys(boot_addr), base + MV98DX3236_CPU_RESUME_ADDR_REG);
92 ++ writel(__pa_symbol(boot_addr), base + MV98DX3236_CPU_RESUME_ADDR_REG);
93 +
94 + iounmap(base);
95 +
96 +diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
97 +index bc179efb10ef..b69e4a4ecdd8 100644
98 +--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
99 ++++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
100 +@@ -219,7 +219,7 @@
101 + reg = <0x18800 0x100>, <0x18C00 0x20>;
102 + gpiosb: gpio {
103 + #gpio-cells = <2>;
104 +- gpio-ranges = <&pinctrl_sb 0 0 29>;
105 ++ gpio-ranges = <&pinctrl_sb 0 0 30>;
106 + gpio-controller;
107 + interrupts =
108 + <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
109 +diff --git a/arch/mips/include/asm/mach-ralink/ralink_regs.h b/arch/mips/include/asm/mach-ralink/ralink_regs.h
110 +index 9df1a53bcb36..b4e7dfa214eb 100644
111 +--- a/arch/mips/include/asm/mach-ralink/ralink_regs.h
112 ++++ b/arch/mips/include/asm/mach-ralink/ralink_regs.h
113 +@@ -13,6 +13,8 @@
114 + #ifndef _RALINK_REGS_H_
115 + #define _RALINK_REGS_H_
116 +
117 ++#include <linux/io.h>
118 ++
119 + enum ralink_soc_type {
120 + RALINK_UNKNOWN = 0,
121 + RT2880_SOC,
122 +diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
123 +index 88fe0aad4390..bc208136bbb2 100644
124 +--- a/arch/parisc/include/asm/thread_info.h
125 ++++ b/arch/parisc/include/asm/thread_info.h
126 +@@ -34,7 +34,7 @@ struct thread_info {
127 +
128 + /* thread information allocation */
129 +
130 +-#define THREAD_SIZE_ORDER 2 /* PA-RISC requires at least 16k stack */
131 ++#define THREAD_SIZE_ORDER 3 /* PA-RISC requires at least 32k stack */
132 + /* Be sure to hunt all references to this down when you change the size of
133 + * the kernel stack */
134 + #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
135 +diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
136 +index 85a92db70afc..19c0c141bc3f 100644
137 +--- a/arch/parisc/kernel/cache.c
138 ++++ b/arch/parisc/kernel/cache.c
139 +@@ -587,13 +587,12 @@ void flush_cache_range(struct vm_area_struct *vma,
140 + if (parisc_requires_coherency())
141 + flush_tlb_range(vma, start, end);
142 +
143 +- if ((end - start) >= parisc_cache_flush_threshold) {
144 ++ if ((end - start) >= parisc_cache_flush_threshold
145 ++ || vma->vm_mm->context != mfsp(3)) {
146 + flush_cache_all();
147 + return;
148 + }
149 +
150 +- BUG_ON(vma->vm_mm->context != mfsp(3));
151 +-
152 + flush_user_dcache_range_asm(start, end);
153 + if (vma->vm_flags & VM_EXEC)
154 + flush_user_icache_range_asm(start, end);
155 +diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
156 +index ba5e1c7b1f17..ef9a4eea662f 100644
157 +--- a/arch/parisc/kernel/irq.c
158 ++++ b/arch/parisc/kernel/irq.c
159 +@@ -380,7 +380,7 @@ static inline int eirr_to_irq(unsigned long eirr)
160 + /*
161 + * IRQ STACK - used for irq handler
162 + */
163 +-#define IRQ_STACK_SIZE (4096 << 2) /* 16k irq stack size */
164 ++#define IRQ_STACK_SIZE (4096 << 3) /* 32k irq stack size */
165 +
166 + union irq_stack_union {
167 + unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
168 +diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
169 +index 5c291df30fe3..40d8b552d15a 100644
170 +--- a/arch/powerpc/kernel/irq.c
171 ++++ b/arch/powerpc/kernel/irq.c
172 +@@ -145,6 +145,19 @@ notrace unsigned int __check_irq_replay(void)
173 +
174 + /* Clear bit 0 which we wouldn't clear otherwise */
175 + local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
176 ++ if (happened & PACA_IRQ_HARD_DIS) {
177 ++ /*
178 ++ * We may have missed a decrementer interrupt if hard disabled.
179 ++ * Check the decrementer register in case we had a rollover
180 ++ * while hard disabled.
181 ++ */
182 ++ if (!(happened & PACA_IRQ_DEC)) {
183 ++ if (decrementer_check_overflow()) {
184 ++ local_paca->irq_happened |= PACA_IRQ_DEC;
185 ++ happened |= PACA_IRQ_DEC;
186 ++ }
187 ++ }
188 ++ }
189 +
190 + /*
191 + * Force the delivery of pending soft-disabled interrupts on PS3.
192 +@@ -170,7 +183,7 @@ notrace unsigned int __check_irq_replay(void)
193 + * in case we also had a rollover while hard disabled
194 + */
195 + local_paca->irq_happened &= ~PACA_IRQ_DEC;
196 +- if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow())
197 ++ if (happened & PACA_IRQ_DEC)
198 + return 0x900;
199 +
200 + /* Finally check if an external interrupt happened */
201 +diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
202 +index 925a4ef90559..660ed39e9c9a 100644
203 +--- a/arch/powerpc/kernel/ptrace.c
204 ++++ b/arch/powerpc/kernel/ptrace.c
205 +@@ -127,12 +127,19 @@ static void flush_tmregs_to_thread(struct task_struct *tsk)
206 + * If task is not current, it will have been flushed already to
207 + * it's thread_struct during __switch_to().
208 + *
209 +- * A reclaim flushes ALL the state.
210 ++ * A reclaim flushes ALL the state or if not in TM save TM SPRs
211 ++ * in the appropriate thread structures from live.
212 + */
213 +
214 +- if (tsk == current && MSR_TM_SUSPENDED(mfmsr()))
215 +- tm_reclaim_current(TM_CAUSE_SIGNAL);
216 ++ if (tsk != current)
217 ++ return;
218 +
219 ++ if (MSR_TM_SUSPENDED(mfmsr())) {
220 ++ tm_reclaim_current(TM_CAUSE_SIGNAL);
221 ++ } else {
222 ++ tm_enable();
223 ++ tm_save_sprs(&(tsk->thread));
224 ++ }
225 + }
226 + #else
227 + static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
228 +diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
229 +index 2cddcda4f85f..87841d687f8d 100644
230 +--- a/arch/sparc/include/asm/mmu_context_64.h
231 ++++ b/arch/sparc/include/asm/mmu_context_64.h
232 +@@ -27,9 +27,11 @@ void destroy_context(struct mm_struct *mm);
233 + void __tsb_context_switch(unsigned long pgd_pa,
234 + struct tsb_config *tsb_base,
235 + struct tsb_config *tsb_huge,
236 +- unsigned long tsb_descr_pa);
237 ++ unsigned long tsb_descr_pa,
238 ++ unsigned long secondary_ctx);
239 +
240 +-static inline void tsb_context_switch(struct mm_struct *mm)
241 ++static inline void tsb_context_switch_ctx(struct mm_struct *mm,
242 ++ unsigned long ctx)
243 + {
244 + __tsb_context_switch(__pa(mm->pgd),
245 + &mm->context.tsb_block[MM_TSB_BASE],
246 +@@ -40,9 +42,12 @@ static inline void tsb_context_switch(struct mm_struct *mm)
247 + #else
248 + NULL
249 + #endif
250 +- , __pa(&mm->context.tsb_descr[MM_TSB_BASE]));
251 ++ , __pa(&mm->context.tsb_descr[MM_TSB_BASE]),
252 ++ ctx);
253 + }
254 +
255 ++#define tsb_context_switch(X) tsb_context_switch_ctx(X, 0)
256 ++
257 + void tsb_grow(struct mm_struct *mm,
258 + unsigned long tsb_index,
259 + unsigned long mm_rss);
260 +@@ -112,8 +117,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
261 + * cpu0 to update it's TSB because at that point the cpu_vm_mask
262 + * only had cpu1 set in it.
263 + */
264 +- load_secondary_context(mm);
265 +- tsb_context_switch(mm);
266 ++ tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
267 +
268 + /* Any time a processor runs a context on an address space
269 + * for the first time, we must flush that context out of the
270 +diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h
271 +index ec9c04de3664..ff05992dae7a 100644
272 +--- a/arch/sparc/include/asm/trap_block.h
273 ++++ b/arch/sparc/include/asm/trap_block.h
274 +@@ -54,6 +54,7 @@ extern struct trap_per_cpu trap_block[NR_CPUS];
275 + void init_cur_cpu_trap(struct thread_info *);
276 + void setup_tba(void);
277 + extern int ncpus_probed;
278 ++extern u64 cpu_mondo_counter[NR_CPUS];
279 +
280 + unsigned long real_hard_smp_processor_id(void);
281 +
282 +diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
283 +index fdf31040a7dc..3218bc43302e 100644
284 +--- a/arch/sparc/kernel/smp_64.c
285 ++++ b/arch/sparc/kernel/smp_64.c
286 +@@ -622,22 +622,48 @@ static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
287 + }
288 + }
289 +
290 +-/* Multi-cpu list version. */
291 ++#define CPU_MONDO_COUNTER(cpuid) (cpu_mondo_counter[cpuid])
292 ++#define MONDO_USEC_WAIT_MIN 2
293 ++#define MONDO_USEC_WAIT_MAX 100
294 ++#define MONDO_RETRY_LIMIT 500000
295 ++
296 ++/* Multi-cpu list version.
297 ++ *
298 ++ * Deliver xcalls to 'cnt' number of cpus in 'cpu_list'.
299 ++ * Sometimes not all cpus receive the mondo, requiring us to re-send
300 ++ * the mondo until all cpus have received, or cpus are truly stuck
301 ++ * unable to receive mondo, and we timeout.
302 ++ * Occasionally a target cpu strand is borrowed briefly by hypervisor to
303 ++ * perform guest service, such as PCIe error handling. Consider the
304 ++ * service time, 1 second overall wait is reasonable for 1 cpu.
305 ++ * Here two in-between mondo check wait time are defined: 2 usec for
306 ++ * single cpu quick turn around and up to 100usec for large cpu count.
307 ++ * Deliver mondo to large number of cpus could take longer, we adjusts
308 ++ * the retry count as long as target cpus are making forward progress.
309 ++ */
310 + static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
311 + {
312 +- int retries, this_cpu, prev_sent, i, saw_cpu_error;
313 ++ int this_cpu, tot_cpus, prev_sent, i, rem;
314 ++ int usec_wait, retries, tot_retries;
315 ++ u16 first_cpu = 0xffff;
316 ++ unsigned long xc_rcvd = 0;
317 + unsigned long status;
318 ++ int ecpuerror_id = 0;
319 ++ int enocpu_id = 0;
320 + u16 *cpu_list;
321 ++ u16 cpu;
322 +
323 + this_cpu = smp_processor_id();
324 +-
325 + cpu_list = __va(tb->cpu_list_pa);
326 +-
327 +- saw_cpu_error = 0;
328 +- retries = 0;
329 ++ usec_wait = cnt * MONDO_USEC_WAIT_MIN;
330 ++ if (usec_wait > MONDO_USEC_WAIT_MAX)
331 ++ usec_wait = MONDO_USEC_WAIT_MAX;
332 ++ retries = tot_retries = 0;
333 ++ tot_cpus = cnt;
334 + prev_sent = 0;
335 ++
336 + do {
337 +- int forward_progress, n_sent;
338 ++ int n_sent, mondo_delivered, target_cpu_busy;
339 +
340 + status = sun4v_cpu_mondo_send(cnt,
341 + tb->cpu_list_pa,
342 +@@ -645,94 +671,113 @@ static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
343 +
344 + /* HV_EOK means all cpus received the xcall, we're done. */
345 + if (likely(status == HV_EOK))
346 +- break;
347 ++ goto xcall_done;
348 ++
349 ++ /* If not these non-fatal errors, panic */
350 ++ if (unlikely((status != HV_EWOULDBLOCK) &&
351 ++ (status != HV_ECPUERROR) &&
352 ++ (status != HV_ENOCPU)))
353 ++ goto fatal_errors;
354 +
355 + /* First, see if we made any forward progress.
356 ++ *
357 ++ * Go through the cpu_list, count the target cpus that have
358 ++ * received our mondo (n_sent), and those that did not (rem).
359 ++ * Re-pack cpu_list with the cpus remain to be retried in the
360 ++ * front - this simplifies tracking the truly stalled cpus.
361 + *
362 + * The hypervisor indicates successful sends by setting
363 + * cpu list entries to the value 0xffff.
364 ++ *
365 ++ * EWOULDBLOCK means some target cpus did not receive the
366 ++ * mondo and retry usually helps.
367 ++ *
368 ++ * ECPUERROR means at least one target cpu is in error state,
369 ++ * it's usually safe to skip the faulty cpu and retry.
370 ++ *
371 ++ * ENOCPU means one of the target cpu doesn't belong to the
372 ++ * domain, perhaps offlined which is unexpected, but not
373 ++ * fatal and it's okay to skip the offlined cpu.
374 + */
375 ++ rem = 0;
376 + n_sent = 0;
377 + for (i = 0; i < cnt; i++) {
378 +- if (likely(cpu_list[i] == 0xffff))
379 ++ cpu = cpu_list[i];
380 ++ if (likely(cpu == 0xffff)) {
381 + n_sent++;
382 ++ } else if ((status == HV_ECPUERROR) &&
383 ++ (sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) {
384 ++ ecpuerror_id = cpu + 1;
385 ++ } else if (status == HV_ENOCPU && !cpu_online(cpu)) {
386 ++ enocpu_id = cpu + 1;
387 ++ } else {
388 ++ cpu_list[rem++] = cpu;
389 ++ }
390 + }
391 +
392 +- forward_progress = 0;
393 +- if (n_sent > prev_sent)
394 +- forward_progress = 1;
395 ++ /* No cpu remained, we're done. */
396 ++ if (rem == 0)
397 ++ break;
398 +
399 +- prev_sent = n_sent;
400 ++ /* Otherwise, update the cpu count for retry. */
401 ++ cnt = rem;
402 +
403 +- /* If we get a HV_ECPUERROR, then one or more of the cpus
404 +- * in the list are in error state. Use the cpu_state()
405 +- * hypervisor call to find out which cpus are in error state.
406 ++ /* Record the overall number of mondos received by the
407 ++ * first of the remaining cpus.
408 + */
409 +- if (unlikely(status == HV_ECPUERROR)) {
410 +- for (i = 0; i < cnt; i++) {
411 +- long err;
412 +- u16 cpu;
413 ++ if (first_cpu != cpu_list[0]) {
414 ++ first_cpu = cpu_list[0];
415 ++ xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
416 ++ }
417 +
418 +- cpu = cpu_list[i];
419 +- if (cpu == 0xffff)
420 +- continue;
421 ++ /* Was any mondo delivered successfully? */
422 ++ mondo_delivered = (n_sent > prev_sent);
423 ++ prev_sent = n_sent;
424 +
425 +- err = sun4v_cpu_state(cpu);
426 +- if (err == HV_CPU_STATE_ERROR) {
427 +- saw_cpu_error = (cpu + 1);
428 +- cpu_list[i] = 0xffff;
429 +- }
430 +- }
431 +- } else if (unlikely(status != HV_EWOULDBLOCK))
432 +- goto fatal_mondo_error;
433 ++ /* or, was any target cpu busy processing other mondos? */
434 ++ target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu));
435 ++ xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
436 +
437 +- /* Don't bother rewriting the CPU list, just leave the
438 +- * 0xffff and non-0xffff entries in there and the
439 +- * hypervisor will do the right thing.
440 +- *
441 +- * Only advance timeout state if we didn't make any
442 +- * forward progress.
443 ++ /* Retry count is for no progress. If we're making progress,
444 ++ * reset the retry count.
445 + */
446 +- if (unlikely(!forward_progress)) {
447 +- if (unlikely(++retries > 10000))
448 +- goto fatal_mondo_timeout;
449 +-
450 +- /* Delay a little bit to let other cpus catch up
451 +- * on their cpu mondo queue work.
452 +- */
453 +- udelay(2 * cnt);
454 ++ if (likely(mondo_delivered || target_cpu_busy)) {
455 ++ tot_retries += retries;
456 ++ retries = 0;
457 ++ } else if (unlikely(retries > MONDO_RETRY_LIMIT)) {
458 ++ goto fatal_mondo_timeout;
459 + }
460 +- } while (1);
461 +
462 +- if (unlikely(saw_cpu_error))
463 +- goto fatal_mondo_cpu_error;
464 ++ /* Delay a little bit to let other cpus catch up on
465 ++ * their cpu mondo queue work.
466 ++ */
467 ++ if (!mondo_delivered)
468 ++ udelay(usec_wait);
469 +
470 +- return;
471 ++ retries++;
472 ++ } while (1);
473 +
474 +-fatal_mondo_cpu_error:
475 +- printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
476 +- "(including %d) were in error state\n",
477 +- this_cpu, saw_cpu_error - 1);
478 ++xcall_done:
479 ++ if (unlikely(ecpuerror_id > 0)) {
480 ++ pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n",
481 ++ this_cpu, ecpuerror_id - 1);
482 ++ } else if (unlikely(enocpu_id > 0)) {
483 ++ pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n",
484 ++ this_cpu, enocpu_id - 1);
485 ++ }
486 + return;
487 +
488 ++fatal_errors:
489 ++ /* fatal errors include bad alignment, etc */
490 ++ pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n",
491 ++ this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
492 ++ panic("Unexpected SUN4V mondo error %lu\n", status);
493 ++
494 + fatal_mondo_timeout:
495 +- printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
496 +- " progress after %d retries.\n",
497 +- this_cpu, retries);
498 +- goto dump_cpu_list_and_out;
499 +-
500 +-fatal_mondo_error:
501 +- printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
502 +- this_cpu, status);
503 +- printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
504 +- "mondo_block_pa(%lx)\n",
505 +- this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
506 +-
507 +-dump_cpu_list_and_out:
508 +- printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
509 +- for (i = 0; i < cnt; i++)
510 +- printk("%u ", cpu_list[i]);
511 +- printk("]\n");
512 ++ /* some cpus being non-responsive to the cpu mondo */
513 ++ pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n",
514 ++ this_cpu, first_cpu, (tot_retries + retries), tot_cpus);
515 ++ panic("SUN4V mondo timeout panic\n");
516 + }
517 +
518 + static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);
519 +diff --git a/arch/sparc/kernel/sun4v_ivec.S b/arch/sparc/kernel/sun4v_ivec.S
520 +index 559bc5e9c199..34631995859a 100644
521 +--- a/arch/sparc/kernel/sun4v_ivec.S
522 ++++ b/arch/sparc/kernel/sun4v_ivec.S
523 +@@ -26,6 +26,21 @@ sun4v_cpu_mondo:
524 + ldxa [%g0] ASI_SCRATCHPAD, %g4
525 + sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4
526 +
527 ++ /* Get smp_processor_id() into %g3 */
528 ++ sethi %hi(trap_block), %g5
529 ++ or %g5, %lo(trap_block), %g5
530 ++ sub %g4, %g5, %g3
531 ++ srlx %g3, TRAP_BLOCK_SZ_SHIFT, %g3
532 ++
533 ++ /* Increment cpu_mondo_counter[smp_processor_id()] */
534 ++ sethi %hi(cpu_mondo_counter), %g5
535 ++ or %g5, %lo(cpu_mondo_counter), %g5
536 ++ sllx %g3, 3, %g3
537 ++ add %g5, %g3, %g5
538 ++ ldx [%g5], %g3
539 ++ add %g3, 1, %g3
540 ++ stx %g3, [%g5]
541 ++
542 + /* Get CPU mondo queue base phys address into %g7. */
543 + ldx [%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
544 +
545 +diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
546 +index 196ee5eb4d48..ad31af1dd726 100644
547 +--- a/arch/sparc/kernel/traps_64.c
548 ++++ b/arch/sparc/kernel/traps_64.c
549 +@@ -2733,6 +2733,7 @@ void do_getpsr(struct pt_regs *regs)
550 + }
551 + }
552 +
553 ++u64 cpu_mondo_counter[NR_CPUS] = {0};
554 + struct trap_per_cpu trap_block[NR_CPUS];
555 + EXPORT_SYMBOL(trap_block);
556 +
557 +diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
558 +index 07c0df924960..db872dbfafe9 100644
559 +--- a/arch/sparc/kernel/tsb.S
560 ++++ b/arch/sparc/kernel/tsb.S
561 +@@ -360,6 +360,7 @@ tsb_flush:
562 + * %o1: TSB base config pointer
563 + * %o2: TSB huge config pointer, or NULL if none
564 + * %o3: Hypervisor TSB descriptor physical address
565 ++ * %o4: Secondary context to load, if non-zero
566 + *
567 + * We have to run this whole thing with interrupts
568 + * disabled so that the current cpu doesn't change
569 +@@ -372,6 +373,17 @@ __tsb_context_switch:
570 + rdpr %pstate, %g1
571 + wrpr %g1, PSTATE_IE, %pstate
572 +
573 ++ brz,pn %o4, 1f
574 ++ mov SECONDARY_CONTEXT, %o5
575 ++
576 ++661: stxa %o4, [%o5] ASI_DMMU
577 ++ .section .sun4v_1insn_patch, "ax"
578 ++ .word 661b
579 ++ stxa %o4, [%o5] ASI_MMU
580 ++ .previous
581 ++ flush %g6
582 ++
583 ++1:
584 + TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
585 +
586 + stx %o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
587 +diff --git a/arch/sparc/lib/U3memcpy.S b/arch/sparc/lib/U3memcpy.S
588 +index 54f98706b03b..5a8cb37f0a3b 100644
589 +--- a/arch/sparc/lib/U3memcpy.S
590 ++++ b/arch/sparc/lib/U3memcpy.S
591 +@@ -145,13 +145,13 @@ ENDPROC(U3_retl_o2_plus_GS_plus_0x08)
592 + ENTRY(U3_retl_o2_and_7_plus_GS)
593 + and %o2, 7, %o2
594 + retl
595 +- add %o2, GLOBAL_SPARE, %o2
596 ++ add %o2, GLOBAL_SPARE, %o0
597 + ENDPROC(U3_retl_o2_and_7_plus_GS)
598 + ENTRY(U3_retl_o2_and_7_plus_GS_plus_8)
599 + add GLOBAL_SPARE, 8, GLOBAL_SPARE
600 + and %o2, 7, %o2
601 + retl
602 +- add %o2, GLOBAL_SPARE, %o2
603 ++ add %o2, GLOBAL_SPARE, %o0
604 + ENDPROC(U3_retl_o2_and_7_plus_GS_plus_8)
605 + #endif
606 +
607 +diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
608 +index 3c40ebd50f92..fed73f14aa49 100644
609 +--- a/arch/sparc/mm/init_64.c
610 ++++ b/arch/sparc/mm/init_64.c
611 +@@ -325,6 +325,29 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde
612 + }
613 +
614 + #ifdef CONFIG_HUGETLB_PAGE
615 ++static void __init add_huge_page_size(unsigned long size)
616 ++{
617 ++ unsigned int order;
618 ++
619 ++ if (size_to_hstate(size))
620 ++ return;
621 ++
622 ++ order = ilog2(size) - PAGE_SHIFT;
623 ++ hugetlb_add_hstate(order);
624 ++}
625 ++
626 ++static int __init hugetlbpage_init(void)
627 ++{
628 ++ add_huge_page_size(1UL << HPAGE_64K_SHIFT);
629 ++ add_huge_page_size(1UL << HPAGE_SHIFT);
630 ++ add_huge_page_size(1UL << HPAGE_256MB_SHIFT);
631 ++ add_huge_page_size(1UL << HPAGE_2GB_SHIFT);
632 ++
633 ++ return 0;
634 ++}
635 ++
636 ++arch_initcall(hugetlbpage_init);
637 ++
638 + static int __init setup_hugepagesz(char *string)
639 + {
640 + unsigned long long hugepage_size;
641 +@@ -364,7 +387,7 @@ static int __init setup_hugepagesz(char *string)
642 + goto out;
643 + }
644 +
645 +- hugetlb_add_hstate(hugepage_shift - PAGE_SHIFT);
646 ++ add_huge_page_size(hugepage_size);
647 + rc = 1;
648 +
649 + out:
650 +diff --git a/arch/sparc/power/hibernate.c b/arch/sparc/power/hibernate.c
651 +index 17bd2e167e07..df707a8ad311 100644
652 +--- a/arch/sparc/power/hibernate.c
653 ++++ b/arch/sparc/power/hibernate.c
654 +@@ -35,6 +35,5 @@ void restore_processor_state(void)
655 + {
656 + struct mm_struct *mm = current->active_mm;
657 +
658 +- load_secondary_context(mm);
659 +- tsb_context_switch(mm);
660 ++ tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
661 + }
662 +diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
663 +index 43e10d6fdbed..44adcde7a0ca 100644
664 +--- a/arch/x86/kernel/kvm.c
665 ++++ b/arch/x86/kernel/kvm.c
666 +@@ -151,6 +151,8 @@ void kvm_async_pf_task_wait(u32 token)
667 + if (hlist_unhashed(&n.link))
668 + break;
669 +
670 ++ rcu_irq_exit();
671 ++
672 + if (!n.halted) {
673 + local_irq_enable();
674 + schedule();
675 +@@ -159,11 +161,11 @@ void kvm_async_pf_task_wait(u32 token)
676 + /*
677 + * We cannot reschedule. So halt.
678 + */
679 +- rcu_irq_exit();
680 + native_safe_halt();
681 + local_irq_disable();
682 +- rcu_irq_enter();
683 + }
684 ++
685 ++ rcu_irq_enter();
686 + }
687 + if (!n.halted)
688 + finish_swait(&n.wq, &wait);
689 +diff --git a/block/blk-core.c b/block/blk-core.c
690 +index a7421b772d0e..56a7fac71439 100644
691 +--- a/block/blk-core.c
692 ++++ b/block/blk-core.c
693 +@@ -3307,6 +3307,10 @@ EXPORT_SYMBOL(blk_finish_plug);
694 + */
695 + void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
696 + {
697 ++ /* not support for RQF_PM and ->rpm_status in blk-mq yet */
698 ++ if (q->mq_ops)
699 ++ return;
700 ++
701 + q->dev = dev;
702 + q->rpm_status = RPM_ACTIVE;
703 + pm_runtime_set_autosuspend_delay(q->dev, -1);
704 +diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
705 +index 8e61e8640e17..5eaecd40f701 100644
706 +--- a/block/blk-mq-cpumap.c
707 ++++ b/block/blk-mq-cpumap.c
708 +@@ -35,7 +35,6 @@ int blk_mq_map_queues(struct blk_mq_tag_set *set)
709 + {
710 + unsigned int *map = set->mq_map;
711 + unsigned int nr_queues = set->nr_hw_queues;
712 +- const struct cpumask *online_mask = cpu_online_mask;
713 + unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
714 + cpumask_var_t cpus;
715 +
716 +@@ -44,7 +43,7 @@ int blk_mq_map_queues(struct blk_mq_tag_set *set)
717 +
718 + cpumask_clear(cpus);
719 + nr_cpus = nr_uniq_cpus = 0;
720 +- for_each_cpu(i, online_mask) {
721 ++ for_each_present_cpu(i) {
722 + nr_cpus++;
723 + first_sibling = get_first_sibling(i);
724 + if (!cpumask_test_cpu(first_sibling, cpus))
725 +@@ -54,7 +53,7 @@ int blk_mq_map_queues(struct blk_mq_tag_set *set)
726 +
727 + queue = 0;
728 + for_each_possible_cpu(i) {
729 +- if (!cpumask_test_cpu(i, online_mask)) {
730 ++ if (!cpumask_test_cpu(i, cpu_present_mask)) {
731 + map[i] = 0;
732 + continue;
733 + }
734 +diff --git a/block/blk-mq.c b/block/blk-mq.c
735 +index 958cedaff8b8..7353e0080062 100644
736 +--- a/block/blk-mq.c
737 ++++ b/block/blk-mq.c
738 +@@ -37,9 +37,6 @@
739 + #include "blk-wbt.h"
740 + #include "blk-mq-sched.h"
741 +
742 +-static DEFINE_MUTEX(all_q_mutex);
743 +-static LIST_HEAD(all_q_list);
744 +-
745 + static void blk_mq_poll_stats_start(struct request_queue *q);
746 + static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
747 + static void __blk_mq_stop_hw_queues(struct request_queue *q, bool sync);
748 +@@ -1975,8 +1972,8 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
749 + INIT_LIST_HEAD(&__ctx->rq_list);
750 + __ctx->queue = q;
751 +
752 +- /* If the cpu isn't online, the cpu is mapped to first hctx */
753 +- if (!cpu_online(i))
754 ++ /* If the cpu isn't present, the cpu is mapped to first hctx */
755 ++ if (!cpu_present(i))
756 + continue;
757 +
758 + hctx = blk_mq_map_queue(q, i);
759 +@@ -2019,8 +2016,7 @@ static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
760 + }
761 + }
762 +
763 +-static void blk_mq_map_swqueue(struct request_queue *q,
764 +- const struct cpumask *online_mask)
765 ++static void blk_mq_map_swqueue(struct request_queue *q)
766 + {
767 + unsigned int i, hctx_idx;
768 + struct blk_mq_hw_ctx *hctx;
769 +@@ -2038,13 +2034,11 @@ static void blk_mq_map_swqueue(struct request_queue *q,
770 + }
771 +
772 + /*
773 +- * Map software to hardware queues
774 ++ * Map software to hardware queues.
775 ++ *
776 ++ * If the cpu isn't present, the cpu is mapped to first hctx.
777 + */
778 +- for_each_possible_cpu(i) {
779 +- /* If the cpu isn't online, the cpu is mapped to first hctx */
780 +- if (!cpumask_test_cpu(i, online_mask))
781 +- continue;
782 +-
783 ++ for_each_present_cpu(i) {
784 + hctx_idx = q->mq_map[i];
785 + /* unmapped hw queue can be remapped after CPU topo changed */
786 + if (!set->tags[hctx_idx] &&
787 +@@ -2340,16 +2334,8 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
788 + blk_queue_softirq_done(q, set->ops->complete);
789 +
790 + blk_mq_init_cpu_queues(q, set->nr_hw_queues);
791 +-
792 +- get_online_cpus();
793 +- mutex_lock(&all_q_mutex);
794 +-
795 +- list_add_tail(&q->all_q_node, &all_q_list);
796 + blk_mq_add_queue_tag_set(set, q);
797 +- blk_mq_map_swqueue(q, cpu_online_mask);
798 +-
799 +- mutex_unlock(&all_q_mutex);
800 +- put_online_cpus();
801 ++ blk_mq_map_swqueue(q);
802 +
803 + if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
804 + int ret;
805 +@@ -2375,18 +2361,12 @@ void blk_mq_free_queue(struct request_queue *q)
806 + {
807 + struct blk_mq_tag_set *set = q->tag_set;
808 +
809 +- mutex_lock(&all_q_mutex);
810 +- list_del_init(&q->all_q_node);
811 +- mutex_unlock(&all_q_mutex);
812 +-
813 + blk_mq_del_queue_tag_set(q);
814 +-
815 + blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
816 + }
817 +
818 + /* Basically redo blk_mq_init_queue with queue frozen */
819 +-static void blk_mq_queue_reinit(struct request_queue *q,
820 +- const struct cpumask *online_mask)
821 ++static void blk_mq_queue_reinit(struct request_queue *q)
822 + {
823 + WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
824 +
825 +@@ -2399,76 +2379,12 @@ static void blk_mq_queue_reinit(struct request_queue *q,
826 + * involves free and re-allocate memory, worthy doing?)
827 + */
828 +
829 +- blk_mq_map_swqueue(q, online_mask);
830 ++ blk_mq_map_swqueue(q);
831 +
832 + blk_mq_sysfs_register(q);
833 + blk_mq_debugfs_register_hctxs(q);
834 + }
835 +
836 +-/*
837 +- * New online cpumask which is going to be set in this hotplug event.
838 +- * Declare this cpumasks as global as cpu-hotplug operation is invoked
839 +- * one-by-one and dynamically allocating this could result in a failure.
840 +- */
841 +-static struct cpumask cpuhp_online_new;
842 +-
843 +-static void blk_mq_queue_reinit_work(void)
844 +-{
845 +- struct request_queue *q;
846 +-
847 +- mutex_lock(&all_q_mutex);
848 +- /*
849 +- * We need to freeze and reinit all existing queues. Freezing
850 +- * involves synchronous wait for an RCU grace period and doing it
851 +- * one by one may take a long time. Start freezing all queues in
852 +- * one swoop and then wait for the completions so that freezing can
853 +- * take place in parallel.
854 +- */
855 +- list_for_each_entry(q, &all_q_list, all_q_node)
856 +- blk_freeze_queue_start(q);
857 +- list_for_each_entry(q, &all_q_list, all_q_node)
858 +- blk_mq_freeze_queue_wait(q);
859 +-
860 +- list_for_each_entry(q, &all_q_list, all_q_node)
861 +- blk_mq_queue_reinit(q, &cpuhp_online_new);
862 +-
863 +- list_for_each_entry(q, &all_q_list, all_q_node)
864 +- blk_mq_unfreeze_queue(q);
865 +-
866 +- mutex_unlock(&all_q_mutex);
867 +-}
868 +-
869 +-static int blk_mq_queue_reinit_dead(unsigned int cpu)
870 +-{
871 +- cpumask_copy(&cpuhp_online_new, cpu_online_mask);
872 +- blk_mq_queue_reinit_work();
873 +- return 0;
874 +-}
875 +-
876 +-/*
877 +- * Before hotadded cpu starts handling requests, new mappings must be
878 +- * established. Otherwise, these requests in hw queue might never be
879 +- * dispatched.
880 +- *
881 +- * For example, there is a single hw queue (hctx) and two CPU queues (ctx0
882 +- * for CPU0, and ctx1 for CPU1).
883 +- *
884 +- * Now CPU1 is just onlined and a request is inserted into ctx1->rq_list
885 +- * and set bit0 in pending bitmap as ctx1->index_hw is still zero.
886 +- *
887 +- * And then while running hw queue, blk_mq_flush_busy_ctxs() finds bit0 is set
888 +- * in pending bitmap and tries to retrieve requests in hctx->ctxs[0]->rq_list.
889 +- * But htx->ctxs[0] is a pointer to ctx0, so the request in ctx1->rq_list is
890 +- * ignored.
891 +- */
892 +-static int blk_mq_queue_reinit_prepare(unsigned int cpu)
893 +-{
894 +- cpumask_copy(&cpuhp_online_new, cpu_online_mask);
895 +- cpumask_set_cpu(cpu, &cpuhp_online_new);
896 +- blk_mq_queue_reinit_work();
897 +- return 0;
898 +-}
899 +-
900 + static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
901 + {
902 + int i;
903 +@@ -2679,7 +2595,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
904 + blk_mq_update_queue_map(set);
905 + list_for_each_entry(q, &set->tag_list, tag_set_list) {
906 + blk_mq_realloc_hw_ctxs(set, q);
907 +- blk_mq_queue_reinit(q, cpu_online_mask);
908 ++ blk_mq_queue_reinit(q);
909 + }
910 +
911 + list_for_each_entry(q, &set->tag_list, tag_set_list)
912 +@@ -2895,24 +2811,10 @@ bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
913 + }
914 + EXPORT_SYMBOL_GPL(blk_mq_poll);
915 +
916 +-void blk_mq_disable_hotplug(void)
917 +-{
918 +- mutex_lock(&all_q_mutex);
919 +-}
920 +-
921 +-void blk_mq_enable_hotplug(void)
922 +-{
923 +- mutex_unlock(&all_q_mutex);
924 +-}
925 +-
926 + static int __init blk_mq_init(void)
927 + {
928 + cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
929 + blk_mq_hctx_notify_dead);
930 +-
931 +- cpuhp_setup_state_nocalls(CPUHP_BLK_MQ_PREPARE, "block/mq:prepare",
932 +- blk_mq_queue_reinit_prepare,
933 +- blk_mq_queue_reinit_dead);
934 + return 0;
935 + }
936 + subsys_initcall(blk_mq_init);
937 +diff --git a/block/blk-mq.h b/block/blk-mq.h
938 +index cc67b48e3551..558df56544d2 100644
939 +--- a/block/blk-mq.h
940 ++++ b/block/blk-mq.h
941 +@@ -56,11 +56,6 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
942 + bool at_head);
943 + void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
944 + struct list_head *list);
945 +-/*
946 +- * CPU hotplug helpers
947 +- */
948 +-void blk_mq_enable_hotplug(void);
949 +-void blk_mq_disable_hotplug(void);
950 +
951 + /*
952 + * CPU -> queue mappings
953 +diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
954 +index 10347e3d73ad..5bd58bd4ab05 100644
955 +--- a/drivers/acpi/acpi_lpss.c
956 ++++ b/drivers/acpi/acpi_lpss.c
957 +@@ -85,6 +85,7 @@ static const struct lpss_device_desc lpss_dma_desc = {
958 + };
959 +
960 + struct lpss_private_data {
961 ++ struct acpi_device *adev;
962 + void __iomem *mmio_base;
963 + resource_size_t mmio_size;
964 + unsigned int fixed_clk_rate;
965 +@@ -155,6 +156,12 @@ static struct pwm_lookup byt_pwm_lookup[] = {
966 +
967 + static void byt_pwm_setup(struct lpss_private_data *pdata)
968 + {
969 ++ struct acpi_device *adev = pdata->adev;
970 ++
971 ++ /* Only call pwm_add_table for the first PWM controller */
972 ++ if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
973 ++ return;
974 ++
975 + if (!acpi_dev_present("INT33FD", NULL, -1))
976 + pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
977 + }
978 +@@ -180,6 +187,12 @@ static struct pwm_lookup bsw_pwm_lookup[] = {
979 +
980 + static void bsw_pwm_setup(struct lpss_private_data *pdata)
981 + {
982 ++ struct acpi_device *adev = pdata->adev;
983 ++
984 ++ /* Only call pwm_add_table for the first PWM controller */
985 ++ if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
986 ++ return;
987 ++
988 + pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup));
989 + }
990 +
991 +@@ -456,6 +469,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
992 + goto err_out;
993 + }
994 +
995 ++ pdata->adev = adev;
996 + pdata->dev_desc = dev_desc;
997 +
998 + if (dev_desc->setup)
999 +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
1000 +index 49ba9834c715..12d59968020f 100644
1001 +--- a/drivers/ata/libata-scsi.c
1002 ++++ b/drivers/ata/libata-scsi.c
1003 +@@ -3028,10 +3028,12 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
1004 + static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
1005 + {
1006 + if (!sata_pmp_attached(ap)) {
1007 +- if (likely(devno < ata_link_max_devices(&ap->link)))
1008 ++ if (likely(devno >= 0 &&
1009 ++ devno < ata_link_max_devices(&ap->link)))
1010 + return &ap->link.device[devno];
1011 + } else {
1012 +- if (likely(devno < ap->nr_pmp_links))
1013 ++ if (likely(devno >= 0 &&
1014 ++ devno < ap->nr_pmp_links))
1015 + return &ap->pmp_link[devno].device[0];
1016 + }
1017 +
1018 +diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c
1019 +index 5372bf8be5e6..31d7ffda9aab 100644
1020 +--- a/drivers/clk/sunxi-ng/ccu-sun5i.c
1021 ++++ b/drivers/clk/sunxi-ng/ccu-sun5i.c
1022 +@@ -184,7 +184,7 @@ static struct ccu_mux cpu_clk = {
1023 + .hw.init = CLK_HW_INIT_PARENTS("cpu",
1024 + cpu_parents,
1025 + &ccu_mux_ops,
1026 +- CLK_IS_CRITICAL),
1027 ++ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
1028 + }
1029 + };
1030 +
1031 +diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
1032 +index a42a1eea5714..2e96b3d46e0c 100644
1033 +--- a/drivers/gpio/gpiolib.c
1034 ++++ b/drivers/gpio/gpiolib.c
1035 +@@ -704,24 +704,23 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
1036 + {
1037 + struct lineevent_state *le = p;
1038 + struct gpioevent_data ge;
1039 +- int ret;
1040 ++ int ret, level;
1041 +
1042 + ge.timestamp = ktime_get_real_ns();
1043 ++ level = gpiod_get_value_cansleep(le->desc);
1044 +
1045 + if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
1046 + && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
1047 +- int level = gpiod_get_value_cansleep(le->desc);
1048 +-
1049 + if (level)
1050 + /* Emit low-to-high event */
1051 + ge.id = GPIOEVENT_EVENT_RISING_EDGE;
1052 + else
1053 + /* Emit high-to-low event */
1054 + ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
1055 +- } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) {
1056 ++ } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE && level) {
1057 + /* Emit low-to-high event */
1058 + ge.id = GPIOEVENT_EVENT_RISING_EDGE;
1059 +- } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
1060 ++ } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE && !level) {
1061 + /* Emit high-to-low event */
1062 + ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
1063 + } else {
1064 +diff --git a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
1065 +index 18fd01f3e4b2..003a131bad47 100644
1066 +--- a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
1067 ++++ b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
1068 +@@ -1,24 +1,25 @@
1069 +-
1070 + /*
1071 +-***************************************************************************************************
1072 +-*
1073 +-* Trade secret of Advanced Micro Devices, Inc.
1074 +-* Copyright (c) 2010 Advanced Micro Devices, Inc. (unpublished)
1075 +-*
1076 +-* All rights reserved. This notice is intended as a precaution against inadvertent publication and
1077 +-* does not imply publication or any waiver of confidentiality. The year included in the foregoing
1078 +-* notice is the year of creation of the work.
1079 +-*
1080 +-***************************************************************************************************
1081 +-*/
1082 +-/**
1083 +-***************************************************************************************************
1084 +-* @brief gfx9 Clearstate Definitions
1085 +-***************************************************************************************************
1086 +-*
1087 +-* Do not edit! This is a machine-generated file!
1088 +-*
1089 +-*/
1090 ++ * Copyright 2017 Advanced Micro Devices, Inc.
1091 ++ *
1092 ++ * Permission is hereby granted, free of charge, to any person obtaining a
1093 ++ * copy of this software and associated documentation files (the "Software"),
1094 ++ * to deal in the Software without restriction, including without limitation
1095 ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
1096 ++ * and/or sell copies of the Software, and to permit persons to whom the
1097 ++ * Software is furnished to do so, subject to the following conditions:
1098 ++ *
1099 ++ * The above copyright notice and this permission notice shall be included in
1100 ++ * all copies or substantial portions of the Software.
1101 ++ *
1102 ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1103 ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1104 ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
1105 ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
1106 ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
1107 ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
1108 ++ * OTHER DEALINGS IN THE SOFTWARE.
1109 ++ *
1110 ++ */
1111 +
1112 + static const unsigned int gfx9_SECT_CONTEXT_def_1[] =
1113 + {
1114 +diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
1115 +index c0b1aabf282f..7dbb7cf47986 100644
1116 +--- a/drivers/gpu/drm/amd/amdgpu/si.c
1117 ++++ b/drivers/gpu/drm/amd/amdgpu/si.c
1118 +@@ -1385,6 +1385,7 @@ static void si_init_golden_registers(struct amdgpu_device *adev)
1119 + amdgpu_program_register_sequence(adev,
1120 + pitcairn_mgcg_cgcg_init,
1121 + (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
1122 ++ break;
1123 + case CHIP_VERDE:
1124 + amdgpu_program_register_sequence(adev,
1125 + verde_golden_registers,
1126 +@@ -1409,6 +1410,7 @@ static void si_init_golden_registers(struct amdgpu_device *adev)
1127 + amdgpu_program_register_sequence(adev,
1128 + oland_mgcg_cgcg_init,
1129 + (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
1130 ++ break;
1131 + case CHIP_HAINAN:
1132 + amdgpu_program_register_sequence(adev,
1133 + hainan_golden_registers,
1134 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
1135 +index 1d2db5d912b0..f8a977f86ec7 100644
1136 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
1137 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
1138 +@@ -384,6 +384,12 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
1139 +
1140 + hotspot_x = du->hotspot_x;
1141 + hotspot_y = du->hotspot_y;
1142 ++
1143 ++ if (plane->fb) {
1144 ++ hotspot_x += plane->fb->hot_x;
1145 ++ hotspot_y += plane->fb->hot_y;
1146 ++ }
1147 ++
1148 + du->cursor_surface = vps->surf;
1149 + du->cursor_dmabuf = vps->dmabuf;
1150 +
1151 +@@ -411,6 +417,9 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
1152 + vmw_cursor_update_position(dev_priv, true,
1153 + du->cursor_x + hotspot_x,
1154 + du->cursor_y + hotspot_y);
1155 ++
1156 ++ du->core_hotspot_x = hotspot_x - du->hotspot_x;
1157 ++ du->core_hotspot_y = hotspot_y - du->hotspot_y;
1158 + } else {
1159 + DRM_ERROR("Failed to update cursor image\n");
1160 + }
1161 +diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
1162 +index 0f1219fa8561..28fbc81c6e9e 100644
1163 +--- a/drivers/iommu/amd_iommu.c
1164 ++++ b/drivers/iommu/amd_iommu.c
1165 +@@ -4316,6 +4316,7 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
1166 + /* Setting */
1167 + irte->hi.fields.ga_root_ptr = (pi_data->base >> 12);
1168 + irte->hi.fields.vector = vcpu_pi_info->vector;
1169 ++ irte->lo.fields_vapic.ga_log_intr = 1;
1170 + irte->lo.fields_vapic.guest_mode = 1;
1171 + irte->lo.fields_vapic.ga_tag = pi_data->ga_tag;
1172 +
1173 +diff --git a/drivers/media/pci/saa7164/saa7164-bus.c b/drivers/media/pci/saa7164/saa7164-bus.c
1174 +index b2ff82fa7116..ecfeac5cdbed 100644
1175 +--- a/drivers/media/pci/saa7164/saa7164-bus.c
1176 ++++ b/drivers/media/pci/saa7164/saa7164-bus.c
1177 +@@ -389,11 +389,11 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
1178 + msg_tmp.size = le16_to_cpu((__force __le16)msg_tmp.size);
1179 + msg_tmp.command = le32_to_cpu((__force __le32)msg_tmp.command);
1180 + msg_tmp.controlselector = le16_to_cpu((__force __le16)msg_tmp.controlselector);
1181 ++ memcpy(msg, &msg_tmp, sizeof(*msg));
1182 +
1183 + /* No need to update the read positions, because this was a peek */
1184 + /* If the caller specifically want to peek, return */
1185 + if (peekonly) {
1186 +- memcpy(msg, &msg_tmp, sizeof(*msg));
1187 + goto peekout;
1188 + }
1189 +
1190 +@@ -438,21 +438,15 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
1191 + space_rem = bus->m_dwSizeGetRing - curr_grp;
1192 +
1193 + if (space_rem < sizeof(*msg)) {
1194 +- /* msg wraps around the ring */
1195 +- memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, space_rem);
1196 +- memcpy_fromio((u8 *)msg + space_rem, bus->m_pdwGetRing,
1197 +- sizeof(*msg) - space_rem);
1198 + if (buf)
1199 + memcpy_fromio(buf, bus->m_pdwGetRing + sizeof(*msg) -
1200 + space_rem, buf_size);
1201 +
1202 + } else if (space_rem == sizeof(*msg)) {
1203 +- memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
1204 + if (buf)
1205 + memcpy_fromio(buf, bus->m_pdwGetRing, buf_size);
1206 + } else {
1207 + /* Additional data wraps around the ring */
1208 +- memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
1209 + if (buf) {
1210 + memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp +
1211 + sizeof(*msg), space_rem - sizeof(*msg));
1212 +@@ -465,15 +459,10 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
1213 +
1214 + } else {
1215 + /* No wrapping */
1216 +- memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
1217 + if (buf)
1218 + memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp + sizeof(*msg),
1219 + buf_size);
1220 + }
1221 +- /* Convert from little endian to CPU */
1222 +- msg->size = le16_to_cpu((__force __le16)msg->size);
1223 +- msg->command = le32_to_cpu((__force __le32)msg->command);
1224 +- msg->controlselector = le16_to_cpu((__force __le16)msg->controlselector);
1225 +
1226 + /* Update the read positions, adjusting the ring */
1227 + saa7164_writel(bus->m_dwGetReadPos, new_grp);
1228 +diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
1229 +index e3fe3e0635aa..1831bf5ccca5 100644
1230 +--- a/drivers/media/platform/davinci/vpfe_capture.c
1231 ++++ b/drivers/media/platform/davinci/vpfe_capture.c
1232 +@@ -1719,27 +1719,9 @@ static long vpfe_param_handler(struct file *file, void *priv,
1233 +
1234 + switch (cmd) {
1235 + case VPFE_CMD_S_CCDC_RAW_PARAMS:
1236 ++ ret = -EINVAL;
1237 + v4l2_warn(&vpfe_dev->v4l2_dev,
1238 +- "VPFE_CMD_S_CCDC_RAW_PARAMS: experimental ioctl\n");
1239 +- if (ccdc_dev->hw_ops.set_params) {
1240 +- ret = ccdc_dev->hw_ops.set_params(param);
1241 +- if (ret) {
1242 +- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
1243 +- "Error setting parameters in CCDC\n");
1244 +- goto unlock_out;
1245 +- }
1246 +- ret = vpfe_get_ccdc_image_format(vpfe_dev,
1247 +- &vpfe_dev->fmt);
1248 +- if (ret < 0) {
1249 +- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
1250 +- "Invalid image format at CCDC\n");
1251 +- goto unlock_out;
1252 +- }
1253 +- } else {
1254 +- ret = -EINVAL;
1255 +- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
1256 +- "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
1257 +- }
1258 ++ "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
1259 + break;
1260 + default:
1261 + ret = -ENOTTY;
1262 +diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
1263 +index de85f1d7ce43..c01b655571a2 100644
1264 +--- a/drivers/media/rc/ir-lirc-codec.c
1265 ++++ b/drivers/media/rc/ir-lirc-codec.c
1266 +@@ -266,7 +266,7 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
1267 + if (!dev->rx_resolution)
1268 + return -ENOTTY;
1269 +
1270 +- val = dev->rx_resolution;
1271 ++ val = dev->rx_resolution / 1000;
1272 + break;
1273 +
1274 + case LIRC_SET_WIDEBAND_RECEIVER:
1275 +diff --git a/drivers/media/rc/ir-spi.c b/drivers/media/rc/ir-spi.c
1276 +index c8863f36686a..f39cf8cb639f 100644
1277 +--- a/drivers/media/rc/ir-spi.c
1278 ++++ b/drivers/media/rc/ir-spi.c
1279 +@@ -57,10 +57,13 @@ static int ir_spi_tx(struct rc_dev *dev,
1280 +
1281 + /* convert the pulse/space signal to raw binary signal */
1282 + for (i = 0; i < count; i++) {
1283 ++ unsigned int periods;
1284 + int j;
1285 + u16 val = ((i + 1) % 2) ? idata->pulse : idata->space;
1286 +
1287 +- if (len + buffer[i] >= IR_SPI_MAX_BUFSIZE)
1288 ++ periods = DIV_ROUND_CLOSEST(buffer[i] * idata->freq, 1000000);
1289 ++
1290 ++ if (len + periods >= IR_SPI_MAX_BUFSIZE)
1291 + return -EINVAL;
1292 +
1293 + /*
1294 +@@ -69,13 +72,13 @@ static int ir_spi_tx(struct rc_dev *dev,
1295 + * contain a space duration.
1296 + */
1297 + val = (i % 2) ? idata->space : idata->pulse;
1298 +- for (j = 0; j < buffer[i]; j++)
1299 ++ for (j = 0; j < periods; j++)
1300 + idata->tx_buf[len++] = val;
1301 + }
1302 +
1303 + memset(&xfer, 0, sizeof(xfer));
1304 +
1305 +- xfer.speed_hz = idata->freq;
1306 ++ xfer.speed_hz = idata->freq * 16;
1307 + xfer.len = len * sizeof(*idata->tx_buf);
1308 + xfer.tx_buf = idata->tx_buf;
1309 +
1310 +diff --git a/drivers/media/usb/pulse8-cec/pulse8-cec.c b/drivers/media/usb/pulse8-cec/pulse8-cec.c
1311 +index 1dfc2de1fe77..4767f4341ba9 100644
1312 +--- a/drivers/media/usb/pulse8-cec/pulse8-cec.c
1313 ++++ b/drivers/media/usb/pulse8-cec/pulse8-cec.c
1314 +@@ -51,7 +51,7 @@ MODULE_DESCRIPTION("Pulse Eight HDMI CEC driver");
1315 + MODULE_LICENSE("GPL");
1316 +
1317 + static int debug;
1318 +-static int persistent_config = 1;
1319 ++static int persistent_config;
1320 + module_param(debug, int, 0644);
1321 + module_param(persistent_config, int, 0644);
1322 + MODULE_PARM_DESC(debug, "debug level (0-1)");
1323 +diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
1324 +index 3f8c85d5aa09..88fa03142e92 100644
1325 +--- a/drivers/mmc/core/host.c
1326 ++++ b/drivers/mmc/core/host.c
1327 +@@ -176,19 +176,17 @@ static void mmc_retune_timer(unsigned long data)
1328 + */
1329 + int mmc_of_parse(struct mmc_host *host)
1330 + {
1331 +- struct device_node *np;
1332 ++ struct device *dev = host->parent;
1333 + u32 bus_width;
1334 + int ret;
1335 + bool cd_cap_invert, cd_gpio_invert = false;
1336 + bool ro_cap_invert, ro_gpio_invert = false;
1337 +
1338 +- if (!host->parent || !host->parent->of_node)
1339 ++ if (!dev || !dev_fwnode(dev))
1340 + return 0;
1341 +
1342 +- np = host->parent->of_node;
1343 +-
1344 + /* "bus-width" is translated to MMC_CAP_*_BIT_DATA flags */
1345 +- if (of_property_read_u32(np, "bus-width", &bus_width) < 0) {
1346 ++ if (device_property_read_u32(dev, "bus-width", &bus_width) < 0) {
1347 + dev_dbg(host->parent,
1348 + "\"bus-width\" property is missing, assuming 1 bit.\n");
1349 + bus_width = 1;
1350 +@@ -210,7 +208,7 @@ int mmc_of_parse(struct mmc_host *host)
1351 + }
1352 +
1353 + /* f_max is obtained from the optional "max-frequency" property */
1354 +- of_property_read_u32(np, "max-frequency", &host->f_max);
1355 ++ device_property_read_u32(dev, "max-frequency", &host->f_max);
1356 +
1357 + /*
1358 + * Configure CD and WP pins. They are both by default active low to
1359 +@@ -225,12 +223,12 @@ int mmc_of_parse(struct mmc_host *host)
1360 + */
1361 +
1362 + /* Parse Card Detection */
1363 +- if (of_property_read_bool(np, "non-removable")) {
1364 ++ if (device_property_read_bool(dev, "non-removable")) {
1365 + host->caps |= MMC_CAP_NONREMOVABLE;
1366 + } else {
1367 +- cd_cap_invert = of_property_read_bool(np, "cd-inverted");
1368 ++ cd_cap_invert = device_property_read_bool(dev, "cd-inverted");
1369 +
1370 +- if (of_property_read_bool(np, "broken-cd"))
1371 ++ if (device_property_read_bool(dev, "broken-cd"))
1372 + host->caps |= MMC_CAP_NEEDS_POLL;
1373 +
1374 + ret = mmc_gpiod_request_cd(host, "cd", 0, true,
1375 +@@ -256,7 +254,7 @@ int mmc_of_parse(struct mmc_host *host)
1376 + }
1377 +
1378 + /* Parse Write Protection */
1379 +- ro_cap_invert = of_property_read_bool(np, "wp-inverted");
1380 ++ ro_cap_invert = device_property_read_bool(dev, "wp-inverted");
1381 +
1382 + ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert);
1383 + if (!ret)
1384 +@@ -264,64 +262,64 @@ int mmc_of_parse(struct mmc_host *host)
1385 + else if (ret != -ENOENT && ret != -ENOSYS)
1386 + return ret;
1387 +
1388 +- if (of_property_read_bool(np, "disable-wp"))
1389 ++ if (device_property_read_bool(dev, "disable-wp"))
1390 + host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
1391 +
1392 + /* See the comment on CD inversion above */
1393 + if (ro_cap_invert ^ ro_gpio_invert)
1394 + host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
1395 +
1396 +- if (of_property_read_bool(np, "cap-sd-highspeed"))
1397 ++ if (device_property_read_bool(dev, "cap-sd-highspeed"))
1398 + host->caps |= MMC_CAP_SD_HIGHSPEED;
1399 +- if (of_property_read_bool(np, "cap-mmc-highspeed"))
1400 ++ if (device_property_read_bool(dev, "cap-mmc-highspeed"))
1401 + host->caps |= MMC_CAP_MMC_HIGHSPEED;
1402 +- if (of_property_read_bool(np, "sd-uhs-sdr12"))
1403 ++ if (device_property_read_bool(dev, "sd-uhs-sdr12"))
1404 + host->caps |= MMC_CAP_UHS_SDR12;
1405 +- if (of_property_read_bool(np, "sd-uhs-sdr25"))
1406 ++ if (device_property_read_bool(dev, "sd-uhs-sdr25"))
1407 + host->caps |= MMC_CAP_UHS_SDR25;
1408 +- if (of_property_read_bool(np, "sd-uhs-sdr50"))
1409 ++ if (device_property_read_bool(dev, "sd-uhs-sdr50"))
1410 + host->caps |= MMC_CAP_UHS_SDR50;
1411 +- if (of_property_read_bool(np, "sd-uhs-sdr104"))
1412 ++ if (device_property_read_bool(dev, "sd-uhs-sdr104"))
1413 + host->caps |= MMC_CAP_UHS_SDR104;
1414 +- if (of_property_read_bool(np, "sd-uhs-ddr50"))
1415 ++ if (device_property_read_bool(dev, "sd-uhs-ddr50"))
1416 + host->caps |= MMC_CAP_UHS_DDR50;
1417 +- if (of_property_read_bool(np, "cap-power-off-card"))
1418 ++ if (device_property_read_bool(dev, "cap-power-off-card"))
1419 + host->caps |= MMC_CAP_POWER_OFF_CARD;
1420 +- if (of_property_read_bool(np, "cap-mmc-hw-reset"))
1421 ++ if (device_property_read_bool(dev, "cap-mmc-hw-reset"))
1422 + host->caps |= MMC_CAP_HW_RESET;
1423 +- if (of_property_read_bool(np, "cap-sdio-irq"))
1424 ++ if (device_property_read_bool(dev, "cap-sdio-irq"))
1425 + host->caps |= MMC_CAP_SDIO_IRQ;
1426 +- if (of_property_read_bool(np, "full-pwr-cycle"))
1427 ++ if (device_property_read_bool(dev, "full-pwr-cycle"))
1428 + host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
1429 +- if (of_property_read_bool(np, "keep-power-in-suspend"))
1430 ++ if (device_property_read_bool(dev, "keep-power-in-suspend"))
1431 + host->pm_caps |= MMC_PM_KEEP_POWER;
1432 +- if (of_property_read_bool(np, "wakeup-source") ||
1433 +- of_property_read_bool(np, "enable-sdio-wakeup")) /* legacy */
1434 ++ if (device_property_read_bool(dev, "wakeup-source") ||
1435 ++ device_property_read_bool(dev, "enable-sdio-wakeup")) /* legacy */
1436 + host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
1437 +- if (of_property_read_bool(np, "mmc-ddr-3_3v"))
1438 ++ if (device_property_read_bool(dev, "mmc-ddr-3_3v"))
1439 + host->caps |= MMC_CAP_3_3V_DDR;
1440 +- if (of_property_read_bool(np, "mmc-ddr-1_8v"))
1441 ++ if (device_property_read_bool(dev, "mmc-ddr-1_8v"))
1442 + host->caps |= MMC_CAP_1_8V_DDR;
1443 +- if (of_property_read_bool(np, "mmc-ddr-1_2v"))
1444 ++ if (device_property_read_bool(dev, "mmc-ddr-1_2v"))
1445 + host->caps |= MMC_CAP_1_2V_DDR;
1446 +- if (of_property_read_bool(np, "mmc-hs200-1_8v"))
1447 ++ if (device_property_read_bool(dev, "mmc-hs200-1_8v"))
1448 + host->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
1449 +- if (of_property_read_bool(np, "mmc-hs200-1_2v"))
1450 ++ if (device_property_read_bool(dev, "mmc-hs200-1_2v"))
1451 + host->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
1452 +- if (of_property_read_bool(np, "mmc-hs400-1_8v"))
1453 ++ if (device_property_read_bool(dev, "mmc-hs400-1_8v"))
1454 + host->caps2 |= MMC_CAP2_HS400_1_8V | MMC_CAP2_HS200_1_8V_SDR;
1455 +- if (of_property_read_bool(np, "mmc-hs400-1_2v"))
1456 ++ if (device_property_read_bool(dev, "mmc-hs400-1_2v"))
1457 + host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR;
1458 +- if (of_property_read_bool(np, "mmc-hs400-enhanced-strobe"))
1459 ++ if (device_property_read_bool(dev, "mmc-hs400-enhanced-strobe"))
1460 + host->caps2 |= MMC_CAP2_HS400_ES;
1461 +- if (of_property_read_bool(np, "no-sdio"))
1462 ++ if (device_property_read_bool(dev, "no-sdio"))
1463 + host->caps2 |= MMC_CAP2_NO_SDIO;
1464 +- if (of_property_read_bool(np, "no-sd"))
1465 ++ if (device_property_read_bool(dev, "no-sd"))
1466 + host->caps2 |= MMC_CAP2_NO_SD;
1467 +- if (of_property_read_bool(np, "no-mmc"))
1468 ++ if (device_property_read_bool(dev, "no-mmc"))
1469 + host->caps2 |= MMC_CAP2_NO_MMC;
1470 +
1471 +- host->dsr_req = !of_property_read_u32(np, "dsr", &host->dsr);
1472 ++ host->dsr_req = !device_property_read_u32(dev, "dsr", &host->dsr);
1473 + if (host->dsr_req && (host->dsr & ~0xffff)) {
1474 + dev_err(host->parent,
1475 + "device tree specified broken value for DSR: 0x%x, ignoring\n",
1476 +diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
1477 +index e45129f48174..efde0f20dd24 100644
1478 +--- a/drivers/mmc/host/dw_mmc.c
1479 ++++ b/drivers/mmc/host/dw_mmc.c
1480 +@@ -2707,8 +2707,8 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
1481 + host->slot[id] = slot;
1482 +
1483 + mmc->ops = &dw_mci_ops;
1484 +- if (of_property_read_u32_array(host->dev->of_node,
1485 +- "clock-freq-min-max", freq, 2)) {
1486 ++ if (device_property_read_u32_array(host->dev, "clock-freq-min-max",
1487 ++ freq, 2)) {
1488 + mmc->f_min = DW_MCI_FREQ_MIN;
1489 + mmc->f_max = DW_MCI_FREQ_MAX;
1490 + } else {
1491 +@@ -2808,7 +2808,6 @@ static void dw_mci_init_dma(struct dw_mci *host)
1492 + {
1493 + int addr_config;
1494 + struct device *dev = host->dev;
1495 +- struct device_node *np = dev->of_node;
1496 +
1497 + /*
1498 + * Check tansfer mode from HCON[17:16]
1499 +@@ -2869,8 +2868,9 @@ static void dw_mci_init_dma(struct dw_mci *host)
1500 + dev_info(host->dev, "Using internal DMA controller.\n");
1501 + } else {
1502 + /* TRANS_MODE_EDMAC: check dma bindings again */
1503 +- if ((of_property_count_strings(np, "dma-names") < 0) ||
1504 +- (!of_find_property(np, "dmas", NULL))) {
1505 ++ if ((device_property_read_string_array(dev, "dma-names",
1506 ++ NULL, 0) < 0) ||
1507 ++ !device_property_present(dev, "dmas")) {
1508 + goto no_dma;
1509 + }
1510 + host->dma_ops = &dw_mci_edmac_ops;
1511 +@@ -2937,7 +2937,6 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
1512 + {
1513 + struct dw_mci_board *pdata;
1514 + struct device *dev = host->dev;
1515 +- struct device_node *np = dev->of_node;
1516 + const struct dw_mci_drv_data *drv_data = host->drv_data;
1517 + int ret;
1518 + u32 clock_frequency;
1519 +@@ -2954,20 +2953,21 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
1520 + }
1521 +
1522 + /* find out number of slots supported */
1523 +- of_property_read_u32(np, "num-slots", &pdata->num_slots);
1524 ++ device_property_read_u32(dev, "num-slots", &pdata->num_slots);
1525 +
1526 +- if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
1527 ++ if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
1528 + dev_info(dev,
1529 + "fifo-depth property not found, using value of FIFOTH register as default\n");
1530 +
1531 +- of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
1532 ++ device_property_read_u32(dev, "card-detect-delay",
1533 ++ &pdata->detect_delay_ms);
1534 +
1535 +- of_property_read_u32(np, "data-addr", &host->data_addr_override);
1536 ++ device_property_read_u32(dev, "data-addr", &host->data_addr_override);
1537 +
1538 +- if (of_get_property(np, "fifo-watermark-aligned", NULL))
1539 ++ if (device_property_present(dev, "fifo-watermark-aligned"))
1540 + host->wm_aligned = true;
1541 +
1542 +- if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
1543 ++ if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency))
1544 + pdata->bus_hz = clock_frequency;
1545 +
1546 + if (drv_data && drv_data->parse_dt) {
1547 +diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
1548 +index 7611fd679f1a..1485530c3592 100644
1549 +--- a/drivers/mmc/host/sdhci-of-at91.c
1550 ++++ b/drivers/mmc/host/sdhci-of-at91.c
1551 +@@ -31,6 +31,7 @@
1552 +
1553 + #define SDMMC_MC1R 0x204
1554 + #define SDMMC_MC1R_DDR BIT(3)
1555 ++#define SDMMC_MC1R_FCD BIT(7)
1556 + #define SDMMC_CACR 0x230
1557 + #define SDMMC_CACR_CAPWREN BIT(0)
1558 + #define SDMMC_CACR_KEY (0x46 << 8)
1559 +@@ -43,6 +44,15 @@ struct sdhci_at91_priv {
1560 + struct clk *mainck;
1561 + };
1562 +
1563 ++static void sdhci_at91_set_force_card_detect(struct sdhci_host *host)
1564 ++{
1565 ++ u8 mc1r;
1566 ++
1567 ++ mc1r = readb(host->ioaddr + SDMMC_MC1R);
1568 ++ mc1r |= SDMMC_MC1R_FCD;
1569 ++ writeb(mc1r, host->ioaddr + SDMMC_MC1R);
1570 ++}
1571 ++
1572 + static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
1573 + {
1574 + u16 clk;
1575 +@@ -110,10 +120,18 @@ void sdhci_at91_set_uhs_signaling(struct sdhci_host *host, unsigned int timing)
1576 + sdhci_set_uhs_signaling(host, timing);
1577 + }
1578 +
1579 ++static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
1580 ++{
1581 ++ sdhci_reset(host, mask);
1582 ++
1583 ++ if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
1584 ++ sdhci_at91_set_force_card_detect(host);
1585 ++}
1586 ++
1587 + static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
1588 + .set_clock = sdhci_at91_set_clock,
1589 + .set_bus_width = sdhci_set_bus_width,
1590 +- .reset = sdhci_reset,
1591 ++ .reset = sdhci_at91_reset,
1592 + .set_uhs_signaling = sdhci_at91_set_uhs_signaling,
1593 + .set_power = sdhci_at91_set_power,
1594 + };
1595 +@@ -324,6 +342,21 @@ static int sdhci_at91_probe(struct platform_device *pdev)
1596 + host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
1597 + }
1598 +
1599 ++ /*
1600 ++ * If the device attached to the MMC bus is not removable, it is safer
1601 ++ * to set the Force Card Detect bit. People often don't connect the
1602 ++ * card detect signal and use this pin for another purpose. If the card
1603 ++ * detect pin is not muxed to SDHCI controller, a default value is
1604 ++ * used. This value can be different from a SoC revision to another
1605 ++ * one. Problems come when this default value is not card present. To
1606 ++ * avoid this case, if the device is non removable then the card
1607 ++ * detection procedure using the SDMCC_CD signal is bypassed.
1608 ++ * This bit is reset when a software reset for all command is performed
1609 ++ * so we need to implement our own reset function to set back this bit.
1610 ++ */
1611 ++ if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
1612 ++ sdhci_at91_set_force_card_detect(host);
1613 ++
1614 + pm_runtime_put_autosuspend(&pdev->dev);
1615 +
1616 + return 0;
1617 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
1618 +index 8ab6bdbe1682..224e93aa6d23 100644
1619 +--- a/drivers/net/bonding/bond_main.c
1620 ++++ b/drivers/net/bonding/bond_main.c
1621 +@@ -2047,6 +2047,7 @@ static int bond_miimon_inspect(struct bonding *bond)
1622 + continue;
1623 +
1624 + bond_propose_link_state(slave, BOND_LINK_FAIL);
1625 ++ commit++;
1626 + slave->delay = bond->params.downdelay;
1627 + if (slave->delay) {
1628 + netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n",
1629 +@@ -2085,6 +2086,7 @@ static int bond_miimon_inspect(struct bonding *bond)
1630 + continue;
1631 +
1632 + bond_propose_link_state(slave, BOND_LINK_BACK);
1633 ++ commit++;
1634 + slave->delay = bond->params.updelay;
1635 +
1636 + if (slave->delay) {
1637 +@@ -4598,7 +4600,7 @@ static int bond_check_params(struct bond_params *params)
1638 + }
1639 + ad_user_port_key = valptr->value;
1640 +
1641 +- if (bond_mode == BOND_MODE_TLB) {
1642 ++ if ((bond_mode == BOND_MODE_TLB) || (bond_mode == BOND_MODE_ALB)) {
1643 + bond_opt_initstr(&newval, "default");
1644 + valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB),
1645 + &newval);
1646 +diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
1647 +index fa0eece21eef..d9cc94a7d44e 100644
1648 +--- a/drivers/net/dsa/b53/b53_common.c
1649 ++++ b/drivers/net/dsa/b53/b53_common.c
1650 +@@ -1668,6 +1668,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
1651 + .dev_name = "BCM53125",
1652 + .vlans = 4096,
1653 + .enabled_ports = 0xff,
1654 ++ .arl_entries = 4,
1655 + .cpu_port = B53_CPU_PORT,
1656 + .vta_regs = B53_VTA_REGS,
1657 + .duplex_reg = B53_DUPLEX_STAT_GE,
1658 +diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
1659 +index d034d8cd7d22..32864a47c4c1 100644
1660 +--- a/drivers/net/dsa/mv88e6xxx/chip.c
1661 ++++ b/drivers/net/dsa/mv88e6xxx/chip.c
1662 +@@ -3377,6 +3377,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
1663 + .port_jumbo_config = mv88e6165_port_jumbo_config,
1664 + .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
1665 + .port_pause_config = mv88e6390_port_pause_config,
1666 ++ .port_set_cmode = mv88e6390x_port_set_cmode,
1667 + .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
1668 + .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
1669 + .stats_snapshot = mv88e6390_g1_stats_snapshot,
1670 +diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
1671 +index 5711fbbd6ae3..878cffd37e1f 100644
1672 +--- a/drivers/net/ethernet/aurora/nb8800.c
1673 ++++ b/drivers/net/ethernet/aurora/nb8800.c
1674 +@@ -609,7 +609,7 @@ static void nb8800_mac_config(struct net_device *dev)
1675 + mac_mode |= HALF_DUPLEX;
1676 +
1677 + if (gigabit) {
1678 +- if (priv->phy_mode == PHY_INTERFACE_MODE_RGMII)
1679 ++ if (phy_interface_is_rgmii(dev->phydev))
1680 + mac_mode |= RGMII_MODE;
1681 +
1682 + mac_mode |= GMAC_MODE;
1683 +@@ -1268,11 +1268,10 @@ static int nb8800_tangox_init(struct net_device *dev)
1684 + break;
1685 +
1686 + case PHY_INTERFACE_MODE_RGMII:
1687 +- pad_mode = PAD_MODE_RGMII;
1688 +- break;
1689 +-
1690 ++ case PHY_INTERFACE_MODE_RGMII_ID:
1691 ++ case PHY_INTERFACE_MODE_RGMII_RXID:
1692 + case PHY_INTERFACE_MODE_RGMII_TXID:
1693 +- pad_mode = PAD_MODE_RGMII | PAD_MODE_GTX_CLK_DELAY;
1694 ++ pad_mode = PAD_MODE_RGMII;
1695 + break;
1696 +
1697 + default:
1698 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
1699 +index 10d282841f5b..ac0a460c006a 100644
1700 +--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
1701 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
1702 +@@ -777,6 +777,10 @@ static void cb_timeout_handler(struct work_struct *work)
1703 + mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
1704 + }
1705 +
1706 ++static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
1707 ++static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
1708 ++ struct mlx5_cmd_msg *msg);
1709 ++
1710 + static void cmd_work_handler(struct work_struct *work)
1711 + {
1712 + struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
1713 +@@ -786,16 +790,27 @@ static void cmd_work_handler(struct work_struct *work)
1714 + struct mlx5_cmd_layout *lay;
1715 + struct semaphore *sem;
1716 + unsigned long flags;
1717 ++ int alloc_ret;
1718 +
1719 + sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
1720 + down(sem);
1721 + if (!ent->page_queue) {
1722 +- ent->idx = alloc_ent(cmd);
1723 +- if (ent->idx < 0) {
1724 ++ alloc_ret = alloc_ent(cmd);
1725 ++ if (alloc_ret < 0) {
1726 + mlx5_core_err(dev, "failed to allocate command entry\n");
1727 ++ if (ent->callback) {
1728 ++ ent->callback(-EAGAIN, ent->context);
1729 ++ mlx5_free_cmd_msg(dev, ent->out);
1730 ++ free_msg(dev, ent->in);
1731 ++ free_cmd(ent);
1732 ++ } else {
1733 ++ ent->ret = -EAGAIN;
1734 ++ complete(&ent->done);
1735 ++ }
1736 + up(sem);
1737 + return;
1738 + }
1739 ++ ent->idx = alloc_ret;
1740 + } else {
1741 + ent->idx = cmd->max_reg_cmds;
1742 + spin_lock_irqsave(&cmd->alloc_lock, flags);
1743 +@@ -955,7 +970,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
1744 +
1745 + err = wait_func(dev, ent);
1746 + if (err == -ETIMEDOUT)
1747 +- goto out_free;
1748 ++ goto out;
1749 +
1750 + ds = ent->ts2 - ent->ts1;
1751 + op = MLX5_GET(mbox_in, in->first.data, opcode);
1752 +@@ -1419,6 +1434,7 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
1753 + mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
1754 + ent->idx);
1755 + free_ent(cmd, ent->idx);
1756 ++ free_cmd(ent);
1757 + }
1758 + continue;
1759 + }
1760 +@@ -1477,7 +1493,8 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
1761 + free_msg(dev, ent->in);
1762 +
1763 + err = err ? err : ent->status;
1764 +- free_cmd(ent);
1765 ++ if (!forced)
1766 ++ free_cmd(ent);
1767 + callback(err, context);
1768 + } else {
1769 + complete(&ent->done);
1770 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
1771 +index 944fc1742464..3b39dbd97e57 100644
1772 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
1773 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
1774 +@@ -261,6 +261,14 @@ struct mlx5e_dcbx {
1775 + };
1776 + #endif
1777 +
1778 ++#define MAX_PIN_NUM 8
1779 ++struct mlx5e_pps {
1780 ++ u8 pin_caps[MAX_PIN_NUM];
1781 ++ struct work_struct out_work;
1782 ++ u64 start[MAX_PIN_NUM];
1783 ++ u8 enabled;
1784 ++};
1785 ++
1786 + struct mlx5e_tstamp {
1787 + rwlock_t lock;
1788 + struct cyclecounter cycles;
1789 +@@ -272,7 +280,7 @@ struct mlx5e_tstamp {
1790 + struct mlx5_core_dev *mdev;
1791 + struct ptp_clock *ptp;
1792 + struct ptp_clock_info ptp_info;
1793 +- u8 *pps_pin_caps;
1794 ++ struct mlx5e_pps pps_info;
1795 + };
1796 +
1797 + enum {
1798 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
1799 +index e706a87fc8b2..80c500f87ab6 100644
1800 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
1801 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
1802 +@@ -53,6 +53,15 @@ enum {
1803 + MLX5E_EVENT_MODE_ONCE_TILL_ARM = 0x2,
1804 + };
1805 +
1806 ++enum {
1807 ++ MLX5E_MTPPS_FS_ENABLE = BIT(0x0),
1808 ++ MLX5E_MTPPS_FS_PATTERN = BIT(0x2),
1809 ++ MLX5E_MTPPS_FS_PIN_MODE = BIT(0x3),
1810 ++ MLX5E_MTPPS_FS_TIME_STAMP = BIT(0x4),
1811 ++ MLX5E_MTPPS_FS_OUT_PULSE_DURATION = BIT(0x5),
1812 ++ MLX5E_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7),
1813 ++};
1814 ++
1815 + void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp,
1816 + struct skb_shared_hwtstamps *hwts)
1817 + {
1818 +@@ -73,17 +82,46 @@ static u64 mlx5e_read_internal_timer(const struct cyclecounter *cc)
1819 + return mlx5_read_internal_timer(tstamp->mdev) & cc->mask;
1820 + }
1821 +
1822 ++static void mlx5e_pps_out(struct work_struct *work)
1823 ++{
1824 ++ struct mlx5e_pps *pps_info = container_of(work, struct mlx5e_pps,
1825 ++ out_work);
1826 ++ struct mlx5e_tstamp *tstamp = container_of(pps_info, struct mlx5e_tstamp,
1827 ++ pps_info);
1828 ++ u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
1829 ++ unsigned long flags;
1830 ++ int i;
1831 ++
1832 ++ for (i = 0; i < tstamp->ptp_info.n_pins; i++) {
1833 ++ u64 tstart;
1834 ++
1835 ++ write_lock_irqsave(&tstamp->lock, flags);
1836 ++ tstart = tstamp->pps_info.start[i];
1837 ++ tstamp->pps_info.start[i] = 0;
1838 ++ write_unlock_irqrestore(&tstamp->lock, flags);
1839 ++ if (!tstart)
1840 ++ continue;
1841 ++
1842 ++ MLX5_SET(mtpps_reg, in, pin, i);
1843 ++ MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
1844 ++ MLX5_SET(mtpps_reg, in, field_select, MLX5E_MTPPS_FS_TIME_STAMP);
1845 ++ mlx5_set_mtpps(tstamp->mdev, in, sizeof(in));
1846 ++ }
1847 ++}
1848 ++
1849 + static void mlx5e_timestamp_overflow(struct work_struct *work)
1850 + {
1851 + struct delayed_work *dwork = to_delayed_work(work);
1852 + struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp,
1853 + overflow_work);
1854 ++ struct mlx5e_priv *priv = container_of(tstamp, struct mlx5e_priv, tstamp);
1855 + unsigned long flags;
1856 +
1857 + write_lock_irqsave(&tstamp->lock, flags);
1858 + timecounter_read(&tstamp->clock);
1859 + write_unlock_irqrestore(&tstamp->lock, flags);
1860 +- schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period);
1861 ++ queue_delayed_work(priv->wq, &tstamp->overflow_work,
1862 ++ msecs_to_jiffies(tstamp->overflow_period * 1000));
1863 + }
1864 +
1865 + int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
1866 +@@ -214,18 +252,6 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
1867 + int neg_adj = 0;
1868 + struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
1869 + ptp_info);
1870 +- struct mlx5e_priv *priv =
1871 +- container_of(tstamp, struct mlx5e_priv, tstamp);
1872 +-
1873 +- if (MLX5_CAP_GEN(priv->mdev, pps_modify)) {
1874 +- u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
1875 +-
1876 +- /* For future use need to add a loop for finding all 1PPS out pins */
1877 +- MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT);
1878 +- MLX5_SET(mtpps_reg, in, out_periodic_adjustment, delta & 0xFFFF);
1879 +-
1880 +- mlx5_set_mtpps(priv->mdev, in, sizeof(in));
1881 +- }
1882 +
1883 + if (delta < 0) {
1884 + neg_adj = 1;
1885 +@@ -254,12 +280,13 @@ static int mlx5e_extts_configure(struct ptp_clock_info *ptp,
1886 + struct mlx5e_priv *priv =
1887 + container_of(tstamp, struct mlx5e_priv, tstamp);
1888 + u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
1889 ++ u32 field_select = 0;
1890 ++ u8 pin_mode = 0;
1891 + u8 pattern = 0;
1892 + int pin = -1;
1893 + int err = 0;
1894 +
1895 +- if (!MLX5_CAP_GEN(priv->mdev, pps) ||
1896 +- !MLX5_CAP_GEN(priv->mdev, pps_modify))
1897 ++ if (!MLX5_PPS_CAP(priv->mdev))
1898 + return -EOPNOTSUPP;
1899 +
1900 + if (rq->extts.index >= tstamp->ptp_info.n_pins)
1901 +@@ -269,15 +296,21 @@ static int mlx5e_extts_configure(struct ptp_clock_info *ptp,
1902 + pin = ptp_find_pin(tstamp->ptp, PTP_PF_EXTTS, rq->extts.index);
1903 + if (pin < 0)
1904 + return -EBUSY;
1905 ++ pin_mode = MLX5E_PIN_MODE_IN;
1906 ++ pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
1907 ++ field_select = MLX5E_MTPPS_FS_PIN_MODE |
1908 ++ MLX5E_MTPPS_FS_PATTERN |
1909 ++ MLX5E_MTPPS_FS_ENABLE;
1910 ++ } else {
1911 ++ pin = rq->extts.index;
1912 ++ field_select = MLX5E_MTPPS_FS_ENABLE;
1913 + }
1914 +
1915 +- if (rq->extts.flags & PTP_FALLING_EDGE)
1916 +- pattern = 1;
1917 +-
1918 + MLX5_SET(mtpps_reg, in, pin, pin);
1919 +- MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_IN);
1920 ++ MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
1921 + MLX5_SET(mtpps_reg, in, pattern, pattern);
1922 + MLX5_SET(mtpps_reg, in, enable, on);
1923 ++ MLX5_SET(mtpps_reg, in, field_select, field_select);
1924 +
1925 + err = mlx5_set_mtpps(priv->mdev, in, sizeof(in));
1926 + if (err)
1927 +@@ -296,14 +329,18 @@ static int mlx5e_perout_configure(struct ptp_clock_info *ptp,
1928 + struct mlx5e_priv *priv =
1929 + container_of(tstamp, struct mlx5e_priv, tstamp);
1930 + u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
1931 +- u64 nsec_now, nsec_delta, time_stamp;
1932 ++ u64 nsec_now, nsec_delta, time_stamp = 0;
1933 + u64 cycles_now, cycles_delta;
1934 + struct timespec64 ts;
1935 + unsigned long flags;
1936 ++ u32 field_select = 0;
1937 ++ u8 pin_mode = 0;
1938 ++ u8 pattern = 0;
1939 + int pin = -1;
1940 ++ int err = 0;
1941 + s64 ns;
1942 +
1943 +- if (!MLX5_CAP_GEN(priv->mdev, pps_modify))
1944 ++ if (!MLX5_PPS_CAP(priv->mdev))
1945 + return -EOPNOTSUPP;
1946 +
1947 + if (rq->perout.index >= tstamp->ptp_info.n_pins)
1948 +@@ -314,32 +351,60 @@ static int mlx5e_perout_configure(struct ptp_clock_info *ptp,
1949 + rq->perout.index);
1950 + if (pin < 0)
1951 + return -EBUSY;
1952 +- }
1953 +
1954 +- ts.tv_sec = rq->perout.period.sec;
1955 +- ts.tv_nsec = rq->perout.period.nsec;
1956 +- ns = timespec64_to_ns(&ts);
1957 +- if (on)
1958 ++ pin_mode = MLX5E_PIN_MODE_OUT;
1959 ++ pattern = MLX5E_OUT_PATTERN_PERIODIC;
1960 ++ ts.tv_sec = rq->perout.period.sec;
1961 ++ ts.tv_nsec = rq->perout.period.nsec;
1962 ++ ns = timespec64_to_ns(&ts);
1963 ++
1964 + if ((ns >> 1) != 500000000LL)
1965 + return -EINVAL;
1966 +- ts.tv_sec = rq->perout.start.sec;
1967 +- ts.tv_nsec = rq->perout.start.nsec;
1968 +- ns = timespec64_to_ns(&ts);
1969 +- cycles_now = mlx5_read_internal_timer(tstamp->mdev);
1970 +- write_lock_irqsave(&tstamp->lock, flags);
1971 +- nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
1972 +- nsec_delta = ns - nsec_now;
1973 +- cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
1974 +- tstamp->cycles.mult);
1975 +- write_unlock_irqrestore(&tstamp->lock, flags);
1976 +- time_stamp = cycles_now + cycles_delta;
1977 ++
1978 ++ ts.tv_sec = rq->perout.start.sec;
1979 ++ ts.tv_nsec = rq->perout.start.nsec;
1980 ++ ns = timespec64_to_ns(&ts);
1981 ++ cycles_now = mlx5_read_internal_timer(tstamp->mdev);
1982 ++ write_lock_irqsave(&tstamp->lock, flags);
1983 ++ nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
1984 ++ nsec_delta = ns - nsec_now;
1985 ++ cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
1986 ++ tstamp->cycles.mult);
1987 ++ write_unlock_irqrestore(&tstamp->lock, flags);
1988 ++ time_stamp = cycles_now + cycles_delta;
1989 ++ field_select = MLX5E_MTPPS_FS_PIN_MODE |
1990 ++ MLX5E_MTPPS_FS_PATTERN |
1991 ++ MLX5E_MTPPS_FS_ENABLE |
1992 ++ MLX5E_MTPPS_FS_TIME_STAMP;
1993 ++ } else {
1994 ++ pin = rq->perout.index;
1995 ++ field_select = MLX5E_MTPPS_FS_ENABLE;
1996 ++ }
1997 ++
1998 + MLX5_SET(mtpps_reg, in, pin, pin);
1999 +- MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT);
2000 +- MLX5_SET(mtpps_reg, in, pattern, MLX5E_OUT_PATTERN_PERIODIC);
2001 ++ MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
2002 ++ MLX5_SET(mtpps_reg, in, pattern, pattern);
2003 + MLX5_SET(mtpps_reg, in, enable, on);
2004 + MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
2005 ++ MLX5_SET(mtpps_reg, in, field_select, field_select);
2006 ++
2007 ++ err = mlx5_set_mtpps(priv->mdev, in, sizeof(in));
2008 ++ if (err)
2009 ++ return err;
2010 +
2011 +- return mlx5_set_mtpps(priv->mdev, in, sizeof(in));
2012 ++ return mlx5_set_mtppse(priv->mdev, pin, 0,
2013 ++ MLX5E_EVENT_MODE_REPETETIVE & on);
2014 ++}
2015 ++
2016 ++static int mlx5e_pps_configure(struct ptp_clock_info *ptp,
2017 ++ struct ptp_clock_request *rq,
2018 ++ int on)
2019 ++{
2020 ++ struct mlx5e_tstamp *tstamp =
2021 ++ container_of(ptp, struct mlx5e_tstamp, ptp_info);
2022 ++
2023 ++ tstamp->pps_info.enabled = !!on;
2024 ++ return 0;
2025 + }
2026 +
2027 + static int mlx5e_ptp_enable(struct ptp_clock_info *ptp,
2028 +@@ -351,6 +416,8 @@ static int mlx5e_ptp_enable(struct ptp_clock_info *ptp,
2029 + return mlx5e_extts_configure(ptp, rq, on);
2030 + case PTP_CLK_REQ_PEROUT:
2031 + return mlx5e_perout_configure(ptp, rq, on);
2032 ++ case PTP_CLK_REQ_PPS:
2033 ++ return mlx5e_pps_configure(ptp, rq, on);
2034 + default:
2035 + return -EOPNOTSUPP;
2036 + }
2037 +@@ -396,6 +463,7 @@ static int mlx5e_init_pin_config(struct mlx5e_tstamp *tstamp)
2038 + return -ENOMEM;
2039 + tstamp->ptp_info.enable = mlx5e_ptp_enable;
2040 + tstamp->ptp_info.verify = mlx5e_ptp_verify;
2041 ++ tstamp->ptp_info.pps = 1;
2042 +
2043 + for (i = 0; i < tstamp->ptp_info.n_pins; i++) {
2044 + snprintf(tstamp->ptp_info.pin_config[i].name,
2045 +@@ -423,22 +491,56 @@ static void mlx5e_get_pps_caps(struct mlx5e_priv *priv,
2046 + tstamp->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
2047 + cap_max_num_of_pps_out_pins);
2048 +
2049 +- tstamp->pps_pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
2050 +- tstamp->pps_pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
2051 +- tstamp->pps_pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
2052 +- tstamp->pps_pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
2053 +- tstamp->pps_pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
2054 +- tstamp->pps_pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
2055 +- tstamp->pps_pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
2056 +- tstamp->pps_pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
2057 ++ tstamp->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
2058 ++ tstamp->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
2059 ++ tstamp->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
2060 ++ tstamp->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
2061 ++ tstamp->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
2062 ++ tstamp->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
2063 ++ tstamp->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
2064 ++ tstamp->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
2065 + }
2066 +
2067 + void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
2068 + struct ptp_clock_event *event)
2069 + {
2070 ++ struct net_device *netdev = priv->netdev;
2071 + struct mlx5e_tstamp *tstamp = &priv->tstamp;
2072 ++ struct timespec64 ts;
2073 ++ u64 nsec_now, nsec_delta;
2074 ++ u64 cycles_now, cycles_delta;
2075 ++ int pin = event->index;
2076 ++ s64 ns;
2077 ++ unsigned long flags;
2078 +
2079 +- ptp_clock_event(tstamp->ptp, event);
2080 ++ switch (tstamp->ptp_info.pin_config[pin].func) {
2081 ++ case PTP_PF_EXTTS:
2082 ++ if (tstamp->pps_info.enabled) {
2083 ++ event->type = PTP_CLOCK_PPSUSR;
2084 ++ event->pps_times.ts_real = ns_to_timespec64(event->timestamp);
2085 ++ } else {
2086 ++ event->type = PTP_CLOCK_EXTTS;
2087 ++ }
2088 ++ ptp_clock_event(tstamp->ptp, event);
2089 ++ break;
2090 ++ case PTP_PF_PEROUT:
2091 ++ mlx5e_ptp_gettime(&tstamp->ptp_info, &ts);
2092 ++ cycles_now = mlx5_read_internal_timer(tstamp->mdev);
2093 ++ ts.tv_sec += 1;
2094 ++ ts.tv_nsec = 0;
2095 ++ ns = timespec64_to_ns(&ts);
2096 ++ write_lock_irqsave(&tstamp->lock, flags);
2097 ++ nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
2098 ++ nsec_delta = ns - nsec_now;
2099 ++ cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
2100 ++ tstamp->cycles.mult);
2101 ++ tstamp->pps_info.start[pin] = cycles_now + cycles_delta;
2102 ++ queue_work(priv->wq, &tstamp->pps_info.out_work);
2103 ++ write_unlock_irqrestore(&tstamp->lock, flags);
2104 ++ break;
2105 ++ default:
2106 ++ netdev_err(netdev, "%s: Unhandled event\n", __func__);
2107 ++ }
2108 + }
2109 +
2110 + void mlx5e_timestamp_init(struct mlx5e_priv *priv)
2111 +@@ -474,9 +576,10 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
2112 + do_div(ns, NSEC_PER_SEC / 2 / HZ);
2113 + tstamp->overflow_period = ns;
2114 +
2115 ++ INIT_WORK(&tstamp->pps_info.out_work, mlx5e_pps_out);
2116 + INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow);
2117 + if (tstamp->overflow_period)
2118 +- schedule_delayed_work(&tstamp->overflow_work, 0);
2119 ++ queue_delayed_work(priv->wq, &tstamp->overflow_work, 0);
2120 + else
2121 + mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n");
2122 +
2123 +@@ -485,16 +588,10 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
2124 + snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp");
2125 +
2126 + /* Initialize 1PPS data structures */
2127 +-#define MAX_PIN_NUM 8
2128 +- tstamp->pps_pin_caps = kzalloc(sizeof(u8) * MAX_PIN_NUM, GFP_KERNEL);
2129 +- if (tstamp->pps_pin_caps) {
2130 +- if (MLX5_CAP_GEN(priv->mdev, pps))
2131 +- mlx5e_get_pps_caps(priv, tstamp);
2132 +- if (tstamp->ptp_info.n_pins)
2133 +- mlx5e_init_pin_config(tstamp);
2134 +- } else {
2135 +- mlx5_core_warn(priv->mdev, "1PPS initialization failed\n");
2136 +- }
2137 ++ if (MLX5_PPS_CAP(priv->mdev))
2138 ++ mlx5e_get_pps_caps(priv, tstamp);
2139 ++ if (tstamp->ptp_info.n_pins)
2140 ++ mlx5e_init_pin_config(tstamp);
2141 +
2142 + tstamp->ptp = ptp_clock_register(&tstamp->ptp_info,
2143 + &priv->mdev->pdev->dev);
2144 +@@ -517,8 +614,7 @@ void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv)
2145 + priv->tstamp.ptp = NULL;
2146 + }
2147 +
2148 +- kfree(tstamp->pps_pin_caps);
2149 +- kfree(tstamp->ptp_info.pin_config);
2150 +-
2151 ++ cancel_work_sync(&tstamp->pps_info.out_work);
2152 + cancel_delayed_work_sync(&tstamp->overflow_work);
2153 ++ kfree(tstamp->ptp_info.pin_config);
2154 + }
2155 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
2156 +index 85bf4a389295..986387de13ee 100644
2157 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
2158 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
2159 +@@ -276,7 +276,7 @@ static void add_rule_to_list(struct mlx5e_priv *priv,
2160 +
2161 + static bool outer_header_zero(u32 *match_criteria)
2162 + {
2163 +- int size = MLX5_ST_SZ_BYTES(fte_match_param);
2164 ++ int size = MLX5_FLD_SZ_BYTES(fte_match_param, outer_headers);
2165 + char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
2166 + outer_headers);
2167 +
2168 +@@ -320,7 +320,7 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv,
2169 +
2170 + spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
2171 + flow_act.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
2172 +- rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, 1);
2173 ++ rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, dst ? 1 : 0);
2174 + if (IS_ERR(rule)) {
2175 + err = PTR_ERR(rule);
2176 + netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
2177 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2178 +index 7819fe9ede22..072aa8a13a0a 100644
2179 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2180 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2181 +@@ -365,7 +365,6 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
2182 + break;
2183 + case MLX5_DEV_EVENT_PPS:
2184 + eqe = (struct mlx5_eqe *)param;
2185 +- ptp_event.type = PTP_CLOCK_EXTTS;
2186 + ptp_event.index = eqe->data.pps.pin;
2187 + ptp_event.timestamp =
2188 + timecounter_cyc2time(&priv->tstamp.clock,
2189 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
2190 +index 33eae5ad2fb0..58a9f5c96d10 100644
2191 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
2192 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
2193 +@@ -690,7 +690,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
2194 + else
2195 + mlx5_core_dbg(dev, "port_module_event is not set\n");
2196 +
2197 +- if (MLX5_CAP_GEN(dev, pps))
2198 ++ if (MLX5_PPS_CAP(dev))
2199 + async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);
2200 +
2201 + err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
2202 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c
2203 +index cc1858752e70..6d90e9e3bfd1 100644
2204 +--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c
2205 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c
2206 +@@ -160,8 +160,6 @@ static int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core
2207 +
2208 + static void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
2209 + {
2210 +- mlx5_fs_remove_rx_underlay_qpn(mdev, qp->qpn);
2211 +-
2212 + mlx5_core_destroy_qp(mdev, qp);
2213 + }
2214 +
2215 +@@ -176,8 +174,6 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv)
2216 + return err;
2217 + }
2218 +
2219 +- mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
2220 +-
2221 + err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]);
2222 + if (err) {
2223 + mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
2224 +@@ -235,6 +231,7 @@ static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
2225 +
2226 + static int mlx5i_init_rx(struct mlx5e_priv *priv)
2227 + {
2228 ++ struct mlx5i_priv *ipriv = priv->ppriv;
2229 + int err;
2230 +
2231 + err = mlx5e_create_indirect_rqt(priv);
2232 +@@ -253,12 +250,18 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
2233 + if (err)
2234 + goto err_destroy_indirect_tirs;
2235 +
2236 +- err = mlx5i_create_flow_steering(priv);
2237 ++ err = mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
2238 + if (err)
2239 + goto err_destroy_direct_tirs;
2240 +
2241 ++ err = mlx5i_create_flow_steering(priv);
2242 ++ if (err)
2243 ++ goto err_remove_rx_underlay_qpn;
2244 ++
2245 + return 0;
2246 +
2247 ++err_remove_rx_underlay_qpn:
2248 ++ mlx5_fs_remove_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
2249 + err_destroy_direct_tirs:
2250 + mlx5e_destroy_direct_tirs(priv);
2251 + err_destroy_indirect_tirs:
2252 +@@ -272,6 +275,9 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
2253 +
2254 + static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
2255 + {
2256 ++ struct mlx5i_priv *ipriv = priv->ppriv;
2257 ++
2258 ++ mlx5_fs_remove_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
2259 + mlx5i_destroy_flow_steering(priv);
2260 + mlx5e_destroy_direct_tirs(priv);
2261 + mlx5e_destroy_indirect_tirs(priv);
2262 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
2263 +index b5d5519542e8..0ca4623bda6b 100644
2264 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
2265 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
2266 +@@ -157,22 +157,17 @@ static bool mlx5_lag_is_bonded(struct mlx5_lag *ldev)
2267 + static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
2268 + u8 *port1, u8 *port2)
2269 + {
2270 +- if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
2271 +- if (tracker->netdev_state[0].tx_enabled) {
2272 +- *port1 = 1;
2273 +- *port2 = 1;
2274 +- } else {
2275 +- *port1 = 2;
2276 +- *port2 = 2;
2277 +- }
2278 +- } else {
2279 +- *port1 = 1;
2280 +- *port2 = 2;
2281 +- if (!tracker->netdev_state[0].link_up)
2282 +- *port1 = 2;
2283 +- else if (!tracker->netdev_state[1].link_up)
2284 +- *port2 = 1;
2285 ++ *port1 = 1;
2286 ++ *port2 = 2;
2287 ++ if (!tracker->netdev_state[0].tx_enabled ||
2288 ++ !tracker->netdev_state[0].link_up) {
2289 ++ *port1 = 2;
2290 ++ return;
2291 + }
2292 ++
2293 ++ if (!tracker->netdev_state[1].tx_enabled ||
2294 ++ !tracker->netdev_state[1].link_up)
2295 ++ *port2 = 1;
2296 + }
2297 +
2298 + static void mlx5_activate_lag(struct mlx5_lag *ldev,
2299 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
2300 +index fbc6e9e9e305..1874aa96c1a1 100644
2301 +--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
2302 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
2303 +@@ -153,6 +153,11 @@ int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size);
2304 + int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode);
2305 + int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);
2306 +
2307 ++#define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) && \
2308 ++ MLX5_CAP_GEN((mdev), pps_modify) && \
2309 ++ MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) && \
2310 ++ MLX5_CAP_MCAM_FEATURE((mdev), mtpps_enh_out_per_adj))
2311 ++
2312 + void mlx5e_init(void);
2313 + void mlx5e_cleanup(void);
2314 +
2315 +diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
2316 +index 6f6ed75b63c9..765de3bedb88 100644
2317 +--- a/drivers/net/irda/mcs7780.c
2318 ++++ b/drivers/net/irda/mcs7780.c
2319 +@@ -141,9 +141,19 @@ static int mcs_set_reg(struct mcs_cb *mcs, __u16 reg, __u16 val)
2320 + static int mcs_get_reg(struct mcs_cb *mcs, __u16 reg, __u16 * val)
2321 + {
2322 + struct usb_device *dev = mcs->usbdev;
2323 +- int ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
2324 +- MCS_RD_RTYPE, 0, reg, val, 2,
2325 +- msecs_to_jiffies(MCS_CTRL_TIMEOUT));
2326 ++ void *dmabuf;
2327 ++ int ret;
2328 ++
2329 ++ dmabuf = kmalloc(sizeof(__u16), GFP_KERNEL);
2330 ++ if (!dmabuf)
2331 ++ return -ENOMEM;
2332 ++
2333 ++ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
2334 ++ MCS_RD_RTYPE, 0, reg, dmabuf, 2,
2335 ++ msecs_to_jiffies(MCS_CTRL_TIMEOUT));
2336 ++
2337 ++ memcpy(val, dmabuf, sizeof(__u16));
2338 ++ kfree(dmabuf);
2339 +
2340 + return ret;
2341 + }
2342 +diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
2343 +index eebb0e1c70ff..b30d9ceee8bc 100644
2344 +--- a/drivers/net/phy/phy.c
2345 ++++ b/drivers/net/phy/phy.c
2346 +@@ -749,6 +749,9 @@ void phy_stop_machine(struct phy_device *phydev)
2347 + if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
2348 + phydev->state = PHY_UP;
2349 + mutex_unlock(&phydev->lock);
2350 ++
2351 ++ /* Now we can run the state machine synchronously */
2352 ++ phy_state_machine(&phydev->state_queue.work);
2353 + }
2354 +
2355 + /**
2356 +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
2357 +index 6633dd4bb649..acb754eb1ccb 100644
2358 +--- a/drivers/net/virtio_net.c
2359 ++++ b/drivers/net/virtio_net.c
2360 +@@ -889,21 +889,20 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
2361 +
2362 + buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
2363 + buf += headroom; /* advance address leaving hole at front of pkt */
2364 +- ctx = (void *)(unsigned long)len;
2365 + get_page(alloc_frag->page);
2366 + alloc_frag->offset += len + headroom;
2367 + hole = alloc_frag->size - alloc_frag->offset;
2368 + if (hole < len + headroom) {
2369 + /* To avoid internal fragmentation, if there is very likely not
2370 + * enough space for another buffer, add the remaining space to
2371 +- * the current buffer. This extra space is not included in
2372 +- * the truesize stored in ctx.
2373 ++ * the current buffer.
2374 + */
2375 + len += hole;
2376 + alloc_frag->offset += hole;
2377 + }
2378 +
2379 + sg_init_one(rq->sg, buf, len);
2380 ++ ctx = (void *)(unsigned long)len;
2381 + err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
2382 + if (err < 0)
2383 + put_page(virt_to_head_page(buf));
2384 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
2385 +index 5653d6dd38f6..d44f59ef4f72 100644
2386 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
2387 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
2388 +@@ -4168,11 +4168,6 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
2389 + goto fail;
2390 + }
2391 +
2392 +- /* allocate scatter-gather table. sg support
2393 +- * will be disabled upon allocation failure.
2394 +- */
2395 +- brcmf_sdiod_sgtable_alloc(bus->sdiodev);
2396 +-
2397 + /* Query the F2 block size, set roundup accordingly */
2398 + bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
2399 + bus->roundup = min(max_roundup, bus->blocksize);
2400 +diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
2401 +index 4b97371c3b42..838946d17b59 100644
2402 +--- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
2403 ++++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
2404 +@@ -1190,11 +1190,11 @@ void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
2405 + next_reclaimed;
2406 + IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
2407 + next_reclaimed);
2408 ++ iwlagn_check_ratid_empty(priv, sta_id, tid);
2409 + }
2410 +
2411 + iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
2412 +
2413 +- iwlagn_check_ratid_empty(priv, sta_id, tid);
2414 + freed = 0;
2415 +
2416 + /* process frames */
2417 +diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
2418 +index 3c52867dfe28..d145e0d90227 100644
2419 +--- a/drivers/scsi/Kconfig
2420 ++++ b/drivers/scsi/Kconfig
2421 +@@ -1241,6 +1241,8 @@ config SCSI_LPFC
2422 + tristate "Emulex LightPulse Fibre Channel Support"
2423 + depends on PCI && SCSI
2424 + depends on SCSI_FC_ATTRS
2425 ++ depends on NVME_TARGET_FC || NVME_TARGET_FC=n
2426 ++ depends on NVME_FC || NVME_FC=n
2427 + select CRC_T10DIF
2428 + ---help---
2429 + This lpfc driver supports the Emulex LightPulse
2430 +diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
2431 +index beb5f098f32d..05804227234d 100644
2432 +--- a/drivers/target/target_core_user.c
2433 ++++ b/drivers/target/target_core_user.c
2434 +@@ -437,7 +437,7 @@ static int scatter_data_area(struct tcmu_dev *udev,
2435 + to_offset = get_block_offset_user(udev, dbi,
2436 + block_remaining);
2437 + offset = DATA_BLOCK_SIZE - block_remaining;
2438 +- to = (void *)(unsigned long)to + offset;
2439 ++ to += offset;
2440 +
2441 + if (*iov_cnt != 0 &&
2442 + to_offset == iov_tail(udev, *iov)) {
2443 +@@ -510,7 +510,7 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
2444 + copy_bytes = min_t(size_t, sg_remaining,
2445 + block_remaining);
2446 + offset = DATA_BLOCK_SIZE - block_remaining;
2447 +- from = (void *)(unsigned long)from + offset;
2448 ++ from += offset;
2449 + tcmu_flush_dcache_range(from, copy_bytes);
2450 + memcpy(to + sg->length - sg_remaining, from,
2451 + copy_bytes);
2452 +@@ -699,25 +699,24 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
2453 + size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);
2454 +
2455 + entry = (void *) mb + CMDR_OFF + cmd_head;
2456 +- tcmu_flush_dcache_range(entry, sizeof(*entry));
2457 + tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD);
2458 + tcmu_hdr_set_len(&entry->hdr.len_op, pad_size);
2459 + entry->hdr.cmd_id = 0; /* not used for PAD */
2460 + entry->hdr.kflags = 0;
2461 + entry->hdr.uflags = 0;
2462 ++ tcmu_flush_dcache_range(entry, sizeof(*entry));
2463 +
2464 + UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
2465 ++ tcmu_flush_dcache_range(mb, sizeof(*mb));
2466 +
2467 + cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
2468 + WARN_ON(cmd_head != 0);
2469 + }
2470 +
2471 + entry = (void *) mb + CMDR_OFF + cmd_head;
2472 +- tcmu_flush_dcache_range(entry, sizeof(*entry));
2473 ++ memset(entry, 0, command_size);
2474 + tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
2475 + entry->hdr.cmd_id = tcmu_cmd->cmd_id;
2476 +- entry->hdr.kflags = 0;
2477 +- entry->hdr.uflags = 0;
2478 +
2479 + /* Handle allocating space from the data area */
2480 + tcmu_cmd_reset_dbi_cur(tcmu_cmd);
2481 +@@ -736,11 +735,10 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
2482 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2483 + }
2484 + entry->req.iov_cnt = iov_cnt;
2485 +- entry->req.iov_dif_cnt = 0;
2486 +
2487 + /* Handle BIDI commands */
2488 ++ iov_cnt = 0;
2489 + if (se_cmd->se_cmd_flags & SCF_BIDI) {
2490 +- iov_cnt = 0;
2491 + iov++;
2492 + ret = scatter_data_area(udev, tcmu_cmd,
2493 + se_cmd->t_bidi_data_sg,
2494 +@@ -753,8 +751,8 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
2495 + pr_err("tcmu: alloc and scatter bidi data failed\n");
2496 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2497 + }
2498 +- entry->req.iov_bidi_cnt = iov_cnt;
2499 + }
2500 ++ entry->req.iov_bidi_cnt = iov_cnt;
2501 +
2502 + /*
2503 + * Recalaulate the command's base size and size according
2504 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
2505 +index 33d979e9ea2a..83eecd33ad96 100644
2506 +--- a/fs/btrfs/extent-tree.c
2507 ++++ b/fs/btrfs/extent-tree.c
2508 +@@ -4776,10 +4776,6 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
2509 + else
2510 + flush = BTRFS_RESERVE_NO_FLUSH;
2511 + spin_lock(&space_info->lock);
2512 +- if (can_overcommit(root, space_info, orig, flush)) {
2513 +- spin_unlock(&space_info->lock);
2514 +- break;
2515 +- }
2516 + if (list_empty(&space_info->tickets) &&
2517 + list_empty(&space_info->priority_tickets)) {
2518 + spin_unlock(&space_info->lock);
2519 +diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
2520 +index 3ec0e46de95f..22a8d532cca6 100644
2521 +--- a/fs/ext4/acl.c
2522 ++++ b/fs/ext4/acl.c
2523 +@@ -193,13 +193,6 @@ __ext4_set_acl(handle_t *handle, struct inode *inode, int type,
2524 + switch (type) {
2525 + case ACL_TYPE_ACCESS:
2526 + name_index = EXT4_XATTR_INDEX_POSIX_ACL_ACCESS;
2527 +- if (acl) {
2528 +- error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
2529 +- if (error)
2530 +- return error;
2531 +- inode->i_ctime = current_time(inode);
2532 +- ext4_mark_inode_dirty(handle, inode);
2533 +- }
2534 + break;
2535 +
2536 + case ACL_TYPE_DEFAULT:
2537 +@@ -221,8 +214,9 @@ __ext4_set_acl(handle_t *handle, struct inode *inode, int type,
2538 + value, size, 0);
2539 +
2540 + kfree(value);
2541 +- if (!error)
2542 ++ if (!error) {
2543 + set_cached_acl(inode, type, acl);
2544 ++ }
2545 +
2546 + return error;
2547 + }
2548 +@@ -232,6 +226,8 @@ ext4_set_acl(struct inode *inode, struct posix_acl *acl, int type)
2549 + {
2550 + handle_t *handle;
2551 + int error, retries = 0;
2552 ++ umode_t mode = inode->i_mode;
2553 ++ int update_mode = 0;
2554 +
2555 + error = dquot_initialize(inode);
2556 + if (error)
2557 +@@ -242,7 +238,20 @@ ext4_set_acl(struct inode *inode, struct posix_acl *acl, int type)
2558 + if (IS_ERR(handle))
2559 + return PTR_ERR(handle);
2560 +
2561 ++ if ((type == ACL_TYPE_ACCESS) && acl) {
2562 ++ error = posix_acl_update_mode(inode, &mode, &acl);
2563 ++ if (error)
2564 ++ goto out_stop;
2565 ++ update_mode = 1;
2566 ++ }
2567 ++
2568 + error = __ext4_set_acl(handle, inode, type, acl);
2569 ++ if (!error && update_mode) {
2570 ++ inode->i_mode = mode;
2571 ++ inode->i_ctime = current_time(inode);
2572 ++ ext4_mark_inode_dirty(handle, inode);
2573 ++ }
2574 ++out_stop:
2575 + ext4_journal_stop(handle);
2576 + if (error == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
2577 + goto retry;
2578 +diff --git a/fs/ext4/file.c b/fs/ext4/file.c
2579 +index 02ce7e7bbdf5..407fc5aa32a7 100644
2580 +--- a/fs/ext4/file.c
2581 ++++ b/fs/ext4/file.c
2582 +@@ -521,6 +521,8 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
2583 + lastoff = page_offset(page);
2584 + bh = head = page_buffers(page);
2585 + do {
2586 ++ if (lastoff + bh->b_size <= startoff)
2587 ++ goto next;
2588 + if (buffer_uptodate(bh) ||
2589 + buffer_unwritten(bh)) {
2590 + if (whence == SEEK_DATA)
2591 +@@ -535,6 +537,7 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
2592 + unlock_page(page);
2593 + goto out;
2594 + }
2595 ++next:
2596 + lastoff += bh->b_size;
2597 + bh = bh->b_this_page;
2598 + } while (bh != head);
2599 +diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
2600 +index c3ed9021b781..035cd3f4785e 100644
2601 +--- a/fs/ext4/resize.c
2602 ++++ b/fs/ext4/resize.c
2603 +@@ -1927,7 +1927,8 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
2604 + n_desc_blocks = o_desc_blocks +
2605 + le16_to_cpu(es->s_reserved_gdt_blocks);
2606 + n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
2607 +- n_blocks_count = n_group * EXT4_BLOCKS_PER_GROUP(sb);
2608 ++ n_blocks_count = (ext4_fsblk_t)n_group *
2609 ++ EXT4_BLOCKS_PER_GROUP(sb);
2610 + n_group--; /* set to last group number */
2611 + }
2612 +
2613 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
2614 +index f5a7faac39a7..074169a54162 100644
2615 +--- a/fs/nfs/nfs4proc.c
2616 ++++ b/fs/nfs/nfs4proc.c
2617 +@@ -7407,7 +7407,7 @@ static void nfs4_exchange_id_done(struct rpc_task *task, void *data)
2618 + cdata->res.server_scope = NULL;
2619 + }
2620 + /* Save the EXCHANGE_ID verifier session trunk tests */
2621 +- memcpy(clp->cl_confirm.data, cdata->args.verifier->data,
2622 ++ memcpy(clp->cl_confirm.data, cdata->args.verifier.data,
2623 + sizeof(clp->cl_confirm.data));
2624 + }
2625 + out:
2626 +@@ -7444,7 +7444,6 @@ static const struct rpc_call_ops nfs4_exchange_id_call_ops = {
2627 + static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
2628 + u32 sp4_how, struct rpc_xprt *xprt)
2629 + {
2630 +- nfs4_verifier verifier;
2631 + struct rpc_message msg = {
2632 + .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
2633 + .rpc_cred = cred,
2634 +@@ -7468,8 +7467,7 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
2635 + return -ENOMEM;
2636 + }
2637 +
2638 +- if (!xprt)
2639 +- nfs4_init_boot_verifier(clp, &verifier);
2640 ++ nfs4_init_boot_verifier(clp, &calldata->args.verifier);
2641 +
2642 + status = nfs4_init_uniform_client_string(clp);
2643 + if (status)
2644 +@@ -7510,9 +7508,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
2645 + task_setup_data.rpc_xprt = xprt;
2646 + task_setup_data.flags =
2647 + RPC_TASK_SOFT|RPC_TASK_SOFTCONN|RPC_TASK_ASYNC;
2648 +- calldata->args.verifier = &clp->cl_confirm;
2649 +- } else {
2650 +- calldata->args.verifier = &verifier;
2651 ++ memcpy(calldata->args.verifier.data, clp->cl_confirm.data,
2652 ++ sizeof(calldata->args.verifier.data));
2653 + }
2654 + calldata->args.client = clp;
2655 + #ifdef CONFIG_NFS_V4_1_MIGRATION
2656 +diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
2657 +index 3aebfdc82b30..b0cbee2b2422 100644
2658 +--- a/fs/nfs/nfs4xdr.c
2659 ++++ b/fs/nfs/nfs4xdr.c
2660 +@@ -1765,7 +1765,7 @@ static void encode_exchange_id(struct xdr_stream *xdr,
2661 + int len = 0;
2662 +
2663 + encode_op_hdr(xdr, OP_EXCHANGE_ID, decode_exchange_id_maxsz, hdr);
2664 +- encode_nfs4_verifier(xdr, args->verifier);
2665 ++ encode_nfs4_verifier(xdr, &args->verifier);
2666 +
2667 + encode_string(xdr, strlen(args->client->cl_owner_id),
2668 + args->client->cl_owner_id);
2669 +diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
2670 +index dc22ba8c710f..e50a387959bf 100644
2671 +--- a/fs/ocfs2/acl.c
2672 ++++ b/fs/ocfs2/acl.c
2673 +@@ -240,18 +240,6 @@ int ocfs2_set_acl(handle_t *handle,
2674 + switch (type) {
2675 + case ACL_TYPE_ACCESS:
2676 + name_index = OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS;
2677 +- if (acl) {
2678 +- umode_t mode;
2679 +-
2680 +- ret = posix_acl_update_mode(inode, &mode, &acl);
2681 +- if (ret)
2682 +- return ret;
2683 +-
2684 +- ret = ocfs2_acl_set_mode(inode, di_bh,
2685 +- handle, mode);
2686 +- if (ret)
2687 +- return ret;
2688 +- }
2689 + break;
2690 + case ACL_TYPE_DEFAULT:
2691 + name_index = OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT;
2692 +@@ -289,7 +277,19 @@ int ocfs2_iop_set_acl(struct inode *inode, struct posix_acl *acl, int type)
2693 + had_lock = ocfs2_inode_lock_tracker(inode, &bh, 1, &oh);
2694 + if (had_lock < 0)
2695 + return had_lock;
2696 ++ if (type == ACL_TYPE_ACCESS && acl) {
2697 ++ umode_t mode;
2698 ++
2699 ++ status = posix_acl_update_mode(inode, &mode, &acl);
2700 ++ if (status)
2701 ++ goto unlock;
2702 ++
2703 ++ status = ocfs2_acl_set_mode(inode, bh, NULL, mode);
2704 ++ if (status)
2705 ++ goto unlock;
2706 ++ }
2707 + status = ocfs2_set_acl(NULL, inode, bh, type, acl, NULL, NULL);
2708 ++unlock:
2709 + ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
2710 + brelse(bh);
2711 + return status;
2712 +diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
2713 +index 1d622f276e3a..26f9591b04b1 100644
2714 +--- a/fs/userfaultfd.c
2715 ++++ b/fs/userfaultfd.c
2716 +@@ -851,6 +851,9 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
2717 + __wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, &range);
2718 + spin_unlock(&ctx->fault_pending_wqh.lock);
2719 +
2720 ++ /* Flush pending events that may still wait on event_wqh */
2721 ++ wake_up_all(&ctx->event_wqh);
2722 ++
2723 + wake_up_poll(&ctx->fd_wqh, POLLHUP);
2724 + userfaultfd_ctx_put(ctx);
2725 + return 0;
2726 +@@ -1645,6 +1648,8 @@ static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
2727 + ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start,
2728 + uffdio_zeropage.range.len);
2729 + mmput(ctx->mm);
2730 ++ } else {
2731 ++ return -ENOSPC;
2732 + }
2733 + if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage)))
2734 + return -EFAULT;
2735 +diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
2736 +index 0f2a80377520..30b86efea2bc 100644
2737 +--- a/include/linux/cpuhotplug.h
2738 ++++ b/include/linux/cpuhotplug.h
2739 +@@ -58,7 +58,6 @@ enum cpuhp_state {
2740 + CPUHP_XEN_EVTCHN_PREPARE,
2741 + CPUHP_ARM_SHMOBILE_SCU_PREPARE,
2742 + CPUHP_SH_SH3X_PREPARE,
2743 +- CPUHP_BLK_MQ_PREPARE,
2744 + CPUHP_NET_FLOW_PREPARE,
2745 + CPUHP_TOPOLOGY_PREPARE,
2746 + CPUHP_NET_IUCV_PREPARE,
2747 +diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
2748 +index 119a3f9604b0..898cfe2eeb42 100644
2749 +--- a/include/linux/cpuset.h
2750 ++++ b/include/linux/cpuset.h
2751 +@@ -18,6 +18,19 @@
2752 +
2753 + #ifdef CONFIG_CPUSETS
2754 +
2755 ++/*
2756 ++ * Static branch rewrites can happen in an arbitrary order for a given
2757 ++ * key. In code paths where we need to loop with read_mems_allowed_begin() and
2758 ++ * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
2759 ++ * to ensure that begin() always gets rewritten before retry() in the
2760 ++ * disabled -> enabled transition. If not, then if local irqs are disabled
2761 ++ * around the loop, we can deadlock since retry() would always be
2762 ++ * comparing the latest value of the mems_allowed seqcount against 0 as
2763 ++ * begin() still would see cpusets_enabled() as false. The enabled -> disabled
2764 ++ * transition should happen in reverse order for the same reasons (want to stop
2765 ++ * looking at real value of mems_allowed.sequence in retry() first).
2766 ++ */
2767 ++extern struct static_key_false cpusets_pre_enable_key;
2768 + extern struct static_key_false cpusets_enabled_key;
2769 + static inline bool cpusets_enabled(void)
2770 + {
2771 +@@ -32,12 +45,14 @@ static inline int nr_cpusets(void)
2772 +
2773 + static inline void cpuset_inc(void)
2774 + {
2775 ++ static_branch_inc(&cpusets_pre_enable_key);
2776 + static_branch_inc(&cpusets_enabled_key);
2777 + }
2778 +
2779 + static inline void cpuset_dec(void)
2780 + {
2781 + static_branch_dec(&cpusets_enabled_key);
2782 ++ static_branch_dec(&cpusets_pre_enable_key);
2783 + }
2784 +
2785 + extern int cpuset_init(void);
2786 +@@ -115,7 +130,7 @@ extern void cpuset_print_current_mems_allowed(void);
2787 + */
2788 + static inline unsigned int read_mems_allowed_begin(void)
2789 + {
2790 +- if (!cpusets_enabled())
2791 ++ if (!static_branch_unlikely(&cpusets_pre_enable_key))
2792 + return 0;
2793 +
2794 + return read_seqcount_begin(&current->mems_allowed_seq);
2795 +@@ -129,7 +144,7 @@ static inline unsigned int read_mems_allowed_begin(void)
2796 + */
2797 + static inline bool read_mems_allowed_retry(unsigned int seq)
2798 + {
2799 +- if (!cpusets_enabled())
2800 ++ if (!static_branch_unlikely(&cpusets_enabled_key))
2801 + return false;
2802 +
2803 + return read_seqcount_retry(&current->mems_allowed_seq, seq);
2804 +diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
2805 +index edafedb7b509..e21a0b3d6454 100644
2806 +--- a/include/linux/mlx5/mlx5_ifc.h
2807 ++++ b/include/linux/mlx5/mlx5_ifc.h
2808 +@@ -7718,8 +7718,10 @@ struct mlx5_ifc_pcam_reg_bits {
2809 + };
2810 +
2811 + struct mlx5_ifc_mcam_enhanced_features_bits {
2812 +- u8 reserved_at_0[0x7f];
2813 ++ u8 reserved_at_0[0x7d];
2814 +
2815 ++ u8 mtpps_enh_out_per_adj[0x1];
2816 ++ u8 mtpps_fs[0x1];
2817 + u8 pcie_performance_group[0x1];
2818 + };
2819 +
2820 +@@ -8115,7 +8117,8 @@ struct mlx5_ifc_mtpps_reg_bits {
2821 + u8 reserved_at_78[0x4];
2822 + u8 cap_pin_4_mode[0x4];
2823 +
2824 +- u8 reserved_at_80[0x80];
2825 ++ u8 field_select[0x20];
2826 ++ u8 reserved_at_a0[0x60];
2827 +
2828 + u8 enable[0x1];
2829 + u8 reserved_at_101[0xb];
2830 +@@ -8130,8 +8133,9 @@ struct mlx5_ifc_mtpps_reg_bits {
2831 +
2832 + u8 out_pulse_duration[0x10];
2833 + u8 out_periodic_adjustment[0x10];
2834 ++ u8 enhanced_out_periodic_adjustment[0x20];
2835 +
2836 +- u8 reserved_at_1a0[0x60];
2837 ++ u8 reserved_at_1c0[0x20];
2838 + };
2839 +
2840 + struct mlx5_ifc_mtppse_reg_bits {
2841 +diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
2842 +index 45cdb27791a3..ab8f7e11c160 100644
2843 +--- a/include/linux/mm_types.h
2844 ++++ b/include/linux/mm_types.h
2845 +@@ -494,6 +494,10 @@ struct mm_struct {
2846 + * PROT_NONE or PROT_NUMA mapped page.
2847 + */
2848 + bool tlb_flush_pending;
2849 ++#endif
2850 ++#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
2851 ++ /* See flush_tlb_batched_pending() */
2852 ++ bool tlb_flush_batched;
2853 + #endif
2854 + struct uprobes_state uprobes_state;
2855 + #ifdef CONFIG_HUGETLB_PAGE
2856 +diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
2857 +index b28c83475ee8..7882a07d973e 100644
2858 +--- a/include/linux/nfs_xdr.h
2859 ++++ b/include/linux/nfs_xdr.h
2860 +@@ -1222,7 +1222,7 @@ struct nfs41_state_protection {
2861 +
2862 + struct nfs41_exchange_id_args {
2863 + struct nfs_client *client;
2864 +- nfs4_verifier *verifier;
2865 ++ nfs4_verifier verifier;
2866 + u32 flags;
2867 + struct nfs41_state_protection state_protect;
2868 + };
2869 +diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
2870 +index c102ef65cb64..db6dc9dc0482 100644
2871 +--- a/include/linux/workqueue.h
2872 ++++ b/include/linux/workqueue.h
2873 +@@ -323,6 +323,7 @@ enum {
2874 +
2875 + __WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */
2876 + __WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */
2877 ++ __WQ_ORDERED_EXPLICIT = 1 << 18, /* internal: alloc_ordered_workqueue() */
2878 + __WQ_LEGACY = 1 << 18, /* internal: create*_workqueue() */
2879 +
2880 + WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
2881 +@@ -422,7 +423,8 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
2882 + * Pointer to the allocated workqueue on success, %NULL on failure.
2883 + */
2884 + #define alloc_ordered_workqueue(fmt, flags, args...) \
2885 +- alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
2886 ++ alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | \
2887 ++ __WQ_ORDERED_EXPLICIT | (flags), 1, ##args)
2888 +
2889 + #define create_workqueue(name) \
2890 + alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
2891 +diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
2892 +index 069582ee5d7f..06db0c3ec384 100644
2893 +--- a/include/net/sctp/sctp.h
2894 ++++ b/include/net/sctp/sctp.h
2895 +@@ -469,6 +469,8 @@ _sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member)
2896 +
2897 + #define _sctp_walk_params(pos, chunk, end, member)\
2898 + for (pos.v = chunk->member;\
2899 ++ (pos.v + offsetof(struct sctp_paramhdr, length) + sizeof(pos.p->length) <=\
2900 ++ (void *)chunk + end) &&\
2901 + pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
2902 + ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\
2903 + pos.v += SCTP_PAD4(ntohs(pos.p->length)))
2904 +@@ -479,6 +481,8 @@ _sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length))
2905 + #define _sctp_walk_errors(err, chunk_hdr, end)\
2906 + for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
2907 + sizeof(sctp_chunkhdr_t));\
2908 ++ ((void *)err + offsetof(sctp_errhdr_t, length) + sizeof(err->length) <=\
2909 ++ (void *)chunk_hdr + end) &&\
2910 + (void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\
2911 + ntohs(err->length) >= sizeof(sctp_errhdr_t); \
2912 + err = (sctp_errhdr_t *)((void *)err + SCTP_PAD4(ntohs(err->length))))
2913 +diff --git a/include/net/udp.h b/include/net/udp.h
2914 +index 3391dbd73959..1933442cf1a6 100644
2915 +--- a/include/net/udp.h
2916 ++++ b/include/net/udp.h
2917 +@@ -265,6 +265,7 @@ static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
2918 + }
2919 +
2920 + void udp_v4_early_demux(struct sk_buff *skb);
2921 ++void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
2922 + int udp_get_port(struct sock *sk, unsigned short snum,
2923 + int (*saddr_cmp)(const struct sock *,
2924 + const struct sock *));
2925 +diff --git a/include/sound/soc.h b/include/sound/soc.h
2926 +index 5170fd81e1fd..375893d8d4a5 100644
2927 +--- a/include/sound/soc.h
2928 ++++ b/include/sound/soc.h
2929 +@@ -795,10 +795,6 @@ struct snd_soc_component_driver {
2930 + int (*suspend)(struct snd_soc_component *);
2931 + int (*resume)(struct snd_soc_component *);
2932 +
2933 +- /* pcm creation and destruction */
2934 +- int (*pcm_new)(struct snd_soc_pcm_runtime *);
2935 +- void (*pcm_free)(struct snd_pcm *);
2936 +-
2937 + /* DT */
2938 + int (*of_xlate_dai_name)(struct snd_soc_component *component,
2939 + struct of_phandle_args *args,
2940 +@@ -872,8 +868,6 @@ struct snd_soc_component {
2941 + void (*remove)(struct snd_soc_component *);
2942 + int (*suspend)(struct snd_soc_component *);
2943 + int (*resume)(struct snd_soc_component *);
2944 +- int (*pcm_new)(struct snd_soc_pcm_runtime *);
2945 +- void (*pcm_free)(struct snd_pcm *);
2946 +
2947 + /* machine specific init */
2948 + int (*init)(struct snd_soc_component *component);
2949 +diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h
2950 +index 00f4d6bf048f..7a01568e5e22 100644
2951 +--- a/kernel/cgroup/cgroup-internal.h
2952 ++++ b/kernel/cgroup/cgroup-internal.h
2953 +@@ -33,6 +33,9 @@ struct cgroup_taskset {
2954 + struct list_head src_csets;
2955 + struct list_head dst_csets;
2956 +
2957 ++ /* the number of tasks in the set */
2958 ++ int nr_tasks;
2959 ++
2960 + /* the subsys currently being processed */
2961 + int ssid;
2962 +
2963 +diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
2964 +index 8d4e85eae42c..2c62e4b3f198 100644
2965 +--- a/kernel/cgroup/cgroup.c
2966 ++++ b/kernel/cgroup/cgroup.c
2967 +@@ -1948,6 +1948,8 @@ static void cgroup_migrate_add_task(struct task_struct *task,
2968 + if (!cset->mg_src_cgrp)
2969 + return;
2970 +
2971 ++ mgctx->tset.nr_tasks++;
2972 ++
2973 + list_move_tail(&task->cg_list, &cset->mg_tasks);
2974 + if (list_empty(&cset->mg_node))
2975 + list_add_tail(&cset->mg_node,
2976 +@@ -2036,21 +2038,19 @@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx)
2977 + struct css_set *cset, *tmp_cset;
2978 + int ssid, failed_ssid, ret;
2979 +
2980 +- /* methods shouldn't be called if no task is actually migrating */
2981 +- if (list_empty(&tset->src_csets))
2982 +- return 0;
2983 +-
2984 + /* check that we can legitimately attach to the cgroup */
2985 +- do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
2986 +- if (ss->can_attach) {
2987 +- tset->ssid = ssid;
2988 +- ret = ss->can_attach(tset);
2989 +- if (ret) {
2990 +- failed_ssid = ssid;
2991 +- goto out_cancel_attach;
2992 ++ if (tset->nr_tasks) {
2993 ++ do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
2994 ++ if (ss->can_attach) {
2995 ++ tset->ssid = ssid;
2996 ++ ret = ss->can_attach(tset);
2997 ++ if (ret) {
2998 ++ failed_ssid = ssid;
2999 ++ goto out_cancel_attach;
3000 ++ }
3001 + }
3002 +- }
3003 +- } while_each_subsys_mask();
3004 ++ } while_each_subsys_mask();
3005 ++ }
3006 +
3007 + /*
3008 + * Now that we're guaranteed success, proceed to move all tasks to
3009 +@@ -2077,25 +2077,29 @@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx)
3010 + */
3011 + tset->csets = &tset->dst_csets;
3012 +
3013 +- do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
3014 +- if (ss->attach) {
3015 +- tset->ssid = ssid;
3016 +- ss->attach(tset);
3017 +- }
3018 +- } while_each_subsys_mask();
3019 ++ if (tset->nr_tasks) {
3020 ++ do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
3021 ++ if (ss->attach) {
3022 ++ tset->ssid = ssid;
3023 ++ ss->attach(tset);
3024 ++ }
3025 ++ } while_each_subsys_mask();
3026 ++ }
3027 +
3028 + ret = 0;
3029 + goto out_release_tset;
3030 +
3031 + out_cancel_attach:
3032 +- do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
3033 +- if (ssid == failed_ssid)
3034 +- break;
3035 +- if (ss->cancel_attach) {
3036 +- tset->ssid = ssid;
3037 +- ss->cancel_attach(tset);
3038 +- }
3039 +- } while_each_subsys_mask();
3040 ++ if (tset->nr_tasks) {
3041 ++ do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
3042 ++ if (ssid == failed_ssid)
3043 ++ break;
3044 ++ if (ss->cancel_attach) {
3045 ++ tset->ssid = ssid;
3046 ++ ss->cancel_attach(tset);
3047 ++ }
3048 ++ } while_each_subsys_mask();
3049 ++ }
3050 + out_release_tset:
3051 + spin_lock_irq(&css_set_lock);
3052 + list_splice_init(&tset->dst_csets, &tset->src_csets);
3053 +@@ -2917,11 +2921,11 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
3054 + cgrp->subtree_control &= ~disable;
3055 +
3056 + ret = cgroup_apply_control(cgrp);
3057 +-
3058 + cgroup_finalize_control(cgrp, ret);
3059 ++ if (ret)
3060 ++ goto out_unlock;
3061 +
3062 + kernfs_activate(cgrp->kn);
3063 +- ret = 0;
3064 + out_unlock:
3065 + cgroup_kn_unlock(of->kn);
3066 + return ret ?: nbytes;
3067 +@@ -4574,6 +4578,10 @@ int __init cgroup_init(void)
3068 +
3069 + if (ss->bind)
3070 + ss->bind(init_css_set.subsys[ssid]);
3071 ++
3072 ++ mutex_lock(&cgroup_mutex);
3073 ++ css_populate_dir(init_css_set.subsys[ssid]);
3074 ++ mutex_unlock(&cgroup_mutex);
3075 + }
3076 +
3077 + /* init_css_set.subsys[] has been updated, re-hash */
3078 +diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
3079 +index ae643412948a..8f26927f16a1 100644
3080 +--- a/kernel/cgroup/cpuset.c
3081 ++++ b/kernel/cgroup/cpuset.c
3082 +@@ -63,6 +63,7 @@
3083 + #include <linux/cgroup.h>
3084 + #include <linux/wait.h>
3085 +
3086 ++DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
3087 + DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
3088 +
3089 + /* See "Frequency meter" comments, below. */
3090 +diff --git a/kernel/time/timer.c b/kernel/time/timer.c
3091 +index 152a706ef8b8..d3f33020a06b 100644
3092 +--- a/kernel/time/timer.c
3093 ++++ b/kernel/time/timer.c
3094 +@@ -1495,7 +1495,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
3095 + base->is_idle = false;
3096 + } else {
3097 + if (!is_max_delta)
3098 +- expires = basem + (nextevt - basej) * TICK_NSEC;
3099 ++ expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
3100 + /*
3101 + * If we expect to sleep more than a tick, mark the base idle:
3102 + */
3103 +diff --git a/kernel/workqueue.c b/kernel/workqueue.c
3104 +index c74bf39ef764..6effbcb7a3d6 100644
3105 +--- a/kernel/workqueue.c
3106 ++++ b/kernel/workqueue.c
3107 +@@ -3744,8 +3744,12 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
3108 + return -EINVAL;
3109 +
3110 + /* creating multiple pwqs breaks ordering guarantee */
3111 +- if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
3112 +- return -EINVAL;
3113 ++ if (!list_empty(&wq->pwqs)) {
3114 ++ if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
3115 ++ return -EINVAL;
3116 ++
3117 ++ wq->flags &= ~__WQ_ORDERED;
3118 ++ }
3119 +
3120 + ctx = apply_wqattrs_prepare(wq, attrs);
3121 + if (!ctx)
3122 +@@ -3929,6 +3933,16 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
3123 + struct workqueue_struct *wq;
3124 + struct pool_workqueue *pwq;
3125 +
3126 ++ /*
3127 ++ * Unbound && max_active == 1 used to imply ordered, which is no
3128 ++ * longer the case on NUMA machines due to per-node pools. While
3129 ++ * alloc_ordered_workqueue() is the right way to create an ordered
3130 ++ * workqueue, keep the previous behavior to avoid subtle breakages
3131 ++ * on NUMA.
3132 ++ */
3133 ++ if ((flags & WQ_UNBOUND) && max_active == 1)
3134 ++ flags |= __WQ_ORDERED;
3135 ++
3136 + /* see the comment above the definition of WQ_POWER_EFFICIENT */
3137 + if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
3138 + flags |= WQ_UNBOUND;
3139 +@@ -4119,13 +4133,14 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
3140 + struct pool_workqueue *pwq;
3141 +
3142 + /* disallow meddling with max_active for ordered workqueues */
3143 +- if (WARN_ON(wq->flags & __WQ_ORDERED))
3144 ++ if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
3145 + return;
3146 +
3147 + max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
3148 +
3149 + mutex_lock(&wq->mutex);
3150 +
3151 ++ wq->flags &= ~__WQ_ORDERED;
3152 + wq->saved_max_active = max_active;
3153 +
3154 + for_each_pwq(pwq, wq)
3155 +@@ -5253,7 +5268,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
3156 + * attributes breaks ordering guarantee. Disallow exposing ordered
3157 + * workqueues.
3158 + */
3159 +- if (WARN_ON(wq->flags & __WQ_ORDERED))
3160 ++ if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
3161 + return -EINVAL;
3162 +
3163 + wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
3164 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
3165 +index 3eedb187e549..cc289933f462 100644
3166 +--- a/mm/hugetlb.c
3167 ++++ b/mm/hugetlb.c
3168 +@@ -4095,6 +4095,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
3169 + unsigned long vaddr = *position;
3170 + unsigned long remainder = *nr_pages;
3171 + struct hstate *h = hstate_vma(vma);
3172 ++ int err = -EFAULT;
3173 +
3174 + while (vaddr < vma->vm_end && remainder) {
3175 + pte_t *pte;
3176 +@@ -4170,11 +4171,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
3177 + }
3178 + ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
3179 + if (ret & VM_FAULT_ERROR) {
3180 +- int err = vm_fault_to_errno(ret, flags);
3181 +-
3182 +- if (err)
3183 +- return err;
3184 +-
3185 ++ err = vm_fault_to_errno(ret, flags);
3186 + remainder = 0;
3187 + break;
3188 + }
3189 +@@ -4229,7 +4226,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
3190 + */
3191 + *position = vaddr;
3192 +
3193 +- return i ? i : -EFAULT;
3194 ++ return i ? i : err;
3195 + }
3196 +
3197 + #ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
3198 +diff --git a/mm/internal.h b/mm/internal.h
3199 +index 0e4f558412fb..9c8a2bfb975c 100644
3200 +--- a/mm/internal.h
3201 ++++ b/mm/internal.h
3202 +@@ -498,6 +498,7 @@ extern struct workqueue_struct *mm_percpu_wq;
3203 + #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
3204 + void try_to_unmap_flush(void);
3205 + void try_to_unmap_flush_dirty(void);
3206 ++void flush_tlb_batched_pending(struct mm_struct *mm);
3207 + #else
3208 + static inline void try_to_unmap_flush(void)
3209 + {
3210 +@@ -505,7 +506,9 @@ static inline void try_to_unmap_flush(void)
3211 + static inline void try_to_unmap_flush_dirty(void)
3212 + {
3213 + }
3214 +-
3215 ++static inline void flush_tlb_batched_pending(struct mm_struct *mm)
3216 ++{
3217 ++}
3218 + #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
3219 +
3220 + extern const struct trace_print_flags pageflag_names[];
3221 +diff --git a/mm/madvise.c b/mm/madvise.c
3222 +index 25b78ee4fc2c..75d2cffbe61d 100644
3223 +--- a/mm/madvise.c
3224 ++++ b/mm/madvise.c
3225 +@@ -320,6 +320,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
3226 +
3227 + tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
3228 + orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
3229 ++ flush_tlb_batched_pending(mm);
3230 + arch_enter_lazy_mmu_mode();
3231 + for (; addr != end; pte++, addr += PAGE_SIZE) {
3232 + ptent = *pte;
3233 +diff --git a/mm/memory.c b/mm/memory.c
3234 +index bb11c474857e..b0c3d1556a94 100644
3235 +--- a/mm/memory.c
3236 ++++ b/mm/memory.c
3237 +@@ -1197,6 +1197,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
3238 + init_rss_vec(rss);
3239 + start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
3240 + pte = start_pte;
3241 ++ flush_tlb_batched_pending(mm);
3242 + arch_enter_lazy_mmu_mode();
3243 + do {
3244 + pte_t ptent = *pte;
3245 +diff --git a/mm/mprotect.c b/mm/mprotect.c
3246 +index 8edd0d576254..f42749e6bf4e 100644
3247 +--- a/mm/mprotect.c
3248 ++++ b/mm/mprotect.c
3249 +@@ -66,6 +66,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
3250 + atomic_read(&vma->vm_mm->mm_users) == 1)
3251 + target_node = numa_node_id();
3252 +
3253 ++ flush_tlb_batched_pending(vma->vm_mm);
3254 + arch_enter_lazy_mmu_mode();
3255 + do {
3256 + oldpte = *pte;
3257 +diff --git a/mm/mremap.c b/mm/mremap.c
3258 +index cd8a1b199ef9..3f23715d3c69 100644
3259 +--- a/mm/mremap.c
3260 ++++ b/mm/mremap.c
3261 +@@ -152,6 +152,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
3262 + new_ptl = pte_lockptr(mm, new_pmd);
3263 + if (new_ptl != old_ptl)
3264 + spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
3265 ++ flush_tlb_batched_pending(vma->vm_mm);
3266 + arch_enter_lazy_mmu_mode();
3267 +
3268 + for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
3269 +@@ -428,6 +429,7 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
3270 + static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
3271 + unsigned long new_addr, unsigned long new_len, bool *locked,
3272 + struct vm_userfaultfd_ctx *uf,
3273 ++ struct list_head *uf_unmap_early,
3274 + struct list_head *uf_unmap)
3275 + {
3276 + struct mm_struct *mm = current->mm;
3277 +@@ -446,7 +448,7 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
3278 + if (addr + old_len > new_addr && new_addr + new_len > addr)
3279 + goto out;
3280 +
3281 +- ret = do_munmap(mm, new_addr, new_len, NULL);
3282 ++ ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
3283 + if (ret)
3284 + goto out;
3285 +
3286 +@@ -514,6 +516,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
3287 + unsigned long charged = 0;
3288 + bool locked = false;
3289 + struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
3290 ++ LIST_HEAD(uf_unmap_early);
3291 + LIST_HEAD(uf_unmap);
3292 +
3293 + if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
3294 +@@ -541,7 +544,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
3295 +
3296 + if (flags & MREMAP_FIXED) {
3297 + ret = mremap_to(addr, old_len, new_addr, new_len,
3298 +- &locked, &uf, &uf_unmap);
3299 ++ &locked, &uf, &uf_unmap_early, &uf_unmap);
3300 + goto out;
3301 + }
3302 +
3303 +@@ -621,6 +624,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
3304 + up_write(&current->mm->mmap_sem);
3305 + if (locked && new_len > old_len)
3306 + mm_populate(new_addr + old_len, new_len - old_len);
3307 ++ userfaultfd_unmap_complete(mm, &uf_unmap_early);
3308 + mremap_userfaultfd_complete(&uf, addr, new_addr, old_len);
3309 + userfaultfd_unmap_complete(mm, &uf_unmap);
3310 + return ret;
3311 +diff --git a/mm/rmap.c b/mm/rmap.c
3312 +index d405f0e0ee96..9835d19fe143 100644
3313 +--- a/mm/rmap.c
3314 ++++ b/mm/rmap.c
3315 +@@ -616,6 +616,13 @@ static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
3316 + cpumask_or(&tlb_ubc->cpumask, &tlb_ubc->cpumask, mm_cpumask(mm));
3317 + tlb_ubc->flush_required = true;
3318 +
3319 ++ /*
3320 ++ * Ensure compiler does not re-order the setting of tlb_flush_batched
3321 ++ * before the PTE is cleared.
3322 ++ */
3323 ++ barrier();
3324 ++ mm->tlb_flush_batched = true;
3325 ++
3326 + /*
3327 + * If the PTE was dirty then it's best to assume it's writable. The
3328 + * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
3329 +@@ -643,6 +650,35 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
3330 +
3331 + return should_defer;
3332 + }
3333 ++
3334 ++/*
3335 ++ * Reclaim unmaps pages under the PTL but do not flush the TLB prior to
3336 ++ * releasing the PTL if TLB flushes are batched. It's possible for a parallel
3337 ++ * operation such as mprotect or munmap to race between reclaim unmapping
3338 ++ * the page and flushing the page. If this race occurs, it potentially allows
3339 ++ * access to data via a stale TLB entry. Tracking all mm's that have TLB
3340 ++ * batching in flight would be expensive during reclaim so instead track
3341 ++ * whether TLB batching occurred in the past and if so then do a flush here
3342 ++ * if required. This will cost one additional flush per reclaim cycle paid
3343 ++ * by the first operation at risk such as mprotect and mumap.
3344 ++ *
3345 ++ * This must be called under the PTL so that an access to tlb_flush_batched
3346 ++ * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
3347 ++ * via the PTL.
3348 ++ */
3349 ++void flush_tlb_batched_pending(struct mm_struct *mm)
3350 ++{
3351 ++ if (mm->tlb_flush_batched) {
3352 ++ flush_tlb_mm(mm);
3353 ++
3354 ++ /*
3355 ++ * Do not allow the compiler to re-order the clearing of
3356 ++ * tlb_flush_batched before the tlb is flushed.
3357 ++ */
3358 ++ barrier();
3359 ++ mm->tlb_flush_batched = false;
3360 ++ }
3361 ++}
3362 + #else
3363 + static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
3364 + {
3365 +diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
3366 +index 27fad31784a8..18f9cb9aa87d 100644
3367 +--- a/net/core/dev_ioctl.c
3368 ++++ b/net/core/dev_ioctl.c
3369 +@@ -28,6 +28,7 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg)
3370 +
3371 + if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3372 + return -EFAULT;
3373 ++ ifr.ifr_name[IFNAMSIZ-1] = 0;
3374 +
3375 + error = netdev_get_name(net, ifr.ifr_name, ifr.ifr_ifindex);
3376 + if (error)
3377 +@@ -423,6 +424,8 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
3378 + if (copy_from_user(&iwr, arg, sizeof(iwr)))
3379 + return -EFAULT;
3380 +
3381 ++ iwr.ifr_name[sizeof(iwr.ifr_name) - 1] = 0;
3382 ++
3383 + return wext_handle_ioctl(net, &iwr, cmd, arg);
3384 + }
3385 +
3386 +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
3387 +index 467a2f4510a7..52bfeb60c886 100644
3388 +--- a/net/core/rtnetlink.c
3389 ++++ b/net/core/rtnetlink.c
3390 +@@ -1977,7 +1977,8 @@ static int do_setlink(const struct sk_buff *skb,
3391 + struct sockaddr *sa;
3392 + int len;
3393 +
3394 +- len = sizeof(sa_family_t) + dev->addr_len;
3395 ++ len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
3396 ++ sizeof(*sa));
3397 + sa = kmalloc(len, GFP_KERNEL);
3398 + if (!sa) {
3399 + err = -ENOMEM;
3400 +@@ -4165,6 +4166,7 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
3401 +
3402 + switch (event) {
3403 + case NETDEV_REBOOT:
3404 ++ case NETDEV_CHANGEADDR:
3405 + case NETDEV_CHANGENAME:
3406 + case NETDEV_FEAT_CHANGE:
3407 + case NETDEV_BONDING_FAILOVER:
3408 +diff --git a/net/dccp/feat.c b/net/dccp/feat.c
3409 +index 1704948e6a12..f227f002c73d 100644
3410 +--- a/net/dccp/feat.c
3411 ++++ b/net/dccp/feat.c
3412 +@@ -1471,9 +1471,12 @@ int dccp_feat_init(struct sock *sk)
3413 + * singleton values (which always leads to failure).
3414 + * These settings can still (later) be overridden via sockopts.
3415 + */
3416 +- if (ccid_get_builtin_ccids(&tx.val, &tx.len) ||
3417 +- ccid_get_builtin_ccids(&rx.val, &rx.len))
3418 ++ if (ccid_get_builtin_ccids(&tx.val, &tx.len))
3419 + return -ENOBUFS;
3420 ++ if (ccid_get_builtin_ccids(&rx.val, &rx.len)) {
3421 ++ kfree(tx.val);
3422 ++ return -ENOBUFS;
3423 ++ }
3424 +
3425 + if (!dccp_feat_prefer(sysctl_dccp_tx_ccid, tx.val, tx.len) ||
3426 + !dccp_feat_prefer(sysctl_dccp_rx_ccid, rx.val, rx.len))
3427 +diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
3428 +index f75482bdee9a..97368f229876 100644
3429 +--- a/net/dccp/ipv4.c
3430 ++++ b/net/dccp/ipv4.c
3431 +@@ -631,6 +631,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
3432 + goto drop_and_free;
3433 +
3434 + inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
3435 ++ reqsk_put(req);
3436 + return 0;
3437 +
3438 + drop_and_free:
3439 +diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
3440 +index 992621172220..cf3e40df4765 100644
3441 +--- a/net/dccp/ipv6.c
3442 ++++ b/net/dccp/ipv6.c
3443 +@@ -380,6 +380,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
3444 + goto drop_and_free;
3445 +
3446 + inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
3447 ++ reqsk_put(req);
3448 + return 0;
3449 +
3450 + drop_and_free:
3451 +diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
3452 +index 83e3ed258467..3acc8261477c 100644
3453 +--- a/net/ipv4/fib_frontend.c
3454 ++++ b/net/ipv4/fib_frontend.c
3455 +@@ -1327,13 +1327,14 @@ static struct pernet_operations fib_net_ops = {
3456 +
3457 + void __init ip_fib_init(void)
3458 + {
3459 +- rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
3460 +- rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
3461 +- rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
3462 ++ fib_trie_init();
3463 +
3464 + register_pernet_subsys(&fib_net_ops);
3465 ++
3466 + register_netdevice_notifier(&fib_netdev_notifier);
3467 + register_inetaddr_notifier(&fib_inetaddr_notifier);
3468 +
3469 +- fib_trie_init();
3470 ++ rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
3471 ++ rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
3472 ++ rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
3473 + }
3474 +diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
3475 +index ad9ad4aab5da..ce7bc2e5175a 100644
3476 +--- a/net/ipv4/fib_semantics.c
3477 ++++ b/net/ipv4/fib_semantics.c
3478 +@@ -1372,7 +1372,7 @@ static int call_fib_nh_notifiers(struct fib_nh *fib_nh,
3479 + return call_fib_notifiers(dev_net(fib_nh->nh_dev), event_type,
3480 + &info.info);
3481 + case FIB_EVENT_NH_DEL:
3482 +- if ((IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
3483 ++ if ((in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
3484 + fib_nh->nh_flags & RTNH_F_LINKDOWN) ||
3485 + (fib_nh->nh_flags & RTNH_F_DEAD))
3486 + return call_fib_notifiers(dev_net(fib_nh->nh_dev),
3487 +diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
3488 +index 0257d965f111..4a97fe20f59e 100644
3489 +--- a/net/ipv4/syncookies.c
3490 ++++ b/net/ipv4/syncookies.c
3491 +@@ -332,6 +332,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
3492 + treq->rcv_isn = ntohl(th->seq) - 1;
3493 + treq->snt_isn = cookie;
3494 + treq->ts_off = 0;
3495 ++ treq->txhash = net_tx_rndhash();
3496 + req->mss = mss;
3497 + ireq->ir_num = ntohs(th->dest);
3498 + ireq->ir_rmt_port = th->source;
3499 +diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
3500 +index b89bce4c721e..96c95c8d981e 100644
3501 +--- a/net/ipv4/tcp_bbr.c
3502 ++++ b/net/ipv4/tcp_bbr.c
3503 +@@ -113,7 +113,8 @@ struct bbr {
3504 + cwnd_gain:10, /* current gain for setting cwnd */
3505 + full_bw_cnt:3, /* number of rounds without large bw gains */
3506 + cycle_idx:3, /* current index in pacing_gain cycle array */
3507 +- unused_b:6;
3508 ++ has_seen_rtt:1, /* have we seen an RTT sample yet? */
3509 ++ unused_b:5;
3510 + u32 prior_cwnd; /* prior cwnd upon entering loss recovery */
3511 + u32 full_bw; /* recent bw, to estimate if pipe is full */
3512 + };
3513 +@@ -212,6 +213,35 @@ static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
3514 + return rate >> BW_SCALE;
3515 + }
3516 +
3517 ++/* Convert a BBR bw and gain factor to a pacing rate in bytes per second. */
3518 ++static u32 bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain)
3519 ++{
3520 ++ u64 rate = bw;
3521 ++
3522 ++ rate = bbr_rate_bytes_per_sec(sk, rate, gain);
3523 ++ rate = min_t(u64, rate, sk->sk_max_pacing_rate);
3524 ++ return rate;
3525 ++}
3526 ++
3527 ++/* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
3528 ++static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
3529 ++{
3530 ++ struct tcp_sock *tp = tcp_sk(sk);
3531 ++ struct bbr *bbr = inet_csk_ca(sk);
3532 ++ u64 bw;
3533 ++ u32 rtt_us;
3534 ++
3535 ++ if (tp->srtt_us) { /* any RTT sample yet? */
3536 ++ rtt_us = max(tp->srtt_us >> 3, 1U);
3537 ++ bbr->has_seen_rtt = 1;
3538 ++ } else { /* no RTT sample yet */
3539 ++ rtt_us = USEC_PER_MSEC; /* use nominal default RTT */
3540 ++ }
3541 ++ bw = (u64)tp->snd_cwnd * BW_UNIT;
3542 ++ do_div(bw, rtt_us);
3543 ++ sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain);
3544 ++}
3545 ++
3546 + /* Pace using current bw estimate and a gain factor. In order to help drive the
3547 + * network toward lower queues while maintaining high utilization and low
3548 + * latency, the average pacing rate aims to be slightly (~1%) lower than the
3549 +@@ -221,12 +251,13 @@ static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
3550 + */
3551 + static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
3552 + {
3553 ++ struct tcp_sock *tp = tcp_sk(sk);
3554 + struct bbr *bbr = inet_csk_ca(sk);
3555 +- u64 rate = bw;
3556 ++ u32 rate = bbr_bw_to_pacing_rate(sk, bw, gain);
3557 +
3558 +- rate = bbr_rate_bytes_per_sec(sk, rate, gain);
3559 +- rate = min_t(u64, rate, sk->sk_max_pacing_rate);
3560 +- if (bbr->mode != BBR_STARTUP || rate > sk->sk_pacing_rate)
3561 ++ if (unlikely(!bbr->has_seen_rtt && tp->srtt_us))
3562 ++ bbr_init_pacing_rate_from_rtt(sk);
3563 ++ if (bbr_full_bw_reached(sk) || rate > sk->sk_pacing_rate)
3564 + sk->sk_pacing_rate = rate;
3565 + }
3566 +
3567 +@@ -799,7 +830,6 @@ static void bbr_init(struct sock *sk)
3568 + {
3569 + struct tcp_sock *tp = tcp_sk(sk);
3570 + struct bbr *bbr = inet_csk_ca(sk);
3571 +- u64 bw;
3572 +
3573 + bbr->prior_cwnd = 0;
3574 + bbr->tso_segs_goal = 0; /* default segs per skb until first ACK */
3575 +@@ -815,11 +845,8 @@ static void bbr_init(struct sock *sk)
3576 +
3577 + minmax_reset(&bbr->bw, bbr->rtt_cnt, 0); /* init max bw to 0 */
3578 +
3579 +- /* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
3580 +- bw = (u64)tp->snd_cwnd * BW_UNIT;
3581 +- do_div(bw, (tp->srtt_us >> 3) ? : USEC_PER_MSEC);
3582 +- sk->sk_pacing_rate = 0; /* force an update of sk_pacing_rate */
3583 +- bbr_set_pacing_rate(sk, bw, bbr_high_gain);
3584 ++ bbr->has_seen_rtt = 0;
3585 ++ bbr_init_pacing_rate_from_rtt(sk);
3586 +
3587 + bbr->restore_cwnd = 0;
3588 + bbr->round_start = 0;
3589 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
3590 +index 1d6219bf2d6b..b9a84eba60b8 100644
3591 +--- a/net/ipv4/udp.c
3592 ++++ b/net/ipv4/udp.c
3593 +@@ -1762,7 +1762,7 @@ static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
3594 + /* For TCP sockets, sk_rx_dst is protected by socket lock
3595 + * For UDP, we use xchg() to guard against concurrent changes.
3596 + */
3597 +-static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
3598 ++void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
3599 + {
3600 + struct dst_entry *old;
3601 +
3602 +@@ -2120,6 +2120,7 @@ void udp_destroy_sock(struct sock *sk)
3603 + encap_destroy(sk);
3604 + }
3605 + }
3606 ++EXPORT_SYMBOL(udp_sk_rx_dst_set);
3607 +
3608 + /*
3609 + * Socket option code for UDP
3610 +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
3611 +index 1699acb2fa2c..be0306778938 100644
3612 +--- a/net/ipv6/ip6_output.c
3613 ++++ b/net/ipv6/ip6_output.c
3614 +@@ -673,8 +673,6 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
3615 + *prevhdr = NEXTHDR_FRAGMENT;
3616 + tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
3617 + if (!tmp_hdr) {
3618 +- IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
3619 +- IPSTATS_MIB_FRAGFAILS);
3620 + err = -ENOMEM;
3621 + goto fail;
3622 + }
3623 +@@ -793,8 +791,6 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
3624 + frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
3625 + hroom + troom, GFP_ATOMIC);
3626 + if (!frag) {
3627 +- IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
3628 +- IPSTATS_MIB_FRAGFAILS);
3629 + err = -ENOMEM;
3630 + goto fail;
3631 + }
3632 +diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
3633 +index e9065b8d3af8..abb2c307fbe8 100644
3634 +--- a/net/ipv6/output_core.c
3635 ++++ b/net/ipv6/output_core.c
3636 +@@ -78,7 +78,7 @@ EXPORT_SYMBOL(ipv6_select_ident);
3637 +
3638 + int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
3639 + {
3640 +- u16 offset = sizeof(struct ipv6hdr);
3641 ++ unsigned int offset = sizeof(struct ipv6hdr);
3642 + unsigned int packet_len = skb_tail_pointer(skb) -
3643 + skb_network_header(skb);
3644 + int found_rhdr = 0;
3645 +@@ -86,6 +86,7 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
3646 +
3647 + while (offset <= packet_len) {
3648 + struct ipv6_opt_hdr *exthdr;
3649 ++ unsigned int len;
3650 +
3651 + switch (**nexthdr) {
3652 +
3653 +@@ -111,7 +112,10 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
3654 +
3655 + exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
3656 + offset);
3657 +- offset += ipv6_optlen(exthdr);
3658 ++ len = ipv6_optlen(exthdr);
3659 ++ if (len + offset >= IPV6_MAXPLEN)
3660 ++ return -EINVAL;
3661 ++ offset += len;
3662 + *nexthdr = &exthdr->nexthdr;
3663 + }
3664 +
3665 +diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
3666 +index 5abc3692b901..ca7895454cec 100644
3667 +--- a/net/ipv6/syncookies.c
3668 ++++ b/net/ipv6/syncookies.c
3669 +@@ -215,6 +215,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
3670 + treq->rcv_isn = ntohl(th->seq) - 1;
3671 + treq->snt_isn = cookie;
3672 + treq->ts_off = 0;
3673 ++ treq->txhash = net_tx_rndhash();
3674 +
3675 + /*
3676 + * We need to lookup the dst_entry to get the correct window size.
3677 +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
3678 +index 75703fda23e7..592270c310f4 100644
3679 +--- a/net/ipv6/udp.c
3680 ++++ b/net/ipv6/udp.c
3681 +@@ -291,11 +291,7 @@ static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
3682 + struct udp_table *udptable)
3683 + {
3684 + const struct ipv6hdr *iph = ipv6_hdr(skb);
3685 +- struct sock *sk;
3686 +
3687 +- sk = skb_steal_sock(skb);
3688 +- if (unlikely(sk))
3689 +- return sk;
3690 + return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
3691 + &iph->daddr, dport, inet6_iif(skb),
3692 + udptable, skb);
3693 +@@ -798,6 +794,24 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
3694 + if (udp6_csum_init(skb, uh, proto))
3695 + goto csum_error;
3696 +
3697 ++ /* Check if the socket is already available, e.g. due to early demux */
3698 ++ sk = skb_steal_sock(skb);
3699 ++ if (sk) {
3700 ++ struct dst_entry *dst = skb_dst(skb);
3701 ++ int ret;
3702 ++
3703 ++ if (unlikely(sk->sk_rx_dst != dst))
3704 ++ udp_sk_rx_dst_set(sk, dst);
3705 ++
3706 ++ ret = udpv6_queue_rcv_skb(sk, skb);
3707 ++ sock_put(sk);
3708 ++
3709 ++ /* a return value > 0 means to resubmit the input */
3710 ++ if (ret > 0)
3711 ++ return ret;
3712 ++ return 0;
3713 ++ }
3714 ++
3715 + /*
3716 + * Multicast receive code
3717 + */
3718 +@@ -806,11 +820,6 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
3719 + saddr, daddr, udptable, proto);
3720 +
3721 + /* Unicast */
3722 +-
3723 +- /*
3724 +- * check socket cache ... must talk to Alan about his plans
3725 +- * for sock caches... i'll skip this for now.
3726 +- */
3727 + sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
3728 + if (sk) {
3729 + int ret;
3730 +diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
3731 +index 08679ebb3068..b3bf66bbf4dc 100644
3732 +--- a/net/openvswitch/conntrack.c
3733 ++++ b/net/openvswitch/conntrack.c
3734 +@@ -1289,8 +1289,8 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
3735 +
3736 + nla_for_each_nested(a, attr, rem) {
3737 + int type = nla_type(a);
3738 +- int maxlen = ovs_ct_attr_lens[type].maxlen;
3739 +- int minlen = ovs_ct_attr_lens[type].minlen;
3740 ++ int maxlen;
3741 ++ int minlen;
3742 +
3743 + if (type > OVS_CT_ATTR_MAX) {
3744 + OVS_NLERR(log,
3745 +@@ -1298,6 +1298,9 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
3746 + type, OVS_CT_ATTR_MAX);
3747 + return -EINVAL;
3748 + }
3749 ++
3750 ++ maxlen = ovs_ct_attr_lens[type].maxlen;
3751 ++ minlen = ovs_ct_attr_lens[type].minlen;
3752 + if (nla_len(a) < minlen || nla_len(a) > maxlen) {
3753 + OVS_NLERR(log,
3754 + "Conntrack attr type has unexpected length (type=%d, length=%d, expected=%d)",
3755 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
3756 +index e3eeed19cc7a..0880e0a9d151 100644
3757 +--- a/net/packet/af_packet.c
3758 ++++ b/net/packet/af_packet.c
3759 +@@ -4334,7 +4334,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
3760 + register_prot_hook(sk);
3761 + }
3762 + spin_unlock(&po->bind_lock);
3763 +- if (closing && (po->tp_version > TPACKET_V2)) {
3764 ++ if (pg_vec && (po->tp_version > TPACKET_V2)) {
3765 + /* Because we don't support block-based V3 on tx-ring */
3766 + if (!tx_ring)
3767 + prb_shutdown_retire_blk_timer(po, rb_queue);
3768 +diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
3769 +index 92e332e17391..961a6f81ae64 100644
3770 +--- a/net/sctp/sm_make_chunk.c
3771 ++++ b/net/sctp/sm_make_chunk.c
3772 +@@ -228,7 +228,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
3773 + sctp_adaptation_ind_param_t aiparam;
3774 + sctp_supported_ext_param_t ext_param;
3775 + int num_ext = 0;
3776 +- __u8 extensions[3];
3777 ++ __u8 extensions[4];
3778 + sctp_paramhdr_t *auth_chunks = NULL,
3779 + *auth_hmacs = NULL;
3780 +
3781 +@@ -396,7 +396,7 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
3782 + sctp_adaptation_ind_param_t aiparam;
3783 + sctp_supported_ext_param_t ext_param;
3784 + int num_ext = 0;
3785 +- __u8 extensions[3];
3786 ++ __u8 extensions[4];
3787 + sctp_paramhdr_t *auth_chunks = NULL,
3788 + *auth_hmacs = NULL,
3789 + *auth_random = NULL;
3790 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3791 +index a808332d02d0..606d5333ff98 100644
3792 +--- a/sound/pci/hda/patch_realtek.c
3793 ++++ b/sound/pci/hda/patch_realtek.c
3794 +@@ -2296,6 +2296,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
3795 + SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
3796 + SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
3797 + SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
3798 ++ SND_PCI_QUIRK(0x104d, 0x9060, "Sony Vaio VPCL14M1R", ALC882_FIXUP_NO_PRIMARY_HP),
3799 + SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
3800 + SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
3801 +
3802 +diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
3803 +index 754e3ef8d7ae..d05acc8eed1f 100644
3804 +--- a/sound/soc/soc-core.c
3805 ++++ b/sound/soc/soc-core.c
3806 +@@ -3139,8 +3139,6 @@ static int snd_soc_component_initialize(struct snd_soc_component *component,
3807 + component->remove = component->driver->remove;
3808 + component->suspend = component->driver->suspend;
3809 + component->resume = component->driver->resume;
3810 +- component->pcm_new = component->driver->pcm_new;
3811 +- component->pcm_free = component->driver->pcm_free;
3812 +
3813 + dapm = &component->dapm;
3814 + dapm->dev = dev;
3815 +@@ -3328,25 +3326,6 @@ static void snd_soc_platform_drv_remove(struct snd_soc_component *component)
3816 + platform->driver->remove(platform);
3817 + }
3818 +
3819 +-static int snd_soc_platform_drv_pcm_new(struct snd_soc_pcm_runtime *rtd)
3820 +-{
3821 +- struct snd_soc_platform *platform = rtd->platform;
3822 +-
3823 +- if (platform->driver->pcm_new)
3824 +- return platform->driver->pcm_new(rtd);
3825 +- else
3826 +- return 0;
3827 +-}
3828 +-
3829 +-static void snd_soc_platform_drv_pcm_free(struct snd_pcm *pcm)
3830 +-{
3831 +- struct snd_soc_pcm_runtime *rtd = pcm->private_data;
3832 +- struct snd_soc_platform *platform = rtd->platform;
3833 +-
3834 +- if (platform->driver->pcm_free)
3835 +- platform->driver->pcm_free(pcm);
3836 +-}
3837 +-
3838 + /**
3839 + * snd_soc_add_platform - Add a platform to the ASoC core
3840 + * @dev: The parent device for the platform
3841 +@@ -3370,10 +3349,6 @@ int snd_soc_add_platform(struct device *dev, struct snd_soc_platform *platform,
3842 + platform->component.probe = snd_soc_platform_drv_probe;
3843 + if (platform_drv->remove)
3844 + platform->component.remove = snd_soc_platform_drv_remove;
3845 +- if (platform_drv->pcm_new)
3846 +- platform->component.pcm_new = snd_soc_platform_drv_pcm_new;
3847 +- if (platform_drv->pcm_free)
3848 +- platform->component.pcm_free = snd_soc_platform_drv_pcm_free;
3849 +
3850 + #ifdef CONFIG_DEBUG_FS
3851 + platform->component.debugfs_prefix = "platform";
3852 +diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
3853 +index efc5831f205d..8ff7cd3b8c1f 100644
3854 +--- a/sound/soc/soc-pcm.c
3855 ++++ b/sound/soc/soc-pcm.c
3856 +@@ -181,6 +181,10 @@ int dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir,
3857 + dev_dbg(be->dev, "ASoC: BE %s event %d dir %d\n",
3858 + be->dai_link->name, event, dir);
3859 +
3860 ++ if ((event == SND_SOC_DAPM_STREAM_STOP) &&
3861 ++ (be->dpcm[dir].users >= 1))
3862 ++ continue;
3863 ++
3864 + snd_soc_dapm_stream_event(be, dir, event);
3865 + }
3866 +
3867 +@@ -2628,25 +2632,12 @@ static int dpcm_fe_dai_close(struct snd_pcm_substream *fe_substream)
3868 + return ret;
3869 + }
3870 +
3871 +-static void soc_pcm_free(struct snd_pcm *pcm)
3872 +-{
3873 +- struct snd_soc_pcm_runtime *rtd = pcm->private_data;
3874 +- struct snd_soc_component *component;
3875 +-
3876 +- list_for_each_entry(component, &rtd->card->component_dev_list,
3877 +- card_list) {
3878 +- if (component->pcm_free)
3879 +- component->pcm_free(pcm);
3880 +- }
3881 +-}
3882 +-
3883 + /* create a new pcm */
3884 + int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
3885 + {
3886 + struct snd_soc_platform *platform = rtd->platform;
3887 + struct snd_soc_dai *codec_dai;
3888 + struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
3889 +- struct snd_soc_component *component;
3890 + struct snd_pcm *pcm;
3891 + char new_name[64];
3892 + int ret = 0, playback = 0, capture = 0;
3893 +@@ -2755,18 +2746,17 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
3894 + if (capture)
3895 + snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &rtd->ops);
3896 +
3897 +- list_for_each_entry(component, &rtd->card->component_dev_list, card_list) {
3898 +- if (component->pcm_new) {
3899 +- ret = component->pcm_new(rtd);
3900 +- if (ret < 0) {
3901 +- dev_err(component->dev,
3902 +- "ASoC: pcm constructor failed: %d\n",
3903 +- ret);
3904 +- return ret;
3905 +- }
3906 ++ if (platform->driver->pcm_new) {
3907 ++ ret = platform->driver->pcm_new(rtd);
3908 ++ if (ret < 0) {
3909 ++ dev_err(platform->dev,
3910 ++ "ASoC: pcm constructor failed: %d\n",
3911 ++ ret);
3912 ++ return ret;
3913 + }
3914 + }
3915 +- pcm->private_free = soc_pcm_free;
3916 ++
3917 ++ pcm->private_free = platform->driver->pcm_free;
3918 + out:
3919 + dev_info(rtd->card->dev, "%s <-> %s mapping ok\n",
3920 + (rtd->num_codecs > 1) ? "multicodec" : rtd->codec_dai->name,
3921 +diff --git a/sound/soc/ux500/mop500.c b/sound/soc/ux500/mop500.c
3922 +index b50f68a439ce..ba9fc099cf67 100644
3923 +--- a/sound/soc/ux500/mop500.c
3924 ++++ b/sound/soc/ux500/mop500.c
3925 +@@ -33,6 +33,7 @@ static struct snd_soc_dai_link mop500_dai_links[] = {
3926 + .stream_name = "ab8500_0",
3927 + .cpu_dai_name = "ux500-msp-i2s.1",
3928 + .codec_dai_name = "ab8500-codec-dai.0",
3929 ++ .platform_name = "ux500-msp-i2s.1",
3930 + .codec_name = "ab8500-codec.0",
3931 + .init = mop500_ab8500_machine_init,
3932 + .ops = mop500_ab8500_ops,
3933 +@@ -42,6 +43,7 @@ static struct snd_soc_dai_link mop500_dai_links[] = {
3934 + .stream_name = "ab8500_1",
3935 + .cpu_dai_name = "ux500-msp-i2s.3",
3936 + .codec_dai_name = "ab8500-codec-dai.1",
3937 ++ .platform_name = "ux500-msp-i2s.3",
3938 + .codec_name = "ab8500-codec.0",
3939 + .init = NULL,
3940 + .ops = mop500_ab8500_ops,
3941 +@@ -85,6 +87,8 @@ static int mop500_of_probe(struct platform_device *pdev,
3942 + for (i = 0; i < 2; i++) {
3943 + mop500_dai_links[i].cpu_of_node = msp_np[i];
3944 + mop500_dai_links[i].cpu_dai_name = NULL;
3945 ++ mop500_dai_links[i].platform_of_node = msp_np[i];
3946 ++ mop500_dai_links[i].platform_name = NULL;
3947 + mop500_dai_links[i].codec_of_node = codec_np;
3948 + mop500_dai_links[i].codec_name = NULL;
3949 + }
3950 +diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
3951 +index e2e5effba2a9..db1c7b25a44c 100644
3952 +--- a/virt/kvm/arm/mmu.c
3953 ++++ b/virt/kvm/arm/mmu.c
3954 +@@ -1665,12 +1665,16 @@ static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *
3955 +
3956 + int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
3957 + {
3958 ++ if (!kvm->arch.pgd)
3959 ++ return 0;
3960 + trace_kvm_age_hva(start, end);
3961 + return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
3962 + }
3963 +
3964 + int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
3965 + {
3966 ++ if (!kvm->arch.pgd)
3967 ++ return 0;
3968 + trace_kvm_test_age_hva(hva);
3969 + return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
3970 + }