
From: Mike Pagano <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Sat, 26 Jan 2019 15:09:22
Message-Id: 1548515325.ab431e3aad1b05a8e5ab6aff6f0ba86a0fac96b9.mpagano@gentoo
1 commit: ab431e3aad1b05a8e5ab6aff6f0ba86a0fac96b9
2 Author: Mike Pagano <mpagano@gentoo.org>
3 AuthorDate: Sat Jan 26 15:08:45 2019 +0000
4 Commit: Mike Pagano <mpagano@gentoo.org>
5 CommitDate: Sat Jan 26 15:08:45 2019 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ab431e3a
7
8 proj/linux-patches: Linux patch 4.19.18
9
10 Signed-off-by: Mike Pagano <mpagano@gentoo.org>
11
12 0000_README | 4 +
13 1017_linux-4.19.18.patch | 4709 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 4713 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index f7061f2..776e758 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -111,6 +111,10 @@ Patch: 1016_linux-4.19.17.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.19.17
23
24 +Patch: 1017_linux-4.19.18.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.19.18
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1017_linux-4.19.18.patch b/1017_linux-4.19.18.patch
33 new file mode 100644
34 index 0000000..5cb666f
35 --- /dev/null
36 +++ b/1017_linux-4.19.18.patch
37 @@ -0,0 +1,4709 @@
38 +diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
39 +index 22b4b00dee31..06ac6dda9b34 100644
40 +--- a/Documentation/filesystems/proc.txt
41 ++++ b/Documentation/filesystems/proc.txt
42 +@@ -496,7 +496,9 @@ manner. The codes are the following:
43 +
44 + Note that there is no guarantee that every flag and associated mnemonic will
45 + be present in all further kernel releases. Things get changed, the flags may
46 +-be vanished or the reverse -- new added.
47 ++be vanished or the reverse -- new added. Interpretation of their meaning
48 ++might change in future as well. So each consumer of these flags has to
49 ++follow each specific kernel version for the exact semantic.
50 +
51 + This file is only present if the CONFIG_MMU kernel configuration option is
52 + enabled.
53 +diff --git a/Makefile b/Makefile
54 +index 4b0bce87a36b..9f37a8a9feb9 100644
55 +--- a/Makefile
56 ++++ b/Makefile
57 +@@ -1,7 +1,7 @@
58 + # SPDX-License-Identifier: GPL-2.0
59 + VERSION = 4
60 + PATCHLEVEL = 19
61 +-SUBLEVEL = 17
62 ++SUBLEVEL = 18
63 + EXTRAVERSION =
64 + NAME = "People's Front"
65 +
66 +diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
67 +index 0bcc98dbba56..f90f5d83b228 100644
68 +--- a/arch/arm64/include/asm/assembler.h
69 ++++ b/arch/arm64/include/asm/assembler.h
70 +@@ -378,27 +378,33 @@ alternative_endif
71 + * size: size of the region
72 + * Corrupts: kaddr, size, tmp1, tmp2
73 + */
74 ++ .macro __dcache_op_workaround_clean_cache, op, kaddr
75 ++alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
76 ++ dc \op, \kaddr
77 ++alternative_else
78 ++ dc civac, \kaddr
79 ++alternative_endif
80 ++ .endm
81 ++
82 + .macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
83 + dcache_line_size \tmp1, \tmp2
84 + add \size, \kaddr, \size
85 + sub \tmp2, \tmp1, #1
86 + bic \kaddr, \kaddr, \tmp2
87 + 9998:
88 +- .if (\op == cvau || \op == cvac)
89 +-alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
90 +- dc \op, \kaddr
91 +-alternative_else
92 +- dc civac, \kaddr
93 +-alternative_endif
94 +- .elseif (\op == cvap)
95 +-alternative_if ARM64_HAS_DCPOP
96 +- sys 3, c7, c12, 1, \kaddr // dc cvap
97 +-alternative_else
98 +- dc cvac, \kaddr
99 +-alternative_endif
100 ++ .ifc \op, cvau
101 ++ __dcache_op_workaround_clean_cache \op, \kaddr
102 ++ .else
103 ++ .ifc \op, cvac
104 ++ __dcache_op_workaround_clean_cache \op, \kaddr
105 ++ .else
106 ++ .ifc \op, cvap
107 ++ sys 3, c7, c12, 1, \kaddr // dc cvap
108 + .else
109 + dc \op, \kaddr
110 + .endif
111 ++ .endif
112 ++ .endif
113 + add \kaddr, \kaddr, \tmp1
114 + cmp \kaddr, \size
115 + b.lo 9998b
116 +diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
117 +index b96442960aea..56562ff01076 100644
118 +--- a/arch/arm64/include/asm/memory.h
119 ++++ b/arch/arm64/include/asm/memory.h
120 +@@ -76,12 +76,17 @@
121 + /*
122 + * KASAN requires 1/8th of the kernel virtual address space for the shadow
123 + * region. KASAN can bloat the stack significantly, so double the (minimum)
124 +- * stack size when KASAN is in use.
125 ++ * stack size when KASAN is in use, and then double it again if KASAN_EXTRA is
126 ++ * on.
127 + */
128 + #ifdef CONFIG_KASAN
129 + #define KASAN_SHADOW_SCALE_SHIFT 3
130 + #define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
131 ++#ifdef CONFIG_KASAN_EXTRA
132 ++#define KASAN_THREAD_SHIFT 2
133 ++#else
134 + #define KASAN_THREAD_SHIFT 1
135 ++#endif /* CONFIG_KASAN_EXTRA */
136 + #else
137 + #define KASAN_SHADOW_SIZE (0)
138 + #define KASAN_THREAD_SHIFT 0
139 +diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
140 +index e213f8e867f6..8a91ac067d44 100644
141 +--- a/arch/arm64/kernel/perf_event.c
142 ++++ b/arch/arm64/kernel/perf_event.c
143 +@@ -1274,6 +1274,7 @@ static struct platform_driver armv8_pmu_driver = {
144 + .driver = {
145 + .name = ARMV8_PMU_PDEV_NAME,
146 + .of_match_table = armv8_pmu_of_device_ids,
147 ++ .suppress_bind_attrs = true,
148 + },
149 + .probe = armv8_pmu_device_probe,
150 + };
151 +diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
152 +index 0c22ede52f90..a194fd0e837f 100644
153 +--- a/arch/arm64/mm/cache.S
154 ++++ b/arch/arm64/mm/cache.S
155 +@@ -212,6 +212,9 @@ ENDPROC(__dma_clean_area)
156 + * - size - size in question
157 + */
158 + ENTRY(__clean_dcache_area_pop)
159 ++ alternative_if_not ARM64_HAS_DCPOP
160 ++ b __clean_dcache_area_poc
161 ++ alternative_else_nop_endif
162 + dcache_by_line_op cvap, sy, x0, x1, x2, x3
163 + ret
164 + ENDPIPROC(__clean_dcache_area_pop)
165 +diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
166 +index 154b811d5894..201caf226b47 100644
167 +--- a/arch/mips/Kconfig
168 ++++ b/arch/mips/Kconfig
169 +@@ -794,6 +794,7 @@ config SIBYTE_SWARM
170 + select SYS_SUPPORTS_HIGHMEM
171 + select SYS_SUPPORTS_LITTLE_ENDIAN
172 + select ZONE_DMA32 if 64BIT
173 ++ select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
174 +
175 + config SIBYTE_LITTLESUR
176 + bool "Sibyte BCM91250C2-LittleSur"
177 +@@ -814,6 +815,7 @@ config SIBYTE_SENTOSA
178 + select SYS_HAS_CPU_SB1
179 + select SYS_SUPPORTS_BIG_ENDIAN
180 + select SYS_SUPPORTS_LITTLE_ENDIAN
181 ++ select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
182 +
183 + config SIBYTE_BIGSUR
184 + bool "Sibyte BCM91480B-BigSur"
185 +@@ -826,6 +828,7 @@ config SIBYTE_BIGSUR
186 + select SYS_SUPPORTS_HIGHMEM
187 + select SYS_SUPPORTS_LITTLE_ENDIAN
188 + select ZONE_DMA32 if 64BIT
189 ++ select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
190 +
191 + config SNI_RM
192 + bool "SNI RM200/300/400"
193 +diff --git a/arch/mips/sibyte/common/Makefile b/arch/mips/sibyte/common/Makefile
194 +index b3d6bf23a662..3ef3fb658136 100644
195 +--- a/arch/mips/sibyte/common/Makefile
196 ++++ b/arch/mips/sibyte/common/Makefile
197 +@@ -1,4 +1,5 @@
198 + obj-y := cfe.o
199 ++obj-$(CONFIG_SWIOTLB) += dma.o
200 + obj-$(CONFIG_SIBYTE_BUS_WATCHER) += bus_watcher.o
201 + obj-$(CONFIG_SIBYTE_CFE_CONSOLE) += cfe_console.o
202 + obj-$(CONFIG_SIBYTE_TBPROF) += sb_tbprof.o
203 +diff --git a/arch/mips/sibyte/common/dma.c b/arch/mips/sibyte/common/dma.c
204 +new file mode 100644
205 +index 000000000000..eb47a94f3583
206 +--- /dev/null
207 ++++ b/arch/mips/sibyte/common/dma.c
208 +@@ -0,0 +1,14 @@
209 ++// SPDX-License-Identifier: GPL-2.0+
210 ++/*
211 ++ * DMA support for Broadcom SiByte platforms.
212 ++ *
213 ++ * Copyright (c) 2018 Maciej W. Rozycki
214 ++ */
215 ++
216 ++#include <linux/swiotlb.h>
217 ++#include <asm/bootinfo.h>
218 ++
219 ++void __init plat_swiotlb_setup(void)
220 ++{
221 ++ swiotlb_init(1);
222 ++}
223 +diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
224 +index 4264aedc7775..dd6badc31f45 100644
225 +--- a/arch/powerpc/xmon/xmon.c
226 ++++ b/arch/powerpc/xmon/xmon.c
227 +@@ -75,6 +75,9 @@ static int xmon_gate;
228 + #define xmon_owner 0
229 + #endif /* CONFIG_SMP */
230 +
231 ++#ifdef CONFIG_PPC_PSERIES
232 ++static int set_indicator_token = RTAS_UNKNOWN_SERVICE;
233 ++#endif
234 + static unsigned long in_xmon __read_mostly = 0;
235 + static int xmon_on = IS_ENABLED(CONFIG_XMON_DEFAULT);
236 +
237 +@@ -358,7 +361,6 @@ static inline void disable_surveillance(void)
238 + #ifdef CONFIG_PPC_PSERIES
239 + /* Since this can't be a module, args should end up below 4GB. */
240 + static struct rtas_args args;
241 +- int token;
242 +
243 + /*
244 + * At this point we have got all the cpus we can into
245 +@@ -367,11 +369,11 @@ static inline void disable_surveillance(void)
246 + * If we did try to take rtas.lock there would be a
247 + * real possibility of deadlock.
248 + */
249 +- token = rtas_token("set-indicator");
250 +- if (token == RTAS_UNKNOWN_SERVICE)
251 ++ if (set_indicator_token == RTAS_UNKNOWN_SERVICE)
252 + return;
253 +
254 +- rtas_call_unlocked(&args, token, 3, 1, NULL, SURVEILLANCE_TOKEN, 0, 0);
255 ++ rtas_call_unlocked(&args, set_indicator_token, 3, 1, NULL,
256 ++ SURVEILLANCE_TOKEN, 0, 0);
257 +
258 + #endif /* CONFIG_PPC_PSERIES */
259 + }
260 +@@ -3672,6 +3674,14 @@ static void xmon_init(int enable)
261 + __debugger_iabr_match = xmon_iabr_match;
262 + __debugger_break_match = xmon_break_match;
263 + __debugger_fault_handler = xmon_fault_handler;
264 ++
265 ++#ifdef CONFIG_PPC_PSERIES
266 ++ /*
267 ++ * Get the token here to avoid trying to get a lock
268 ++ * during the crash, causing a deadlock.
269 ++ */
270 ++ set_indicator_token = rtas_token("set-indicator");
271 ++#endif
272 + } else {
273 + __debugger = NULL;
274 + __debugger_ipi = NULL;
275 +diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
276 +index 3de69330e6c5..afbc87206886 100644
277 +--- a/arch/x86/include/asm/traps.h
278 ++++ b/arch/x86/include/asm/traps.h
279 +@@ -104,9 +104,9 @@ extern int panic_on_unrecovered_nmi;
280 +
281 + void math_emulate(struct math_emu_info *);
282 + #ifndef CONFIG_X86_32
283 +-asmlinkage void smp_thermal_interrupt(void);
284 +-asmlinkage void smp_threshold_interrupt(void);
285 +-asmlinkage void smp_deferred_error_interrupt(void);
286 ++asmlinkage void smp_thermal_interrupt(struct pt_regs *regs);
287 ++asmlinkage void smp_threshold_interrupt(struct pt_regs *regs);
288 ++asmlinkage void smp_deferred_error_interrupt(struct pt_regs *regs);
289 + #endif
290 +
291 + extern void ist_enter(struct pt_regs *regs);
292 +diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
293 +index e12454e21b8a..9f915a8791cc 100644
294 +--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
295 ++++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
296 +@@ -23,6 +23,7 @@
297 + #include <linux/string.h>
298 +
299 + #include <asm/amd_nb.h>
300 ++#include <asm/traps.h>
301 + #include <asm/apic.h>
302 + #include <asm/mce.h>
303 + #include <asm/msr.h>
304 +@@ -99,7 +100,7 @@ static u32 smca_bank_addrs[MAX_NR_BANKS][NR_BLOCKS] __ro_after_init =
305 + [0 ... MAX_NR_BANKS - 1] = { [0 ... NR_BLOCKS - 1] = -1 }
306 + };
307 +
308 +-const char *smca_get_name(enum smca_bank_types t)
309 ++static const char *smca_get_name(enum smca_bank_types t)
310 + {
311 + if (t >= N_SMCA_BANK_TYPES)
312 + return NULL;
313 +@@ -824,7 +825,7 @@ static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc)
314 + mce_log(&m);
315 + }
316 +
317 +-asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(void)
318 ++asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(struct pt_regs *regs)
319 + {
320 + entering_irq();
321 + trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
322 +diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
323 +index 2da67b70ba98..ee229ceee745 100644
324 +--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
325 ++++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
326 +@@ -25,6 +25,7 @@
327 + #include <linux/cpu.h>
328 +
329 + #include <asm/processor.h>
330 ++#include <asm/traps.h>
331 + #include <asm/apic.h>
332 + #include <asm/mce.h>
333 + #include <asm/msr.h>
334 +@@ -390,7 +391,7 @@ static void unexpected_thermal_interrupt(void)
335 +
336 + static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;
337 +
338 +-asmlinkage __visible void __irq_entry smp_thermal_interrupt(struct pt_regs *r)
339 ++asmlinkage __visible void __irq_entry smp_thermal_interrupt(struct pt_regs *regs)
340 + {
341 + entering_irq();
342 + trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
343 +diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mcheck/threshold.c
344 +index 2b584b319eff..c21e0a1efd0f 100644
345 +--- a/arch/x86/kernel/cpu/mcheck/threshold.c
346 ++++ b/arch/x86/kernel/cpu/mcheck/threshold.c
347 +@@ -6,6 +6,7 @@
348 + #include <linux/kernel.h>
349 +
350 + #include <asm/irq_vectors.h>
351 ++#include <asm/traps.h>
352 + #include <asm/apic.h>
353 + #include <asm/mce.h>
354 + #include <asm/trace/irq_vectors.h>
355 +@@ -18,7 +19,7 @@ static void default_threshold_interrupt(void)
356 +
357 + void (*mce_threshold_vector)(void) = default_threshold_interrupt;
358 +
359 +-asmlinkage __visible void __irq_entry smp_threshold_interrupt(void)
360 ++asmlinkage __visible void __irq_entry smp_threshold_interrupt(struct pt_regs *regs)
361 + {
362 + entering_irq();
363 + trace_threshold_apic_entry(THRESHOLD_APIC_VECTOR);
364 +diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
365 +index f02ecaf97904..6489067b78a4 100644
366 +--- a/arch/x86/kernel/smpboot.c
367 ++++ b/arch/x86/kernel/smpboot.c
368 +@@ -1346,7 +1346,7 @@ void __init calculate_max_logical_packages(void)
369 + * extrapolate the boot cpu's data to all packages.
370 + */
371 + ncpus = cpu_data(0).booted_cores * topology_max_smt_threads();
372 +- __max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus);
373 ++ __max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus);
374 + pr_info("Max logical packages: %u\n", __max_logical_packages);
375 + }
376 +
377 +diff --git a/crypto/ecc.c b/crypto/ecc.c
378 +index 8facafd67802..adcce310f646 100644
379 +--- a/crypto/ecc.c
380 ++++ b/crypto/ecc.c
381 +@@ -842,15 +842,23 @@ static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime,
382 +
383 + static void ecc_point_mult(struct ecc_point *result,
384 + const struct ecc_point *point, const u64 *scalar,
385 +- u64 *initial_z, u64 *curve_prime,
386 ++ u64 *initial_z, const struct ecc_curve *curve,
387 + unsigned int ndigits)
388 + {
389 + /* R0 and R1 */
390 + u64 rx[2][ECC_MAX_DIGITS];
391 + u64 ry[2][ECC_MAX_DIGITS];
392 + u64 z[ECC_MAX_DIGITS];
393 ++ u64 sk[2][ECC_MAX_DIGITS];
394 ++ u64 *curve_prime = curve->p;
395 + int i, nb;
396 +- int num_bits = vli_num_bits(scalar, ndigits);
397 ++ int num_bits;
398 ++ int carry;
399 ++
400 ++ carry = vli_add(sk[0], scalar, curve->n, ndigits);
401 ++ vli_add(sk[1], sk[0], curve->n, ndigits);
402 ++ scalar = sk[!carry];
403 ++ num_bits = sizeof(u64) * ndigits * 8 + 1;
404 +
405 + vli_set(rx[1], point->x, ndigits);
406 + vli_set(ry[1], point->y, ndigits);
407 +@@ -1004,7 +1012,7 @@ int ecc_make_pub_key(unsigned int curve_id, unsigned int ndigits,
408 + goto out;
409 + }
410 +
411 +- ecc_point_mult(pk, &curve->g, priv, NULL, curve->p, ndigits);
412 ++ ecc_point_mult(pk, &curve->g, priv, NULL, curve, ndigits);
413 + if (ecc_point_is_zero(pk)) {
414 + ret = -EAGAIN;
415 + goto err_free_point;
416 +@@ -1090,7 +1098,7 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
417 + goto err_alloc_product;
418 + }
419 +
420 +- ecc_point_mult(product, pk, priv, rand_z, curve->p, ndigits);
421 ++ ecc_point_mult(product, pk, priv, rand_z, curve, ndigits);
422 +
423 + ecc_swap_digits(product->x, secret, ndigits);
424 +
425 +diff --git a/drivers/base/bus.c b/drivers/base/bus.c
426 +index 8bfd27ec73d6..585e2e1c9c8f 100644
427 +--- a/drivers/base/bus.c
428 ++++ b/drivers/base/bus.c
429 +@@ -31,6 +31,9 @@ static struct kset *system_kset;
430 +
431 + #define to_drv_attr(_attr) container_of(_attr, struct driver_attribute, attr)
432 +
433 ++#define DRIVER_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
434 ++ struct driver_attribute driver_attr_##_name = \
435 ++ __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
436 +
437 + static int __must_check bus_rescan_devices_helper(struct device *dev,
438 + void *data);
439 +@@ -195,7 +198,7 @@ static ssize_t unbind_store(struct device_driver *drv, const char *buf,
440 + bus_put(bus);
441 + return err;
442 + }
443 +-static DRIVER_ATTR_WO(unbind);
444 ++static DRIVER_ATTR_IGNORE_LOCKDEP(unbind, S_IWUSR, NULL, unbind_store);
445 +
446 + /*
447 + * Manually attach a device to a driver.
448 +@@ -231,7 +234,7 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf,
449 + bus_put(bus);
450 + return err;
451 + }
452 +-static DRIVER_ATTR_WO(bind);
453 ++static DRIVER_ATTR_IGNORE_LOCKDEP(bind, S_IWUSR, NULL, bind_store);
454 +
455 + static ssize_t show_drivers_autoprobe(struct bus_type *bus, char *buf)
456 + {
457 +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
458 +index cd2e5cf14ea5..77b67a5f21ee 100644
459 +--- a/drivers/bluetooth/btusb.c
460 ++++ b/drivers/bluetooth/btusb.c
461 +@@ -343,6 +343,7 @@ static const struct usb_device_id blacklist_table[] = {
462 + /* Intel Bluetooth devices */
463 + { USB_DEVICE(0x8087, 0x0025), .driver_info = BTUSB_INTEL_NEW },
464 + { USB_DEVICE(0x8087, 0x0026), .driver_info = BTUSB_INTEL_NEW },
465 ++ { USB_DEVICE(0x8087, 0x0029), .driver_info = BTUSB_INTEL_NEW },
466 + { USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
467 + { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
468 + { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
469 +@@ -2054,6 +2055,35 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
470 + return -EILSEQ;
471 + }
472 +
473 ++static bool btusb_setup_intel_new_get_fw_name(struct intel_version *ver,
474 ++ struct intel_boot_params *params,
475 ++ char *fw_name, size_t len,
476 ++ const char *suffix)
477 ++{
478 ++ switch (ver->hw_variant) {
479 ++ case 0x0b: /* SfP */
480 ++ case 0x0c: /* WsP */
481 ++ snprintf(fw_name, len, "intel/ibt-%u-%u.%s",
482 ++ le16_to_cpu(ver->hw_variant),
483 ++ le16_to_cpu(params->dev_revid),
484 ++ suffix);
485 ++ break;
486 ++ case 0x11: /* JfP */
487 ++ case 0x12: /* ThP */
488 ++ case 0x13: /* HrP */
489 ++ case 0x14: /* CcP */
490 ++ snprintf(fw_name, len, "intel/ibt-%u-%u-%u.%s",
491 ++ le16_to_cpu(ver->hw_variant),
492 ++ le16_to_cpu(ver->hw_revision),
493 ++ le16_to_cpu(ver->fw_revision),
494 ++ suffix);
495 ++ break;
496 ++ default:
497 ++ return false;
498 ++ }
499 ++ return true;
500 ++}
501 ++
502 + static int btusb_setup_intel_new(struct hci_dev *hdev)
503 + {
504 + struct btusb_data *data = hci_get_drvdata(hdev);
505 +@@ -2105,7 +2135,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
506 + case 0x11: /* JfP */
507 + case 0x12: /* ThP */
508 + case 0x13: /* HrP */
509 +- case 0x14: /* QnJ, IcP */
510 ++ case 0x14: /* CcP */
511 + break;
512 + default:
513 + bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
514 +@@ -2189,23 +2219,9 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
515 + * ibt-<hw_variant>-<hw_revision>-<fw_revision>.sfi.
516 + *
517 + */
518 +- switch (ver.hw_variant) {
519 +- case 0x0b: /* SfP */
520 +- case 0x0c: /* WsP */
521 +- snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.sfi",
522 +- le16_to_cpu(ver.hw_variant),
523 +- le16_to_cpu(params.dev_revid));
524 +- break;
525 +- case 0x11: /* JfP */
526 +- case 0x12: /* ThP */
527 +- case 0x13: /* HrP */
528 +- case 0x14: /* QnJ, IcP */
529 +- snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.sfi",
530 +- le16_to_cpu(ver.hw_variant),
531 +- le16_to_cpu(ver.hw_revision),
532 +- le16_to_cpu(ver.fw_revision));
533 +- break;
534 +- default:
535 ++ err = btusb_setup_intel_new_get_fw_name(&ver, &params, fwname,
536 ++ sizeof(fwname), "sfi");
537 ++ if (!err) {
538 + bt_dev_err(hdev, "Unsupported Intel firmware naming");
539 + return -EINVAL;
540 + }
541 +@@ -2221,23 +2237,9 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
542 + /* Save the DDC file name for later use to apply once the firmware
543 + * downloading is done.
544 + */
545 +- switch (ver.hw_variant) {
546 +- case 0x0b: /* SfP */
547 +- case 0x0c: /* WsP */
548 +- snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.ddc",
549 +- le16_to_cpu(ver.hw_variant),
550 +- le16_to_cpu(params.dev_revid));
551 +- break;
552 +- case 0x11: /* JfP */
553 +- case 0x12: /* ThP */
554 +- case 0x13: /* HrP */
555 +- case 0x14: /* QnJ, IcP */
556 +- snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.ddc",
557 +- le16_to_cpu(ver.hw_variant),
558 +- le16_to_cpu(ver.hw_revision),
559 +- le16_to_cpu(ver.fw_revision));
560 +- break;
561 +- default:
562 ++ err = btusb_setup_intel_new_get_fw_name(&ver, &params, fwname,
563 ++ sizeof(fwname), "ddc");
564 ++ if (!err) {
565 + bt_dev_err(hdev, "Unsupported Intel firmware naming");
566 + return -EINVAL;
567 + }
568 +diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
569 +index 7fc9612070a1..d5f7a12e350e 100644
570 +--- a/drivers/char/ipmi/ipmi_msghandler.c
571 ++++ b/drivers/char/ipmi/ipmi_msghandler.c
572 +@@ -29,6 +29,7 @@
573 + #include <linux/moduleparam.h>
574 + #include <linux/workqueue.h>
575 + #include <linux/uuid.h>
576 ++#include <linux/nospec.h>
577 +
578 + #define PFX "IPMI message handler: "
579 +
580 +@@ -61,7 +62,8 @@ static void ipmi_debug_msg(const char *title, unsigned char *data,
581 + { }
582 + #endif
583 +
584 +-static int initialized;
585 ++static bool initialized;
586 ++static bool drvregistered;
587 +
588 + enum ipmi_panic_event_op {
589 + IPMI_SEND_PANIC_EVENT_NONE,
590 +@@ -611,7 +613,7 @@ static DEFINE_MUTEX(ipmidriver_mutex);
591 +
592 + static LIST_HEAD(ipmi_interfaces);
593 + static DEFINE_MUTEX(ipmi_interfaces_mutex);
594 +-DEFINE_STATIC_SRCU(ipmi_interfaces_srcu);
595 ++struct srcu_struct ipmi_interfaces_srcu;
596 +
597 + /*
598 + * List of watchers that want to know when smi's are added and deleted.
599 +@@ -719,7 +721,15 @@ struct watcher_entry {
600 + int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
601 + {
602 + struct ipmi_smi *intf;
603 +- int index;
604 ++ int index, rv;
605 ++
606 ++ /*
607 ++ * Make sure the driver is actually initialized, this handles
608 ++ * problems with initialization order.
609 ++ */
610 ++ rv = ipmi_init_msghandler();
611 ++ if (rv)
612 ++ return rv;
613 +
614 + mutex_lock(&smi_watchers_mutex);
615 +
616 +@@ -883,7 +893,7 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
617 +
618 + if (user) {
619 + user->handler->ipmi_recv_hndl(msg, user->handler_data);
620 +- release_ipmi_user(msg->user, index);
621 ++ release_ipmi_user(user, index);
622 + } else {
623 + /* User went away, give up. */
624 + ipmi_free_recv_msg(msg);
625 +@@ -1075,7 +1085,7 @@ int ipmi_create_user(unsigned int if_num,
626 + {
627 + unsigned long flags;
628 + struct ipmi_user *new_user;
629 +- int rv = 0, index;
630 ++ int rv, index;
631 + struct ipmi_smi *intf;
632 +
633 + /*
634 +@@ -1093,18 +1103,9 @@ int ipmi_create_user(unsigned int if_num,
635 + * Make sure the driver is actually initialized, this handles
636 + * problems with initialization order.
637 + */
638 +- if (!initialized) {
639 +- rv = ipmi_init_msghandler();
640 +- if (rv)
641 +- return rv;
642 +-
643 +- /*
644 +- * The init code doesn't return an error if it was turned
645 +- * off, but it won't initialize. Check that.
646 +- */
647 +- if (!initialized)
648 +- return -ENODEV;
649 +- }
650 ++ rv = ipmi_init_msghandler();
651 ++ if (rv)
652 ++ return rv;
653 +
654 + new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
655 + if (!new_user)
656 +@@ -1182,6 +1183,7 @@ EXPORT_SYMBOL(ipmi_get_smi_info);
657 + static void free_user(struct kref *ref)
658 + {
659 + struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
660 ++ cleanup_srcu_struct(&user->release_barrier);
661 + kfree(user);
662 + }
663 +
664 +@@ -1258,7 +1260,6 @@ int ipmi_destroy_user(struct ipmi_user *user)
665 + {
666 + _ipmi_destroy_user(user);
667 +
668 +- cleanup_srcu_struct(&user->release_barrier);
669 + kref_put(&user->refcount, free_user);
670 +
671 + return 0;
672 +@@ -1297,10 +1298,12 @@ int ipmi_set_my_address(struct ipmi_user *user,
673 + if (!user)
674 + return -ENODEV;
675 +
676 +- if (channel >= IPMI_MAX_CHANNELS)
677 ++ if (channel >= IPMI_MAX_CHANNELS) {
678 + rv = -EINVAL;
679 +- else
680 ++ } else {
681 ++ channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
682 + user->intf->addrinfo[channel].address = address;
683 ++ }
684 + release_ipmi_user(user, index);
685 +
686 + return rv;
687 +@@ -1317,10 +1320,12 @@ int ipmi_get_my_address(struct ipmi_user *user,
688 + if (!user)
689 + return -ENODEV;
690 +
691 +- if (channel >= IPMI_MAX_CHANNELS)
692 ++ if (channel >= IPMI_MAX_CHANNELS) {
693 + rv = -EINVAL;
694 +- else
695 ++ } else {
696 ++ channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
697 + *address = user->intf->addrinfo[channel].address;
698 ++ }
699 + release_ipmi_user(user, index);
700 +
701 + return rv;
702 +@@ -1337,10 +1342,12 @@ int ipmi_set_my_LUN(struct ipmi_user *user,
703 + if (!user)
704 + return -ENODEV;
705 +
706 +- if (channel >= IPMI_MAX_CHANNELS)
707 ++ if (channel >= IPMI_MAX_CHANNELS) {
708 + rv = -EINVAL;
709 +- else
710 ++ } else {
711 ++ channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
712 + user->intf->addrinfo[channel].lun = LUN & 0x3;
713 ++ }
714 + release_ipmi_user(user, index);
715 +
716 + return 0;
717 +@@ -1357,10 +1364,12 @@ int ipmi_get_my_LUN(struct ipmi_user *user,
718 + if (!user)
719 + return -ENODEV;
720 +
721 +- if (channel >= IPMI_MAX_CHANNELS)
722 ++ if (channel >= IPMI_MAX_CHANNELS) {
723 + rv = -EINVAL;
724 +- else
725 ++ } else {
726 ++ channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
727 + *address = user->intf->addrinfo[channel].lun;
728 ++ }
729 + release_ipmi_user(user, index);
730 +
731 + return rv;
732 +@@ -2184,6 +2193,7 @@ static int check_addr(struct ipmi_smi *intf,
733 + {
734 + if (addr->channel >= IPMI_MAX_CHANNELS)
735 + return -EINVAL;
736 ++ addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS);
737 + *lun = intf->addrinfo[addr->channel].lun;
738 + *saddr = intf->addrinfo[addr->channel].address;
739 + return 0;
740 +@@ -3294,17 +3304,9 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
741 + * Make sure the driver is actually initialized, this handles
742 + * problems with initialization order.
743 + */
744 +- if (!initialized) {
745 +- rv = ipmi_init_msghandler();
746 +- if (rv)
747 +- return rv;
748 +- /*
749 +- * The init code doesn't return an error if it was turned
750 +- * off, but it won't initialize. Check that.
751 +- */
752 +- if (!initialized)
753 +- return -ENODEV;
754 +- }
755 ++ rv = ipmi_init_msghandler();
756 ++ if (rv)
757 ++ return rv;
758 +
759 + intf = kzalloc(sizeof(*intf), GFP_KERNEL);
760 + if (!intf)
761 +@@ -5020,6 +5022,22 @@ static int panic_event(struct notifier_block *this,
762 + return NOTIFY_DONE;
763 + }
764 +
765 ++/* Must be called with ipmi_interfaces_mutex held. */
766 ++static int ipmi_register_driver(void)
767 ++{
768 ++ int rv;
769 ++
770 ++ if (drvregistered)
771 ++ return 0;
772 ++
773 ++ rv = driver_register(&ipmidriver.driver);
774 ++ if (rv)
775 ++ pr_err("Could not register IPMI driver\n");
776 ++ else
777 ++ drvregistered = true;
778 ++ return rv;
779 ++}
780 ++
781 + static struct notifier_block panic_block = {
782 + .notifier_call = panic_event,
783 + .next = NULL,
784 +@@ -5030,66 +5048,74 @@ static int ipmi_init_msghandler(void)
785 + {
786 + int rv;
787 +
788 ++ mutex_lock(&ipmi_interfaces_mutex);
789 ++ rv = ipmi_register_driver();
790 ++ if (rv)
791 ++ goto out;
792 + if (initialized)
793 +- return 0;
794 +-
795 +- rv = driver_register(&ipmidriver.driver);
796 +- if (rv) {
797 +- pr_err(PFX "Could not register IPMI driver\n");
798 +- return rv;
799 +- }
800 ++ goto out;
801 +
802 +- pr_info("ipmi message handler version " IPMI_DRIVER_VERSION "\n");
803 ++ init_srcu_struct(&ipmi_interfaces_srcu);
804 +
805 + timer_setup(&ipmi_timer, ipmi_timeout, 0);
806 + mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
807 +
808 + atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
809 +
810 +- initialized = 1;
811 ++ initialized = true;
812 +
813 +- return 0;
814 ++out:
815 ++ mutex_unlock(&ipmi_interfaces_mutex);
816 ++ return rv;
817 + }
818 +
819 + static int __init ipmi_init_msghandler_mod(void)
820 + {
821 +- ipmi_init_msghandler();
822 +- return 0;
823 ++ int rv;
824 ++
825 ++ pr_info("version " IPMI_DRIVER_VERSION "\n");
826 ++
827 ++ mutex_lock(&ipmi_interfaces_mutex);
828 ++ rv = ipmi_register_driver();
829 ++ mutex_unlock(&ipmi_interfaces_mutex);
830 ++
831 ++ return rv;
832 + }
833 +
834 + static void __exit cleanup_ipmi(void)
835 + {
836 + int count;
837 +
838 +- if (!initialized)
839 +- return;
840 +-
841 +- atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
842 +-
843 +- /*
844 +- * This can't be called if any interfaces exist, so no worry
845 +- * about shutting down the interfaces.
846 +- */
847 +-
848 +- /*
849 +- * Tell the timer to stop, then wait for it to stop. This
850 +- * avoids problems with race conditions removing the timer
851 +- * here.
852 +- */
853 +- atomic_inc(&stop_operation);
854 +- del_timer_sync(&ipmi_timer);
855 ++ if (initialized) {
856 ++ atomic_notifier_chain_unregister(&panic_notifier_list,
857 ++ &panic_block);
858 +
859 +- driver_unregister(&ipmidriver.driver);
860 +-
861 +- initialized = 0;
862 ++ /*
863 ++ * This can't be called if any interfaces exist, so no worry
864 ++ * about shutting down the interfaces.
865 ++ */
866 +
867 +- /* Check for buffer leaks. */
868 +- count = atomic_read(&smi_msg_inuse_count);
869 +- if (count != 0)
870 +- pr_warn(PFX "SMI message count %d at exit\n", count);
871 +- count = atomic_read(&recv_msg_inuse_count);
872 +- if (count != 0)
873 +- pr_warn(PFX "recv message count %d at exit\n", count);
874 ++ /*
875 ++ * Tell the timer to stop, then wait for it to stop. This
876 ++ * avoids problems with race conditions removing the timer
877 ++ * here.
878 ++ */
879 ++ atomic_inc(&stop_operation);
880 ++ del_timer_sync(&ipmi_timer);
881 ++
882 ++ initialized = false;
883 ++
884 ++ /* Check for buffer leaks. */
885 ++ count = atomic_read(&smi_msg_inuse_count);
886 ++ if (count != 0)
887 ++ pr_warn(PFX "SMI message count %d at exit\n", count);
888 ++ count = atomic_read(&recv_msg_inuse_count);
889 ++ if (count != 0)
890 ++ pr_warn(PFX "recv message count %d at exit\n", count);
891 ++ cleanup_srcu_struct(&ipmi_interfaces_srcu);
892 ++ }
893 ++ if (drvregistered)
894 ++ driver_unregister(&ipmidriver.driver);
895 + }
896 + module_exit(cleanup_ipmi);
897 +
898 +diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
899 +index 9b786726e426..76c2010ba672 100644
900 +--- a/drivers/char/ipmi/ipmi_ssif.c
901 ++++ b/drivers/char/ipmi/ipmi_ssif.c
902 +@@ -630,8 +630,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
903 +
904 + /* Remove the multi-part read marker. */
905 + len -= 2;
906 ++ data += 2;
907 + for (i = 0; i < len; i++)
908 +- ssif_info->data[i] = data[i+2];
909 ++ ssif_info->data[i] = data[i];
910 + ssif_info->multi_len = len;
911 + ssif_info->multi_pos = 1;
912 +
913 +@@ -659,8 +660,19 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
914 + }
915 +
916 + blocknum = data[0];
917 ++ len--;
918 ++ data++;
919 ++
920 ++ if (blocknum != 0xff && len != 31) {
921 ++ /* All blocks but the last must have 31 data bytes. */
922 ++ result = -EIO;
923 ++ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
924 ++ pr_info("Received middle message <31\n");
925 +
926 +- if (ssif_info->multi_len + len - 1 > IPMI_MAX_MSG_LENGTH) {
927 ++ goto continue_op;
928 ++ }
929 ++
930 ++ if (ssif_info->multi_len + len > IPMI_MAX_MSG_LENGTH) {
931 + /* Received message too big, abort the operation. */
932 + result = -E2BIG;
933 + if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
934 +@@ -669,16 +681,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
935 + goto continue_op;
936 + }
937 +
938 +- /* Remove the blocknum from the data. */
939 +- len--;
940 + for (i = 0; i < len; i++)
941 +- ssif_info->data[i + ssif_info->multi_len] = data[i + 1];
942 ++ ssif_info->data[i + ssif_info->multi_len] = data[i];
943 + ssif_info->multi_len += len;
944 + if (blocknum == 0xff) {
945 + /* End of read */
946 + len = ssif_info->multi_len;
947 + data = ssif_info->data;
948 +- } else if (blocknum + 1 != ssif_info->multi_pos) {
949 ++ } else if (blocknum != ssif_info->multi_pos) {
950 + /*
951 + * Out of sequence block, just abort. Block
952 + * numbers start at zero for the second block,
953 +@@ -706,6 +716,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
954 + }
955 + }
956 +
957 ++ continue_op:
958 + if (result < 0) {
959 + ssif_inc_stat(ssif_info, receive_errors);
960 + } else {
961 +@@ -713,8 +724,6 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
962 + ssif_inc_stat(ssif_info, received_message_parts);
963 + }
964 +
965 +-
966 +- continue_op:
967 + if (ssif_info->ssif_debug & SSIF_DEBUG_STATE)
968 + pr_info(PFX "DONE 1: state = %d, result=%d.\n",
969 + ssif_info->ssif_state, result);
970 +diff --git a/drivers/clk/imx/clk-busy.c b/drivers/clk/imx/clk-busy.c
971 +index 99036527eb0d..e695622c5aa5 100644
972 +--- a/drivers/clk/imx/clk-busy.c
973 ++++ b/drivers/clk/imx/clk-busy.c
974 +@@ -154,7 +154,7 @@ static const struct clk_ops clk_busy_mux_ops = {
975 +
976 + struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift,
977 + u8 width, void __iomem *busy_reg, u8 busy_shift,
978 +- const char **parent_names, int num_parents)
979 ++ const char * const *parent_names, int num_parents)
980 + {
981 + struct clk_busy_mux *busy;
982 + struct clk *clk;
983 +diff --git a/drivers/clk/imx/clk-fixup-mux.c b/drivers/clk/imx/clk-fixup-mux.c
984 +index c9b327e0a8dd..44817c1b0b88 100644
985 +--- a/drivers/clk/imx/clk-fixup-mux.c
986 ++++ b/drivers/clk/imx/clk-fixup-mux.c
987 +@@ -70,7 +70,7 @@ static const struct clk_ops clk_fixup_mux_ops = {
988 + };
989 +
990 + struct clk *imx_clk_fixup_mux(const char *name, void __iomem *reg,
991 +- u8 shift, u8 width, const char **parents,
992 ++ u8 shift, u8 width, const char * const *parents,
993 + int num_parents, void (*fixup)(u32 *val))
994 + {
995 + struct clk_fixup_mux *fixup_mux;
996 +diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
997 +index 8c7c2fcb8d94..c509324f6338 100644
998 +--- a/drivers/clk/imx/clk-imx6q.c
999 ++++ b/drivers/clk/imx/clk-imx6q.c
1000 +@@ -508,8 +508,12 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
1001 + * lvds1_gate and lvds2_gate are pseudo-gates. Both can be
1002 + * independently configured as clock inputs or outputs. We treat
1003 + * the "output_enable" bit as a gate, even though it's really just
1004 +- * enabling clock output.
1005 ++ * enabling clock output. Initially the gate bits are cleared, as
1006 ++ * otherwise the exclusive configuration gets locked in the setup done
1007 ++ * by software running before the clock driver, with no way to change
1008 ++ * it.
1009 + */
1010 ++ writel(readl(base + 0x160) & ~0x3c00, base + 0x160);
1011 + clk[IMX6QDL_CLK_LVDS1_GATE] = imx_clk_gate_exclusive("lvds1_gate", "lvds1_sel", base + 0x160, 10, BIT(12));
1012 + clk[IMX6QDL_CLK_LVDS2_GATE] = imx_clk_gate_exclusive("lvds2_gate", "lvds2_sel", base + 0x160, 11, BIT(13));
1013 +
1014 +diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
1015 +index 8076ec040f37..e65c1115d978 100644
1016 +--- a/drivers/clk/imx/clk.h
1017 ++++ b/drivers/clk/imx/clk.h
1018 +@@ -63,14 +63,14 @@ struct clk *imx_clk_busy_divider(const char *name, const char *parent_name,
1019 +
1020 + struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift,
1021 + u8 width, void __iomem *busy_reg, u8 busy_shift,
1022 +- const char **parent_names, int num_parents);
1023 ++ const char * const *parent_names, int num_parents);
1024 +
1025 + struct clk *imx_clk_fixup_divider(const char *name, const char *parent,
1026 + void __iomem *reg, u8 shift, u8 width,
1027 + void (*fixup)(u32 *val));
1028 +
1029 + struct clk *imx_clk_fixup_mux(const char *name, void __iomem *reg,
1030 +- u8 shift, u8 width, const char **parents,
1031 ++ u8 shift, u8 width, const char * const *parents,
1032 + int num_parents, void (*fixup)(u32 *val));
1033 +
1034 + static inline struct clk *imx_clk_fixed(const char *name, int rate)
1035 +@@ -79,7 +79,8 @@ static inline struct clk *imx_clk_fixed(const char *name, int rate)
1036 + }
1037 +
1038 + static inline struct clk *imx_clk_mux_ldb(const char *name, void __iomem *reg,
1039 +- u8 shift, u8 width, const char **parents, int num_parents)
1040 ++ u8 shift, u8 width, const char * const *parents,
1041 ++ int num_parents)
1042 + {
1043 + return clk_register_mux(NULL, name, parents, num_parents,
1044 + CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT, reg,
1045 +@@ -192,7 +193,8 @@ static inline struct clk *imx_clk_gate4(const char *name, const char *parent,
1046 + }
1047 +
1048 + static inline struct clk *imx_clk_mux(const char *name, void __iomem *reg,
1049 +- u8 shift, u8 width, const char **parents, int num_parents)
1050 ++ u8 shift, u8 width, const char * const *parents,
1051 ++ int num_parents)
1052 + {
1053 + return clk_register_mux(NULL, name, parents, num_parents,
1054 + CLK_SET_RATE_NO_REPARENT, reg, shift,
1055 +@@ -200,7 +202,8 @@ static inline struct clk *imx_clk_mux(const char *name, void __iomem *reg,
1056 + }
1057 +
1058 + static inline struct clk *imx_clk_mux2(const char *name, void __iomem *reg,
1059 +- u8 shift, u8 width, const char **parents, int num_parents)
1060 ++ u8 shift, u8 width, const char * const *parents,
1061 ++ int num_parents)
1062 + {
1063 + return clk_register_mux(NULL, name, parents, num_parents,
1064 + CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE,
1065 +@@ -208,8 +211,9 @@ static inline struct clk *imx_clk_mux2(const char *name, void __iomem *reg,
1066 + }
1067 +
1068 + static inline struct clk *imx_clk_mux_flags(const char *name,
1069 +- void __iomem *reg, u8 shift, u8 width, const char **parents,
1070 +- int num_parents, unsigned long flags)
1071 ++ void __iomem *reg, u8 shift, u8 width,
1072 ++ const char * const *parents, int num_parents,
1073 ++ unsigned long flags)
1074 + {
1075 + return clk_register_mux(NULL, name, parents, num_parents,
1076 + flags | CLK_SET_RATE_NO_REPARENT, reg, shift, width, 0,
1077 +diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
1078 +index 74697e145dde..50060e895e7a 100644
1079 +--- a/drivers/clk/meson/meson8b.c
1080 ++++ b/drivers/clk/meson/meson8b.c
1081 +@@ -568,13 +568,14 @@ static struct clk_fixed_factor meson8b_cpu_div3 = {
1082 + };
1083 +
1084 + static const struct clk_div_table cpu_scale_table[] = {
1085 +- { .val = 2, .div = 4 },
1086 +- { .val = 3, .div = 6 },
1087 +- { .val = 4, .div = 8 },
1088 +- { .val = 5, .div = 10 },
1089 +- { .val = 6, .div = 12 },
1090 +- { .val = 7, .div = 14 },
1091 +- { .val = 8, .div = 16 },
1092 ++ { .val = 1, .div = 4 },
1093 ++ { .val = 2, .div = 6 },
1094 ++ { .val = 3, .div = 8 },
1095 ++ { .val = 4, .div = 10 },
1096 ++ { .val = 5, .div = 12 },
1097 ++ { .val = 6, .div = 14 },
1098 ++ { .val = 7, .div = 16 },
1099 ++ { .val = 8, .div = 18 },
1100 + { /* sentinel */ },
1101 + };
1102 +
1103 +diff --git a/drivers/clocksource/timer-integrator-ap.c b/drivers/clocksource/timer-integrator-ap.c
1104 +index 62d24690ba02..9701107806a7 100644
1105 +--- a/drivers/clocksource/timer-integrator-ap.c
1106 ++++ b/drivers/clocksource/timer-integrator-ap.c
1107 +@@ -181,8 +181,7 @@ static int __init integrator_ap_timer_init_of(struct device_node *node)
1108 + int irq;
1109 + struct clk *clk;
1110 + unsigned long rate;
1111 +- struct device_node *pri_node;
1112 +- struct device_node *sec_node;
1113 ++ struct device_node *alias_node;
1114 +
1115 + base = of_io_request_and_map(node, 0, "integrator-timer");
1116 + if (IS_ERR(base))
1117 +@@ -204,7 +203,18 @@ static int __init integrator_ap_timer_init_of(struct device_node *node)
1118 + return err;
1119 + }
1120 +
1121 +- pri_node = of_find_node_by_path(path);
1122 ++ alias_node = of_find_node_by_path(path);
1123 ++
1124 ++ /*
1125 ++ * The pointer is used as an identifier not as a pointer, we
1126 ++ * can drop the refcount on the of__node immediately after
1127 ++ * getting it.
1128 ++ */
1129 ++ of_node_put(alias_node);
1130 ++
1131 ++ if (node == alias_node)
1132 ++ /* The primary timer lacks IRQ, use as clocksource */
1133 ++ return integrator_clocksource_init(rate, base);
1134 +
1135 + err = of_property_read_string(of_aliases,
1136 + "arm,timer-secondary", &path);
1137 +@@ -213,14 +223,11 @@ static int __init integrator_ap_timer_init_of(struct device_node *node)
1138 + return err;
1139 + }
1140 +
1141 ++ alias_node = of_find_node_by_path(path);
1142 +
1143 +- sec_node = of_find_node_by_path(path);
1144 +-
1145 +- if (node == pri_node)
1146 +- /* The primary timer lacks IRQ, use as clocksource */
1147 +- return integrator_clocksource_init(rate, base);
1148 ++ of_node_put(alias_node);
1149 +
1150 +- if (node == sec_node) {
1151 ++ if (node == alias_node) {
1152 + /* The secondary timer will drive the clock event */
1153 + irq = irq_of_parse_and_map(node, 0);
1154 + return integrator_clockevent_init(rate, base, irq);
1155 +diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
1156 +index 9e56bc411061..74c247972bb3 100644
1157 +--- a/drivers/cpuidle/cpuidle-pseries.c
1158 ++++ b/drivers/cpuidle/cpuidle-pseries.c
1159 +@@ -247,7 +247,13 @@ static int pseries_idle_probe(void)
1160 + return -ENODEV;
1161 +
1162 + if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
1163 +- if (lppaca_shared_proc(get_lppaca())) {
1164 ++ /*
1165 ++ * Use local_paca instead of get_lppaca() since
1166 ++ * preemption is not disabled, and it is not required in
1167 ++ * fact, since lppaca_ptr does not need to be the value
1168 ++ * associated to the current CPU, it can be from any CPU.
1169 ++ */
1170 ++ if (lppaca_shared_proc(local_paca->lppaca_ptr)) {
1171 + cpuidle_state_table = shared_states;
1172 + max_idle_state = ARRAY_SIZE(shared_states);
1173 + } else {
1174 +diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
1175 +index c51627660dbb..d9845099635e 100644
1176 +--- a/drivers/firmware/efi/libstub/Makefile
1177 ++++ b/drivers/firmware/efi/libstub/Makefile
1178 +@@ -9,7 +9,10 @@ cflags-$(CONFIG_X86_32) := -march=i386
1179 + cflags-$(CONFIG_X86_64) := -mcmodel=small
1180 + cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -O2 \
1181 + -fPIC -fno-strict-aliasing -mno-red-zone \
1182 +- -mno-mmx -mno-sse -fshort-wchar
1183 ++ -mno-mmx -mno-sse -fshort-wchar \
1184 ++ -Wno-pointer-sign \
1185 ++ $(call cc-disable-warning, address-of-packed-member) \
1186 ++ $(call cc-disable-warning, gnu)
1187 +
1188 + # arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
1189 + # disable the stackleak plugin
1190 +diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
1191 +index 7fa793672a7a..68e4b2b98c8f 100644
1192 +--- a/drivers/fpga/altera-cvp.c
1193 ++++ b/drivers/fpga/altera-cvp.c
1194 +@@ -468,14 +468,6 @@ static int altera_cvp_probe(struct pci_dev *pdev,
1195 + goto err_unmap;
1196 + }
1197 +
1198 +- ret = driver_create_file(&altera_cvp_driver.driver,
1199 +- &driver_attr_chkcfg);
1200 +- if (ret) {
1201 +- dev_err(&pdev->dev, "Can't create sysfs chkcfg file\n");
1202 +- fpga_mgr_unregister(mgr);
1203 +- goto err_unmap;
1204 +- }
1205 +-
1206 + return 0;
1207 +
1208 + err_unmap:
1209 +@@ -493,7 +485,6 @@ static void altera_cvp_remove(struct pci_dev *pdev)
1210 + struct altera_cvp_conf *conf = mgr->priv;
1211 + u16 cmd;
1212 +
1213 +- driver_remove_file(&altera_cvp_driver.driver, &driver_attr_chkcfg);
1214 + fpga_mgr_unregister(mgr);
1215 + pci_iounmap(pdev, conf->map);
1216 + pci_release_region(pdev, CVP_BAR);
1217 +@@ -502,7 +493,30 @@ static void altera_cvp_remove(struct pci_dev *pdev)
1218 + pci_write_config_word(pdev, PCI_COMMAND, cmd);
1219 + }
1220 +
1221 +-module_pci_driver(altera_cvp_driver);
1222 ++static int __init altera_cvp_init(void)
1223 ++{
1224 ++ int ret;
1225 ++
1226 ++ ret = pci_register_driver(&altera_cvp_driver);
1227 ++ if (ret)
1228 ++ return ret;
1229 ++
1230 ++ ret = driver_create_file(&altera_cvp_driver.driver,
1231 ++ &driver_attr_chkcfg);
1232 ++ if (ret)
1233 ++ pr_warn("Can't create sysfs chkcfg file\n");
1234 ++
1235 ++ return 0;
1236 ++}
1237 ++
1238 ++static void __exit altera_cvp_exit(void)
1239 ++{
1240 ++ driver_remove_file(&altera_cvp_driver.driver, &driver_attr_chkcfg);
1241 ++ pci_unregister_driver(&altera_cvp_driver);
1242 ++}
1243 ++
1244 ++module_init(altera_cvp_init);
1245 ++module_exit(altera_cvp_exit);
1246 +
1247 + MODULE_LICENSE("GPL v2");
1248 + MODULE_AUTHOR("Anatolij Gustschin <agust@××××.de>");
1249 +diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
1250 +index 2afd9de84a0d..dc42571e6fdc 100644
1251 +--- a/drivers/gpio/gpio-pl061.c
1252 ++++ b/drivers/gpio/gpio-pl061.c
1253 +@@ -54,6 +54,7 @@ struct pl061 {
1254 +
1255 + void __iomem *base;
1256 + struct gpio_chip gc;
1257 ++ struct irq_chip irq_chip;
1258 + int parent_irq;
1259 +
1260 + #ifdef CONFIG_PM
1261 +@@ -281,15 +282,6 @@ static int pl061_irq_set_wake(struct irq_data *d, unsigned int state)
1262 + return irq_set_irq_wake(pl061->parent_irq, state);
1263 + }
1264 +
1265 +-static struct irq_chip pl061_irqchip = {
1266 +- .name = "pl061",
1267 +- .irq_ack = pl061_irq_ack,
1268 +- .irq_mask = pl061_irq_mask,
1269 +- .irq_unmask = pl061_irq_unmask,
1270 +- .irq_set_type = pl061_irq_type,
1271 +- .irq_set_wake = pl061_irq_set_wake,
1272 +-};
1273 +-
1274 + static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
1275 + {
1276 + struct device *dev = &adev->dev;
1277 +@@ -328,6 +320,13 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
1278 + /*
1279 + * irq_chip support
1280 + */
1281 ++ pl061->irq_chip.name = dev_name(dev);
1282 ++ pl061->irq_chip.irq_ack = pl061_irq_ack;
1283 ++ pl061->irq_chip.irq_mask = pl061_irq_mask;
1284 ++ pl061->irq_chip.irq_unmask = pl061_irq_unmask;
1285 ++ pl061->irq_chip.irq_set_type = pl061_irq_type;
1286 ++ pl061->irq_chip.irq_set_wake = pl061_irq_set_wake;
1287 ++
1288 + writeb(0, pl061->base + GPIOIE); /* disable irqs */
1289 + irq = adev->irq[0];
1290 + if (irq < 0) {
1291 +@@ -336,14 +335,14 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
1292 + }
1293 + pl061->parent_irq = irq;
1294 +
1295 +- ret = gpiochip_irqchip_add(&pl061->gc, &pl061_irqchip,
1296 ++ ret = gpiochip_irqchip_add(&pl061->gc, &pl061->irq_chip,
1297 + 0, handle_bad_irq,
1298 + IRQ_TYPE_NONE);
1299 + if (ret) {
1300 + dev_info(&adev->dev, "could not add irqchip\n");
1301 + return ret;
1302 + }
1303 +- gpiochip_set_chained_irqchip(&pl061->gc, &pl061_irqchip,
1304 ++ gpiochip_set_chained_irqchip(&pl061->gc, &pl061->irq_chip,
1305 + irq, pl061_irq_handler);
1306 +
1307 + amba_set_drvdata(adev, pl061);
1308 +diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
1309 +index 8a926d1df939..2b4199adcd94 100644
1310 +--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
1311 ++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
1312 +@@ -116,16 +116,16 @@ static int uvd_v4_2_sw_init(void *handle)
1313 + if (r)
1314 + return r;
1315 +
1316 +- r = amdgpu_uvd_resume(adev);
1317 +- if (r)
1318 +- return r;
1319 +-
1320 + ring = &adev->uvd.inst->ring;
1321 + sprintf(ring->name, "uvd");
1322 + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
1323 + if (r)
1324 + return r;
1325 +
1326 ++ r = amdgpu_uvd_resume(adev);
1327 ++ if (r)
1328 ++ return r;
1329 ++
1330 + r = amdgpu_uvd_entity_init(adev);
1331 +
1332 + return r;
1333 +diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
1334 +index 50248059412e..88c006c5ee2c 100644
1335 +--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
1336 ++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
1337 +@@ -113,16 +113,16 @@ static int uvd_v5_0_sw_init(void *handle)
1338 + if (r)
1339 + return r;
1340 +
1341 +- r = amdgpu_uvd_resume(adev);
1342 +- if (r)
1343 +- return r;
1344 +-
1345 + ring = &adev->uvd.inst->ring;
1346 + sprintf(ring->name, "uvd");
1347 + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
1348 + if (r)
1349 + return r;
1350 +
1351 ++ r = amdgpu_uvd_resume(adev);
1352 ++ if (r)
1353 ++ return r;
1354 ++
1355 + r = amdgpu_uvd_entity_init(adev);
1356 +
1357 + return r;
1358 +diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
1359 +index 6ae82cc2e55e..d4070839ac80 100644
1360 +--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
1361 ++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
1362 +@@ -420,16 +420,16 @@ static int uvd_v6_0_sw_init(void *handle)
1363 + DRM_INFO("UVD ENC is disabled\n");
1364 + }
1365 +
1366 +- r = amdgpu_uvd_resume(adev);
1367 +- if (r)
1368 +- return r;
1369 +-
1370 + ring = &adev->uvd.inst->ring;
1371 + sprintf(ring->name, "uvd");
1372 + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
1373 + if (r)
1374 + return r;
1375 +
1376 ++ r = amdgpu_uvd_resume(adev);
1377 ++ if (r)
1378 ++ return r;
1379 ++
1380 + if (uvd_v6_0_enc_support(adev)) {
1381 + for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
1382 + ring = &adev->uvd.inst->ring_enc[i];
1383 +diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
1384 +index 9b7f8469bc5c..057151b17b45 100644
1385 +--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
1386 ++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
1387 +@@ -444,10 +444,6 @@ static int uvd_v7_0_sw_init(void *handle)
1388 + DRM_INFO("PSP loading UVD firmware\n");
1389 + }
1390 +
1391 +- r = amdgpu_uvd_resume(adev);
1392 +- if (r)
1393 +- return r;
1394 +-
1395 + for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
1396 + if (adev->uvd.harvest_config & (1 << j))
1397 + continue;
1398 +@@ -479,6 +475,10 @@ static int uvd_v7_0_sw_init(void *handle)
1399 + }
1400 + }
1401 +
1402 ++ r = amdgpu_uvd_resume(adev);
1403 ++ if (r)
1404 ++ return r;
1405 ++
1406 + r = amdgpu_uvd_entity_init(adev);
1407 + if (r)
1408 + return r;
1409 +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
1410 +index 1427675d0e5a..5aba50f63ac6 100644
1411 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
1412 ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
1413 +@@ -661,6 +661,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
1414 + {
1415 + uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
1416 + bool is_patched = false;
1417 ++ unsigned long flags;
1418 +
1419 + if (!kfd->init_complete)
1420 + return;
1421 +@@ -670,7 +671,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
1422 + return;
1423 + }
1424 +
1425 +- spin_lock(&kfd->interrupt_lock);
1426 ++ spin_lock_irqsave(&kfd->interrupt_lock, flags);
1427 +
1428 + if (kfd->interrupts_active
1429 + && interrupt_is_wanted(kfd, ih_ring_entry,
1430 +@@ -679,7 +680,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
1431 + is_patched ? patched_ihre : ih_ring_entry))
1432 + queue_work(kfd->ih_wq, &kfd->interrupt_work);
1433 +
1434 +- spin_unlock(&kfd->interrupt_lock);
1435 ++ spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
1436 + }
1437 +
1438 + int kgd2kfd_quiesce_mm(struct mm_struct *mm)
1439 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
1440 +index 9bfb040352e9..6a6d977ddd7a 100644
1441 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
1442 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
1443 +@@ -60,6 +60,11 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name,
1444 + return -EINVAL;
1445 + }
1446 +
1447 ++ if (!stream_state) {
1448 ++ DRM_ERROR("No stream state for CRTC%d\n", crtc->index);
1449 ++ return -EINVAL;
1450 ++ }
1451 ++
1452 + /* When enabling CRC, we should also disable dithering. */
1453 + if (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO) {
1454 + if (dc_stream_configure_crc(stream_state->ctx->dc,
1455 +diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
1456 +index 1bb4c318bdd4..f77bff5aa307 100644
1457 +--- a/drivers/gpu/drm/drm_atomic_helper.c
1458 ++++ b/drivers/gpu/drm/drm_atomic_helper.c
1459 +@@ -1425,6 +1425,9 @@ void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
1460 + DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
1461 + crtc->base.id, crtc->name);
1462 + }
1463 ++
1464 ++ if (old_state->fake_commit)
1465 ++ complete_all(&old_state->fake_commit->flip_done);
1466 + }
1467 + EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);
1468 +
1469 +diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
1470 +index 9973ac893635..3db232429630 100644
1471 +--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
1472 ++++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
1473 +@@ -334,13 +334,16 @@ int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
1474 +
1475 + usnic_dbg("\n");
1476 +
1477 +- mutex_lock(&us_ibdev->usdev_lock);
1478 + if (ib_get_eth_speed(ibdev, port, &props->active_speed,
1479 +- &props->active_width)) {
1480 +- mutex_unlock(&us_ibdev->usdev_lock);
1481 ++ &props->active_width))
1482 + return -EINVAL;
1483 +- }
1484 +
1485 ++ /*
1486 ++ * usdev_lock is acquired after (and not before) ib_get_eth_speed call
1487 ++ * because acquiring rtnl_lock in ib_get_eth_speed, while holding
1488 ++ * usdev_lock could lead to a deadlock.
1489 ++ */
1490 ++ mutex_lock(&us_ibdev->usdev_lock);
1491 + /* props being zeroed by the caller, avoid zeroing it here */
1492 +
1493 + props->lid = 0;
1494 +diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
1495 +index 8be27238a86e..fa98a5279647 100644
1496 +--- a/drivers/infiniband/sw/rxe/rxe_req.c
1497 ++++ b/drivers/infiniband/sw/rxe/rxe_req.c
1498 +@@ -640,6 +640,7 @@ next_wqe:
1499 + rmr->access = wqe->wr.wr.reg.access;
1500 + rmr->lkey = wqe->wr.wr.reg.key;
1501 + rmr->rkey = wqe->wr.wr.reg.key;
1502 ++ rmr->iova = wqe->wr.wr.reg.mr->iova;
1503 + wqe->state = wqe_state_done;
1504 + wqe->status = IB_WC_SUCCESS;
1505 + } else {
1506 +diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
1507 +index 0481223b1deb..f2ec882f96be 100644
1508 +--- a/drivers/md/dm-crypt.c
1509 ++++ b/drivers/md/dm-crypt.c
1510 +@@ -49,7 +49,7 @@ struct convert_context {
1511 + struct bio *bio_out;
1512 + struct bvec_iter iter_in;
1513 + struct bvec_iter iter_out;
1514 +- sector_t cc_sector;
1515 ++ u64 cc_sector;
1516 + atomic_t cc_pending;
1517 + union {
1518 + struct skcipher_request *req;
1519 +@@ -81,7 +81,7 @@ struct dm_crypt_request {
1520 + struct convert_context *ctx;
1521 + struct scatterlist sg_in[4];
1522 + struct scatterlist sg_out[4];
1523 +- sector_t iv_sector;
1524 ++ u64 iv_sector;
1525 + };
1526 +
1527 + struct crypt_config;
1528 +@@ -160,7 +160,7 @@ struct crypt_config {
1529 + struct iv_lmk_private lmk;
1530 + struct iv_tcw_private tcw;
1531 + } iv_gen_private;
1532 +- sector_t iv_offset;
1533 ++ u64 iv_offset;
1534 + unsigned int iv_size;
1535 + unsigned short int sector_size;
1536 + unsigned char sector_shift;
1537 +@@ -2780,7 +2780,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1538 + }
1539 +
1540 + ret = -EINVAL;
1541 +- if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
1542 ++ if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
1543 + ti->error = "Invalid device sector";
1544 + goto bad;
1545 + }
1546 +diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
1547 +index 2fb7bb4304ad..fddffe251bf6 100644
1548 +--- a/drivers/md/dm-delay.c
1549 ++++ b/drivers/md/dm-delay.c
1550 +@@ -141,7 +141,7 @@ static int delay_class_ctr(struct dm_target *ti, struct delay_class *c, char **a
1551 + unsigned long long tmpll;
1552 + char dummy;
1553 +
1554 +- if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1) {
1555 ++ if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
1556 + ti->error = "Invalid device sector";
1557 + return -EINVAL;
1558 + }
1559 +diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
1560 +index 32aabe27b37c..b86d2439ffc7 100644
1561 +--- a/drivers/md/dm-flakey.c
1562 ++++ b/drivers/md/dm-flakey.c
1563 +@@ -213,7 +213,7 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1564 + devname = dm_shift_arg(&as);
1565 +
1566 + r = -EINVAL;
1567 +- if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1) {
1568 ++ if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
1569 + ti->error = "Invalid device sector";
1570 + goto bad;
1571 + }
1572 +diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
1573 +index 2fc4213e02b5..671c24332802 100644
1574 +--- a/drivers/md/dm-kcopyd.c
1575 ++++ b/drivers/md/dm-kcopyd.c
1576 +@@ -56,15 +56,17 @@ struct dm_kcopyd_client {
1577 + atomic_t nr_jobs;
1578 +
1579 + /*
1580 +- * We maintain three lists of jobs:
1581 ++ * We maintain four lists of jobs:
1582 + *
1583 + * i) jobs waiting for pages
1584 + * ii) jobs that have pages, and are waiting for the io to be issued.
1585 +- * iii) jobs that have completed.
1586 ++ * iii) jobs that don't need to do any IO and just run a callback
1587 ++ * iv) jobs that have completed.
1588 + *
1589 +- * All three of these are protected by job_lock.
1590 ++ * All four of these are protected by job_lock.
1591 + */
1592 + spinlock_t job_lock;
1593 ++ struct list_head callback_jobs;
1594 + struct list_head complete_jobs;
1595 + struct list_head io_jobs;
1596 + struct list_head pages_jobs;
1597 +@@ -625,6 +627,7 @@ static void do_work(struct work_struct *work)
1598 + struct dm_kcopyd_client *kc = container_of(work,
1599 + struct dm_kcopyd_client, kcopyd_work);
1600 + struct blk_plug plug;
1601 ++ unsigned long flags;
1602 +
1603 + /*
1604 + * The order that these are called is *very* important.
1605 +@@ -633,6 +636,10 @@ static void do_work(struct work_struct *work)
1606 + * list. io jobs call wake when they complete and it all
1607 + * starts again.
1608 + */
1609 ++ spin_lock_irqsave(&kc->job_lock, flags);
1610 ++ list_splice_tail_init(&kc->callback_jobs, &kc->complete_jobs);
1611 ++ spin_unlock_irqrestore(&kc->job_lock, flags);
1612 ++
1613 + blk_start_plug(&plug);
1614 + process_jobs(&kc->complete_jobs, kc, run_complete_job);
1615 + process_jobs(&kc->pages_jobs, kc, run_pages_job);
1616 +@@ -650,7 +657,7 @@ static void dispatch_job(struct kcopyd_job *job)
1617 + struct dm_kcopyd_client *kc = job->kc;
1618 + atomic_inc(&kc->nr_jobs);
1619 + if (unlikely(!job->source.count))
1620 +- push(&kc->complete_jobs, job);
1621 ++ push(&kc->callback_jobs, job);
1622 + else if (job->pages == &zero_page_list)
1623 + push(&kc->io_jobs, job);
1624 + else
1625 +@@ -858,7 +865,7 @@ void dm_kcopyd_do_callback(void *j, int read_err, unsigned long write_err)
1626 + job->read_err = read_err;
1627 + job->write_err = write_err;
1628 +
1629 +- push(&kc->complete_jobs, job);
1630 ++ push(&kc->callback_jobs, job);
1631 + wake(kc);
1632 + }
1633 + EXPORT_SYMBOL(dm_kcopyd_do_callback);
1634 +@@ -888,6 +895,7 @@ struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *thro
1635 + return ERR_PTR(-ENOMEM);
1636 +
1637 + spin_lock_init(&kc->job_lock);
1638 ++ INIT_LIST_HEAD(&kc->callback_jobs);
1639 + INIT_LIST_HEAD(&kc->complete_jobs);
1640 + INIT_LIST_HEAD(&kc->io_jobs);
1641 + INIT_LIST_HEAD(&kc->pages_jobs);
1642 +@@ -939,6 +947,7 @@ void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
1643 + /* Wait for completion of all jobs submitted by this client. */
1644 + wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));
1645 +
1646 ++ BUG_ON(!list_empty(&kc->callback_jobs));
1647 + BUG_ON(!list_empty(&kc->complete_jobs));
1648 + BUG_ON(!list_empty(&kc->io_jobs));
1649 + BUG_ON(!list_empty(&kc->pages_jobs));
1650 +diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
1651 +index 2f7c44a006c4..caa08c4b84cd 100644
1652 +--- a/drivers/md/dm-linear.c
1653 ++++ b/drivers/md/dm-linear.c
1654 +@@ -45,7 +45,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1655 + }
1656 +
1657 + ret = -EINVAL;
1658 +- if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1) {
1659 ++ if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 || tmp != (sector_t)tmp) {
1660 + ti->error = "Invalid device sector";
1661 + goto bad;
1662 + }
1663 +diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
1664 +index 79eab1071ec2..5a51151f680d 100644
1665 +--- a/drivers/md/dm-raid1.c
1666 ++++ b/drivers/md/dm-raid1.c
1667 +@@ -943,7 +943,8 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
1668 + char dummy;
1669 + int ret;
1670 +
1671 +- if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1) {
1672 ++ if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1 ||
1673 ++ offset != (sector_t)offset) {
1674 + ti->error = "Invalid offset";
1675 + return -EINVAL;
1676 + }
1677 +diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
1678 +index ae4b33d10924..36805b12661e 100644
1679 +--- a/drivers/md/dm-snap.c
1680 ++++ b/drivers/md/dm-snap.c
1681 +@@ -19,6 +19,7 @@
1682 + #include <linux/vmalloc.h>
1683 + #include <linux/log2.h>
1684 + #include <linux/dm-kcopyd.h>
1685 ++#include <linux/semaphore.h>
1686 +
1687 + #include "dm.h"
1688 +
1689 +@@ -105,6 +106,9 @@ struct dm_snapshot {
1690 + /* The on disk metadata handler */
1691 + struct dm_exception_store *store;
1692 +
1693 ++ /* Maximum number of in-flight COW jobs. */
1694 ++ struct semaphore cow_count;
1695 ++
1696 + struct dm_kcopyd_client *kcopyd_client;
1697 +
1698 + /* Wait for events based on state_bits */
1699 +@@ -145,6 +149,19 @@ struct dm_snapshot {
1700 + #define RUNNING_MERGE 0
1701 + #define SHUTDOWN_MERGE 1
1702 +
1703 ++/*
1704 ++ * Maximum number of chunks being copied on write.
1705 ++ *
1706 ++ * The value was decided experimentally as a trade-off between memory
1707 ++ * consumption, stalling the kernel's workqueues and maintaining a high enough
1708 ++ * throughput.
1709 ++ */
1710 ++#define DEFAULT_COW_THRESHOLD 2048
1711 ++
1712 ++static int cow_threshold = DEFAULT_COW_THRESHOLD;
1713 ++module_param_named(snapshot_cow_threshold, cow_threshold, int, 0644);
1714 ++MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");
1715 ++
1716 + DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
1717 + "A percentage of time allocated for copy on write");
1718 +
1719 +@@ -1190,6 +1207,8 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1720 + goto bad_hash_tables;
1721 + }
1722 +
1723 ++ sema_init(&s->cow_count, (cow_threshold > 0) ? cow_threshold : INT_MAX);
1724 ++
1725 + s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
1726 + if (IS_ERR(s->kcopyd_client)) {
1727 + r = PTR_ERR(s->kcopyd_client);
1728 +@@ -1575,6 +1594,7 @@ static void copy_callback(int read_err, unsigned long write_err, void *context)
1729 + rb_link_node(&pe->out_of_order_node, parent, p);
1730 + rb_insert_color(&pe->out_of_order_node, &s->out_of_order_tree);
1731 + }
1732 ++ up(&s->cow_count);
1733 + }
1734 +
1735 + /*
1736 +@@ -1598,6 +1618,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
1737 + dest.count = src.count;
1738 +
1739 + /* Hand over to kcopyd */
1740 ++ down(&s->cow_count);
1741 + dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
1742 + }
1743 +
1744 +@@ -1617,6 +1638,7 @@ static void start_full_bio(struct dm_snap_pending_exception *pe,
1745 + pe->full_bio = bio;
1746 + pe->full_bio_end_io = bio->bi_end_io;
1747 +
1748 ++ down(&s->cow_count);
1749 + callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
1750 + copy_callback, pe);
1751 +
1752 +diff --git a/drivers/md/dm-unstripe.c b/drivers/md/dm-unstripe.c
1753 +index 954b7ab4e684..e673dacf6418 100644
1754 +--- a/drivers/md/dm-unstripe.c
1755 ++++ b/drivers/md/dm-unstripe.c
1756 +@@ -78,7 +78,7 @@ static int unstripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1757 + goto err;
1758 + }
1759 +
1760 +- if (sscanf(argv[4], "%llu%c", &start, &dummy) != 1) {
1761 ++ if (sscanf(argv[4], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
1762 + ti->error = "Invalid striped device offset";
1763 + goto err;
1764 + }
1765 +diff --git a/drivers/media/firewire/firedtv-avc.c b/drivers/media/firewire/firedtv-avc.c
1766 +index 1c933b2cf760..3ef5df1648d7 100644
1767 +--- a/drivers/media/firewire/firedtv-avc.c
1768 ++++ b/drivers/media/firewire/firedtv-avc.c
1769 +@@ -968,7 +968,8 @@ static int get_ca_object_length(struct avc_response_frame *r)
1770 + return r->operand[7];
1771 + }
1772 +
1773 +-int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len)
1774 ++int avc_ca_app_info(struct firedtv *fdtv, unsigned char *app_info,
1775 ++ unsigned int *len)
1776 + {
1777 + struct avc_command_frame *c = (void *)fdtv->avc_data;
1778 + struct avc_response_frame *r = (void *)fdtv->avc_data;
1779 +@@ -1009,7 +1010,8 @@ out:
1780 + return ret;
1781 + }
1782 +
1783 +-int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len)
1784 ++int avc_ca_info(struct firedtv *fdtv, unsigned char *app_info,
1785 ++ unsigned int *len)
1786 + {
1787 + struct avc_command_frame *c = (void *)fdtv->avc_data;
1788 + struct avc_response_frame *r = (void *)fdtv->avc_data;
1789 +diff --git a/drivers/media/firewire/firedtv.h b/drivers/media/firewire/firedtv.h
1790 +index 876cdec8329b..009905a19947 100644
1791 +--- a/drivers/media/firewire/firedtv.h
1792 ++++ b/drivers/media/firewire/firedtv.h
1793 +@@ -124,8 +124,10 @@ int avc_lnb_control(struct firedtv *fdtv, char voltage, char burst,
1794 + struct dvb_diseqc_master_cmd *diseqcmd);
1795 + void avc_remote_ctrl_work(struct work_struct *work);
1796 + int avc_register_remote_control(struct firedtv *fdtv);
1797 +-int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len);
1798 +-int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len);
1799 ++int avc_ca_app_info(struct firedtv *fdtv, unsigned char *app_info,
1800 ++ unsigned int *len);
1801 ++int avc_ca_info(struct firedtv *fdtv, unsigned char *app_info,
1802 ++ unsigned int *len);
1803 + int avc_ca_reset(struct firedtv *fdtv);
1804 + int avc_ca_pmt(struct firedtv *fdtv, char *app_info, int length);
1805 + int avc_ca_get_time_date(struct firedtv *fdtv, int *interval);
1806 +diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
1807 +index bb6add9d340e..5b8350e87e75 100644
1808 +--- a/drivers/media/platform/qcom/venus/core.c
1809 ++++ b/drivers/media/platform/qcom/venus/core.c
1810 +@@ -264,6 +264,14 @@ static int venus_probe(struct platform_device *pdev)
1811 + if (ret)
1812 + return ret;
1813 +
1814 ++ if (!dev->dma_parms) {
1815 ++ dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
1816 ++ GFP_KERNEL);
1817 ++ if (!dev->dma_parms)
1818 ++ return -ENOMEM;
1819 ++ }
1820 ++ dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
1821 ++
1822 + INIT_LIST_HEAD(&core->instances);
1823 + mutex_init(&core->lock);
1824 + INIT_DELAYED_WORK(&core->work, venus_sys_error_handler);
1825 +diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
1826 +index d46dc432456c..361abbc00486 100644
1827 +--- a/drivers/media/usb/uvc/uvc_driver.c
1828 ++++ b/drivers/media/usb/uvc/uvc_driver.c
1829 +@@ -1824,11 +1824,7 @@ static void uvc_delete(struct kref *kref)
1830 + usb_put_intf(dev->intf);
1831 + usb_put_dev(dev->udev);
1832 +
1833 +- if (dev->vdev.dev)
1834 +- v4l2_device_unregister(&dev->vdev);
1835 + #ifdef CONFIG_MEDIA_CONTROLLER
1836 +- if (media_devnode_is_registered(dev->mdev.devnode))
1837 +- media_device_unregister(&dev->mdev);
1838 + media_device_cleanup(&dev->mdev);
1839 + #endif
1840 +
1841 +@@ -1885,6 +1881,15 @@ static void uvc_unregister_video(struct uvc_device *dev)
1842 +
1843 + uvc_debugfs_cleanup_stream(stream);
1844 + }
1845 ++
1846 ++ uvc_status_unregister(dev);
1847 ++
1848 ++ if (dev->vdev.dev)
1849 ++ v4l2_device_unregister(&dev->vdev);
1850 ++#ifdef CONFIG_MEDIA_CONTROLLER
1851 ++ if (media_devnode_is_registered(dev->mdev.devnode))
1852 ++ media_device_unregister(&dev->mdev);
1853 ++#endif
1854 + }
1855 +
1856 + int uvc_register_video_device(struct uvc_device *dev,
1857 +diff --git a/drivers/media/usb/uvc/uvc_status.c b/drivers/media/usb/uvc/uvc_status.c
1858 +index 0722dc684378..883e4cab45e7 100644
1859 +--- a/drivers/media/usb/uvc/uvc_status.c
1860 ++++ b/drivers/media/usb/uvc/uvc_status.c
1861 +@@ -54,7 +54,7 @@ error:
1862 + return ret;
1863 + }
1864 +
1865 +-static void uvc_input_cleanup(struct uvc_device *dev)
1866 ++static void uvc_input_unregister(struct uvc_device *dev)
1867 + {
1868 + if (dev->input)
1869 + input_unregister_device(dev->input);
1870 +@@ -71,7 +71,7 @@ static void uvc_input_report_key(struct uvc_device *dev, unsigned int code,
1871 +
1872 + #else
1873 + #define uvc_input_init(dev)
1874 +-#define uvc_input_cleanup(dev)
1875 ++#define uvc_input_unregister(dev)
1876 + #define uvc_input_report_key(dev, code, value)
1877 + #endif /* CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV */
1878 +
1879 +@@ -292,12 +292,16 @@ int uvc_status_init(struct uvc_device *dev)
1880 + return 0;
1881 + }
1882 +
1883 +-void uvc_status_cleanup(struct uvc_device *dev)
1884 ++void uvc_status_unregister(struct uvc_device *dev)
1885 + {
1886 + usb_kill_urb(dev->int_urb);
1887 ++ uvc_input_unregister(dev);
1888 ++}
1889 ++
1890 ++void uvc_status_cleanup(struct uvc_device *dev)
1891 ++{
1892 + usb_free_urb(dev->int_urb);
1893 + kfree(dev->status);
1894 +- uvc_input_cleanup(dev);
1895 + }
1896 +
1897 + int uvc_status_start(struct uvc_device *dev, gfp_t flags)
1898 +diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
1899 +index e5f5d84f1d1d..a738486fd9d6 100644
1900 +--- a/drivers/media/usb/uvc/uvcvideo.h
1901 ++++ b/drivers/media/usb/uvc/uvcvideo.h
1902 +@@ -750,6 +750,7 @@ int uvc_register_video_device(struct uvc_device *dev,
1903 +
1904 + /* Status */
1905 + int uvc_status_init(struct uvc_device *dev);
1906 ++void uvc_status_unregister(struct uvc_device *dev);
1907 + void uvc_status_cleanup(struct uvc_device *dev);
1908 + int uvc_status_start(struct uvc_device *dev, gfp_t flags);
1909 + void uvc_status_stop(struct uvc_device *dev);
1910 +diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
1911 +index be53044086c7..fbc56ee99682 100644
1912 +--- a/drivers/mmc/host/atmel-mci.c
1913 ++++ b/drivers/mmc/host/atmel-mci.c
1914 +@@ -1954,13 +1954,14 @@ static void atmci_tasklet_func(unsigned long priv)
1915 + }
1916 +
1917 + atmci_request_end(host, host->mrq);
1918 +- state = STATE_IDLE;
1919 ++ goto unlock; /* atmci_request_end() sets host->state */
1920 + break;
1921 + }
1922 + } while (state != prev_state);
1923 +
1924 + host->state = state;
1925 +
1926 ++unlock:
1927 + spin_unlock(&host->lock);
1928 + }
1929 +
1930 +diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
1931 +index 8da3d39e3218..258918d8a416 100644
1932 +--- a/drivers/net/dsa/mv88e6xxx/chip.c
1933 ++++ b/drivers/net/dsa/mv88e6xxx/chip.c
1934 +@@ -2391,6 +2391,107 @@ static int mv88e6xxx_stats_setup(struct mv88e6xxx_chip *chip)
1935 + return mv88e6xxx_g1_stats_clear(chip);
1936 + }
1937 +
1938 ++/* The mv88e6390 has some hidden registers used for debug and
1939 ++ * development. The errata also makes use of them.
1940 ++ */
1941 ++static int mv88e6390_hidden_write(struct mv88e6xxx_chip *chip, int port,
1942 ++ int reg, u16 val)
1943 ++{
1944 ++ u16 ctrl;
1945 ++ int err;
1946 ++
1947 ++ err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_DATA_PORT,
1948 ++ PORT_RESERVED_1A, val);
1949 ++ if (err)
1950 ++ return err;
1951 ++
1952 ++ ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_WRITE |
1953 ++ PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT |
1954 ++ reg;
1955 ++
1956 ++ return mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT,
1957 ++ PORT_RESERVED_1A, ctrl);
1958 ++}
1959 ++
1960 ++static int mv88e6390_hidden_wait(struct mv88e6xxx_chip *chip)
1961 ++{
1962 ++ return mv88e6xxx_wait(chip, PORT_RESERVED_1A_CTRL_PORT,
1963 ++ PORT_RESERVED_1A, PORT_RESERVED_1A_BUSY);
1964 ++}
1965 ++
1966 ++
1967 ++static int mv88e6390_hidden_read(struct mv88e6xxx_chip *chip, int port,
1968 ++ int reg, u16 *val)
1969 ++{
1970 ++ u16 ctrl;
1971 ++ int err;
1972 ++
1973 ++ ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_READ |
1974 ++ PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT |
1975 ++ reg;
1976 ++
1977 ++ err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT,
1978 ++ PORT_RESERVED_1A, ctrl);
1979 ++ if (err)
1980 ++ return err;
1981 ++
1982 ++ err = mv88e6390_hidden_wait(chip);
1983 ++ if (err)
1984 ++ return err;
1985 ++
1986 ++ return mv88e6xxx_port_read(chip, PORT_RESERVED_1A_DATA_PORT,
1987 ++ PORT_RESERVED_1A, val);
1988 ++}
1989 ++
1990 ++/* Check if the errata has already been applied. */
1991 ++static bool mv88e6390_setup_errata_applied(struct mv88e6xxx_chip *chip)
1992 ++{
1993 ++ int port;
1994 ++ int err;
1995 ++ u16 val;
1996 ++
1997 ++ for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
1998 ++ err = mv88e6390_hidden_read(chip, port, 0, &val);
1999 ++ if (err) {
2000 ++ dev_err(chip->dev,
2001 ++ "Error reading hidden register: %d\n", err);
2002 ++ return false;
2003 ++ }
2004 ++ if (val != 0x01c0)
2005 ++ return false;
2006 ++ }
2007 ++
2008 ++ return true;
2009 ++}
2010 ++
2011 ++/* The 6390 copper ports have an errata which require poking magic
2012 ++ * values into undocumented hidden registers and then performing a
2013 ++ * software reset.
2014 ++ */
2015 ++static int mv88e6390_setup_errata(struct mv88e6xxx_chip *chip)
2016 ++{
2017 ++ int port;
2018 ++ int err;
2019 ++
2020 ++ if (mv88e6390_setup_errata_applied(chip))
2021 ++ return 0;
2022 ++
2023 ++ /* Set the ports into blocking mode */
2024 ++ for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
2025 ++ err = mv88e6xxx_port_set_state(chip, port, BR_STATE_DISABLED);
2026 ++ if (err)
2027 ++ return err;
2028 ++ }
2029 ++
2030 ++ for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
2031 ++ err = mv88e6390_hidden_write(chip, port, 0, 0x01c0);
2032 ++ if (err)
2033 ++ return err;
2034 ++ }
2035 ++
2036 ++ return mv88e6xxx_software_reset(chip);
2037 ++}
2038 ++
2039 + static int mv88e6xxx_setup(struct dsa_switch *ds)
2040 + {
2041 + struct mv88e6xxx_chip *chip = ds->priv;
2042 +@@ -2403,6 +2504,12 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
2043 +
2044 + mutex_lock(&chip->reg_lock);
2045 +
2046 ++ if (chip->info->ops->setup_errata) {
2047 ++ err = chip->info->ops->setup_errata(chip);
2048 ++ if (err)
2049 ++ goto unlock;
2050 ++ }
2051 ++
2052 + /* Cache the cmode of each port. */
2053 + for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
2054 + if (chip->info->ops->port_get_cmode) {
2055 +@@ -3201,6 +3308,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
2056 +
2057 + static const struct mv88e6xxx_ops mv88e6190_ops = {
2058 + /* MV88E6XXX_FAMILY_6390 */
2059 ++ .setup_errata = mv88e6390_setup_errata,
2060 + .irl_init_all = mv88e6390_g2_irl_init_all,
2061 + .get_eeprom = mv88e6xxx_g2_get_eeprom8,
2062 + .set_eeprom = mv88e6xxx_g2_set_eeprom8,
2063 +@@ -3243,6 +3351,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
2064 +
2065 + static const struct mv88e6xxx_ops mv88e6190x_ops = {
2066 + /* MV88E6XXX_FAMILY_6390 */
2067 ++ .setup_errata = mv88e6390_setup_errata,
2068 + .irl_init_all = mv88e6390_g2_irl_init_all,
2069 + .get_eeprom = mv88e6xxx_g2_get_eeprom8,
2070 + .set_eeprom = mv88e6xxx_g2_set_eeprom8,
2071 +@@ -3285,6 +3394,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
2072 +
2073 + static const struct mv88e6xxx_ops mv88e6191_ops = {
2074 + /* MV88E6XXX_FAMILY_6390 */
2075 ++ .setup_errata = mv88e6390_setup_errata,
2076 + .irl_init_all = mv88e6390_g2_irl_init_all,
2077 + .get_eeprom = mv88e6xxx_g2_get_eeprom8,
2078 + .set_eeprom = mv88e6xxx_g2_set_eeprom8,
2079 +@@ -3374,6 +3484,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
2080 +
2081 + static const struct mv88e6xxx_ops mv88e6290_ops = {
2082 + /* MV88E6XXX_FAMILY_6390 */
2083 ++ .setup_errata = mv88e6390_setup_errata,
2084 + .irl_init_all = mv88e6390_g2_irl_init_all,
2085 + .get_eeprom = mv88e6xxx_g2_get_eeprom8,
2086 + .set_eeprom = mv88e6xxx_g2_set_eeprom8,
2087 +@@ -3675,6 +3786,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
2088 +
2089 + static const struct mv88e6xxx_ops mv88e6390_ops = {
2090 + /* MV88E6XXX_FAMILY_6390 */
2091 ++ .setup_errata = mv88e6390_setup_errata,
2092 + .irl_init_all = mv88e6390_g2_irl_init_all,
2093 + .get_eeprom = mv88e6xxx_g2_get_eeprom8,
2094 + .set_eeprom = mv88e6xxx_g2_set_eeprom8,
2095 +@@ -3722,6 +3834,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
2096 +
2097 + static const struct mv88e6xxx_ops mv88e6390x_ops = {
2098 + /* MV88E6XXX_FAMILY_6390 */
2099 ++ .setup_errata = mv88e6390_setup_errata,
2100 + .irl_init_all = mv88e6390_g2_irl_init_all,
2101 + .get_eeprom = mv88e6xxx_g2_get_eeprom8,
2102 + .set_eeprom = mv88e6xxx_g2_set_eeprom8,
2103 +diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
2104 +index f9ecb7872d32..546651d8c3e1 100644
2105 +--- a/drivers/net/dsa/mv88e6xxx/chip.h
2106 ++++ b/drivers/net/dsa/mv88e6xxx/chip.h
2107 +@@ -300,6 +300,11 @@ struct mv88e6xxx_mdio_bus {
2108 + };
2109 +
2110 + struct mv88e6xxx_ops {
2111 ++ /* Switch Setup Errata, called early in the switch setup to
2112 ++ * allow any errata actions to be performed
2113 ++ */
2114 ++ int (*setup_errata)(struct mv88e6xxx_chip *chip);
2115 ++
2116 + int (*ieee_pri_map)(struct mv88e6xxx_chip *chip);
2117 + int (*ip_pri_map)(struct mv88e6xxx_chip *chip);
2118 +
2119 +diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
2120 +index f32f56af8e35..b31910023bb6 100644
2121 +--- a/drivers/net/dsa/mv88e6xxx/port.h
2122 ++++ b/drivers/net/dsa/mv88e6xxx/port.h
2123 +@@ -251,6 +251,16 @@
2124 + /* Offset 0x19: Port IEEE Priority Remapping Registers (4-7) */
2125 + #define MV88E6095_PORT_IEEE_PRIO_REMAP_4567 0x19
2126 +
2127 ++/* Offset 0x1a: Magic undocumented errata register */
2128 ++#define PORT_RESERVED_1A 0x1a
2129 ++#define PORT_RESERVED_1A_BUSY BIT(15)
2130 ++#define PORT_RESERVED_1A_WRITE BIT(14)
2131 ++#define PORT_RESERVED_1A_READ 0
2132 ++#define PORT_RESERVED_1A_PORT_SHIFT 5
2133 ++#define PORT_RESERVED_1A_BLOCK (0xf << 10)
2134 ++#define PORT_RESERVED_1A_CTRL_PORT 4
2135 ++#define PORT_RESERVED_1A_DATA_PORT 5
2136 ++
2137 + int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg,
2138 + u16 *val);
2139 + int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg,
2140 +diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
2141 +index 37c76945ad9b..e1f821edbc21 100644
2142 +--- a/drivers/net/ethernet/intel/e1000e/ptp.c
2143 ++++ b/drivers/net/ethernet/intel/e1000e/ptp.c
2144 +@@ -173,10 +173,14 @@ static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
2145 + struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
2146 + ptp_clock_info);
2147 + unsigned long flags;
2148 +- u64 ns;
2149 ++ u64 cycles, ns;
2150 +
2151 + spin_lock_irqsave(&adapter->systim_lock, flags);
2152 +- ns = timecounter_read(&adapter->tc);
2153 ++
2154 ++ /* Use timecounter_cyc2time() to allow non-monotonic SYSTIM readings */
2155 ++ cycles = adapter->cc.read(&adapter->cc);
2156 ++ ns = timecounter_cyc2time(&adapter->tc, cycles);
2157 ++
2158 + spin_unlock_irqrestore(&adapter->systim_lock, flags);
2159 +
2160 + *ts = ns_to_timespec64(ns);
2161 +@@ -232,9 +236,12 @@ static void e1000e_systim_overflow_work(struct work_struct *work)
2162 + systim_overflow_work.work);
2163 + struct e1000_hw *hw = &adapter->hw;
2164 + struct timespec64 ts;
2165 ++ u64 ns;
2166 +
2167 +- adapter->ptp_clock_info.gettime64(&adapter->ptp_clock_info, &ts);
2168 ++ /* Update the timecounter */
2169 ++ ns = timecounter_read(&adapter->tc);
2170 +
2171 ++ ts = ns_to_timespec64(ns);
2172 + e_dbg("SYSTIM overflow check at %lld.%09lu\n",
2173 + (long long) ts.tv_sec, ts.tv_nsec);
2174 +
2175 +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
2176 +index add124e0381d..b27f7a968820 100644
2177 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
2178 ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
2179 +@@ -4,6 +4,7 @@
2180 + #include "ixgbe.h"
2181 + #include <net/xfrm.h>
2182 + #include <crypto/aead.h>
2183 ++#include <linux/if_bridge.h>
2184 +
2185 + /**
2186 + * ixgbe_ipsec_set_tx_sa - set the Tx SA registers
2187 +@@ -676,7 +677,8 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
2188 + } else {
2189 + struct tx_sa tsa;
2190 +
2191 +- if (adapter->num_vfs)
2192 ++ if (adapter->num_vfs &&
2193 ++ adapter->bridge_mode != BRIDGE_MODE_VEPA)
2194 + return -EOPNOTSUPP;
2195 +
2196 + /* find the first unused index */
2197 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2198 +index 3d1159f8a53f..de821a9fdfaf 100644
2199 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2200 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2201 +@@ -4635,12 +4635,15 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
2202 + lower_dev,
2203 + upper_dev);
2204 + } else if (netif_is_lag_master(upper_dev)) {
2205 +- if (info->linking)
2206 ++ if (info->linking) {
2207 + err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
2208 + upper_dev);
2209 +- else
2210 ++ } else {
2211 ++ mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port,
2212 ++ false);
2213 + mlxsw_sp_port_lag_leave(mlxsw_sp_port,
2214 + upper_dev);
2215 ++ }
2216 + } else if (netif_is_ovs_master(upper_dev)) {
2217 + if (info->linking)
2218 + err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
2219 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
2220 +index 4eb64cb0d9a1..0d9ea37c5d21 100644
2221 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
2222 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
2223 +@@ -1761,7 +1761,7 @@ static void
2224 + mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
2225 + struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
2226 + {
2227 +- u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : vid;
2228 ++ u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid;
2229 + struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2230 +
2231 + mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
2232 +diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
2233 +index 07f3080eca18..5f45ffeeecb4 100644
2234 +--- a/drivers/net/ethernet/realtek/r8169.c
2235 ++++ b/drivers/net/ethernet/realtek/r8169.c
2236 +@@ -214,6 +214,8 @@ enum cfg_version {
2237 + };
2238 +
2239 + static const struct pci_device_id rtl8169_pci_tbl[] = {
2240 ++ { PCI_VDEVICE(REALTEK, 0x2502), RTL_CFG_1 },
2241 ++ { PCI_VDEVICE(REALTEK, 0x2600), RTL_CFG_1 },
2242 + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
2243 + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
2244 + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 },
2245 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
2246 +index 774e1ff01c9a..735ad838e2ba 100644
2247 +--- a/drivers/net/usb/qmi_wwan.c
2248 ++++ b/drivers/net/usb/qmi_wwan.c
2249 +@@ -123,6 +123,7 @@ static void qmimux_setup(struct net_device *dev)
2250 + dev->addr_len = 0;
2251 + dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
2252 + dev->netdev_ops = &qmimux_netdev_ops;
2253 ++ dev->mtu = 1500;
2254 + dev->needs_free_netdev = true;
2255 + }
2256 +
2257 +diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
2258 +index a63c97e2c50c..6f10331e986b 100644
2259 +--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
2260 ++++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
2261 +@@ -71,7 +71,7 @@ void ath10k_sta_update_rx_tid_stats_ampdu(struct ath10k *ar, u16 peer_id, u8 tid
2262 + spin_lock_bh(&ar->data_lock);
2263 +
2264 + peer = ath10k_peer_find_by_id(ar, peer_id);
2265 +- if (!peer)
2266 ++ if (!peer || !peer->sta)
2267 + goto out;
2268 +
2269 + arsta = (struct ath10k_sta *)peer->sta->drv_priv;
2270 +diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
2271 +index 4d1cd90d6d27..03d4cc6f35bc 100644
2272 +--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
2273 ++++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
2274 +@@ -2589,7 +2589,7 @@ static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
2275 + rcu_read_lock();
2276 + spin_lock_bh(&ar->data_lock);
2277 + peer = ath10k_peer_find_by_id(ar, peer_id);
2278 +- if (!peer) {
2279 ++ if (!peer || !peer->sta) {
2280 + ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n",
2281 + peer_id);
2282 + goto out;
2283 +@@ -2642,7 +2642,7 @@ static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
2284 + rcu_read_lock();
2285 + spin_lock_bh(&ar->data_lock);
2286 + peer = ath10k_peer_find_by_id(ar, peer_id);
2287 +- if (!peer) {
2288 ++ if (!peer || !peer->sta) {
2289 + ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
2290 + peer_id);
2291 + goto out;
2292 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
2293 +index afed549f5645..9a764af30f36 100644
2294 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
2295 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
2296 +@@ -2938,7 +2938,8 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
2297 + iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2298 + }
2299 +
2300 +- iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band);
2301 ++ iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
2302 ++ false);
2303 + ret = iwl_mvm_update_sta(mvm, vif, sta);
2304 + } else if (old_state == IEEE80211_STA_ASSOC &&
2305 + new_state == IEEE80211_STA_AUTHORIZED) {
2306 +@@ -2954,7 +2955,8 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
2307 + /* enable beacon filtering */
2308 + WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
2309 +
2310 +- iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band);
2311 ++ iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
2312 ++ true);
2313 +
2314 + ret = 0;
2315 + } else if (old_state == IEEE80211_STA_AUTHORIZED &&
2316 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
2317 +index b3987a0a7018..6b65ad6c9b56 100644
2318 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
2319 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
2320 +@@ -1685,7 +1685,7 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2321 + #endif /* CONFIG_IWLWIFI_DEBUGFS */
2322 +
2323 + /* rate scaling */
2324 +-int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init);
2325 ++int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync);
2326 + void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg);
2327 + int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate);
2328 + void rs_update_last_rssi(struct iwl_mvm *mvm,
2329 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
2330 +index f2830b5693d2..6b9c670fcef8 100644
2331 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
2332 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
2333 +@@ -1280,7 +1280,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2334 + (unsigned long)(lq_sta->last_tx +
2335 + (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) {
2336 + IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n");
2337 +- iwl_mvm_rs_rate_init(mvm, sta, info->band);
2338 ++ iwl_mvm_rs_rate_init(mvm, sta, info->band, true);
2339 + return;
2340 + }
2341 + lq_sta->last_tx = jiffies;
2342 +@@ -2870,9 +2870,8 @@ void rs_update_last_rssi(struct iwl_mvm *mvm,
2343 + static void rs_initialize_lq(struct iwl_mvm *mvm,
2344 + struct ieee80211_sta *sta,
2345 + struct iwl_lq_sta *lq_sta,
2346 +- enum nl80211_band band)
2347 ++ enum nl80211_band band, bool update)
2348 + {
2349 +- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2350 + struct iwl_scale_tbl_info *tbl;
2351 + struct rs_rate *rate;
2352 + u8 active_tbl = 0;
2353 +@@ -2901,8 +2900,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
2354 + rs_set_expected_tpt_table(lq_sta, tbl);
2355 + rs_fill_lq_cmd(mvm, sta, lq_sta, rate);
2356 + /* TODO restore station should remember the lq cmd */
2357 +- iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq,
2358 +- mvmsta->sta_state < IEEE80211_STA_AUTHORIZED);
2359 ++ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, !update);
2360 + }
2361 +
2362 + static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
2363 +@@ -3155,7 +3153,7 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
2364 + * Called after adding a new station to initialize rate scaling
2365 + */
2366 + static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2367 +- enum nl80211_band band)
2368 ++ enum nl80211_band band, bool update)
2369 + {
2370 + int i, j;
2371 + struct ieee80211_hw *hw = mvm->hw;
2372 +@@ -3235,7 +3233,7 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2373 + #ifdef CONFIG_IWLWIFI_DEBUGFS
2374 + iwl_mvm_reset_frame_stats(mvm);
2375 + #endif
2376 +- rs_initialize_lq(mvm, sta, lq_sta, band);
2377 ++ rs_initialize_lq(mvm, sta, lq_sta, band, update);
2378 + }
2379 +
2380 + static void rs_drv_rate_update(void *mvm_r,
2381 +@@ -3255,7 +3253,7 @@ static void rs_drv_rate_update(void *mvm_r,
2382 + for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
2383 + ieee80211_stop_tx_ba_session(sta, tid);
2384 +
2385 +- iwl_mvm_rs_rate_init(mvm, sta, sband->band);
2386 ++ iwl_mvm_rs_rate_init(mvm, sta, sband->band, true);
2387 + }
2388 +
2389 + #ifdef CONFIG_MAC80211_DEBUGFS
2390 +@@ -4112,12 +4110,12 @@ static const struct rate_control_ops rs_mvm_ops_drv = {
2391 + };
2392 +
2393 + void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2394 +- enum nl80211_band band)
2395 ++ enum nl80211_band band, bool update)
2396 + {
2397 + if (iwl_mvm_has_tlc_offload(mvm))
2398 + rs_fw_rate_init(mvm, sta, band);
2399 + else
2400 +- rs_drv_rate_init(mvm, sta, band);
2401 ++ rs_drv_rate_init(mvm, sta, band, update);
2402 + }
2403 +
2404 + int iwl_mvm_rate_control_register(void)
2405 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
2406 +index d2cf484e2b73..8e7f993e2911 100644
2407 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
2408 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
2409 +@@ -420,7 +420,7 @@ struct iwl_lq_sta {
2410 +
2411 + /* Initialize station's rate scaling information after adding station */
2412 + void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2413 +- enum nl80211_band band);
2414 ++ enum nl80211_band band, bool init);
2415 +
2416 + /* Notify RS about Tx status */
2417 + void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2418 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
2419 +index b002a7afb5f5..6a5349401aa9 100644
2420 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
2421 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
2422 +@@ -900,20 +900,19 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
2423 +
2424 + /**
2425 + * iwl_mvm_send_lq_cmd() - Send link quality command
2426 +- * @init: This command is sent as part of station initialization right
2427 +- * after station has been added.
2428 ++ * @sync: This command can be sent synchronously.
2429 + *
2430 + * The link quality command is sent as the last step of station creation.
2431 + * This is the special case in which init is set and we call a callback in
2432 + * this case to clear the state indicating that station creation is in
2433 + * progress.
2434 + */
2435 +-int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
2436 ++int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync)
2437 + {
2438 + struct iwl_host_cmd cmd = {
2439 + .id = LQ_CMD,
2440 + .len = { sizeof(struct iwl_lq_cmd), },
2441 +- .flags = init ? 0 : CMD_ASYNC,
2442 ++ .flags = sync ? 0 : CMD_ASYNC,
2443 + .data = { lq, },
2444 + };
2445 +
2446 +diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
2447 +index eda57ef12fd0..baa9cee6fa2c 100644
2448 +--- a/drivers/of/overlay.c
2449 ++++ b/drivers/of/overlay.c
2450 +@@ -378,7 +378,9 @@ static int add_changeset_node(struct overlay_changeset *ovcs,
2451 + if (ret)
2452 + return ret;
2453 +
2454 +- return build_changeset_next_level(ovcs, tchild, node);
2455 ++ ret = build_changeset_next_level(ovcs, tchild, node);
2456 ++ of_node_put(tchild);
2457 ++ return ret;
2458 + }
2459 +
2460 + if (node->phandle && tchild->phandle)
2461 +diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
2462 +index 2d6e272315a8..db3556dc90d1 100644
2463 +--- a/drivers/platform/x86/asus-wmi.c
2464 ++++ b/drivers/platform/x86/asus-wmi.c
2465 +@@ -2231,7 +2231,8 @@ static int asus_wmi_add(struct platform_device *pdev)
2466 + err = asus_wmi_backlight_init(asus);
2467 + if (err && err != -ENODEV)
2468 + goto fail_backlight;
2469 +- }
2470 ++ } else
2471 ++ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT, 2, NULL);
2472 +
2473 + status = wmi_install_notify_handler(asus->driver->event_guid,
2474 + asus_wmi_notify, asus);
2475 +diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
2476 +index 59ecbb3b53b5..a33628550425 100644
2477 +--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
2478 ++++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
2479 +@@ -1266,7 +1266,7 @@ void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map,
2480 +
2481 + for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
2482 + ld = MR_TargetIdToLdGet(ldCount, drv_map);
2483 +- if (ld >= MAX_LOGICAL_DRIVES_EXT) {
2484 ++ if (ld >= MAX_LOGICAL_DRIVES_EXT - 1) {
2485 + lbInfo[ldCount].loadBalanceFlag = 0;
2486 + continue;
2487 + }
2488 +diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
2489 +index c7f95bace353..f45c54f02bfa 100644
2490 +--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
2491 ++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
2492 +@@ -2832,7 +2832,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
2493 + device_id < instance->fw_supported_vd_count)) {
2494 +
2495 + ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
2496 +- if (ld >= instance->fw_supported_vd_count)
2497 ++ if (ld >= instance->fw_supported_vd_count - 1)
2498 + fp_possible = 0;
2499 + else {
2500 + raid = MR_LdRaidGet(ld, local_map_ptr);
2501 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
2502 +index 59d7844ee022..b59bba3e6516 100644
2503 +--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
2504 ++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
2505 +@@ -3344,8 +3344,9 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
2506 + static inline void
2507 + _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
2508 + {
2509 ++ wmb();
2510 + __raw_writeq(b, addr);
2511 +- mmiowb();
2512 ++ barrier();
2513 + }
2514 + #else
2515 + static inline void
2516 +diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
2517 +index e5bd035ebad0..4de740da547b 100644
2518 +--- a/drivers/scsi/qedi/qedi_main.c
2519 ++++ b/drivers/scsi/qedi/qedi_main.c
2520 +@@ -952,6 +952,9 @@ static int qedi_find_boot_info(struct qedi_ctx *qedi,
2521 + cls_sess = iscsi_conn_to_session(cls_conn);
2522 + sess = cls_sess->dd_data;
2523 +
2524 ++ if (!iscsi_is_session_online(cls_sess))
2525 ++ continue;
2526 ++
2527 + if (pri_ctrl_flags) {
2528 + if (!strcmp(pri_tgt->iscsi_name, sess->targetname) &&
2529 + !strcmp(pri_tgt->ip_addr, ep_ip_addr)) {
2530 +diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
2531 +index 2112ea6723c6..8c1a232ac6bf 100644
2532 +--- a/drivers/scsi/smartpqi/smartpqi_init.c
2533 ++++ b/drivers/scsi/smartpqi/smartpqi_init.c
2534 +@@ -2720,6 +2720,9 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
2535 + switch (response->header.iu_type) {
2536 + case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
2537 + case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
2538 ++ if (io_request->scmd)
2539 ++ io_request->scmd->result = 0;
2540 ++ /* fall through */
2541 + case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
2542 + break;
2543 + case PQI_RESPONSE_IU_TASK_MANAGEMENT:
2544 +@@ -6686,6 +6689,7 @@ static void pqi_shutdown(struct pci_dev *pci_dev)
2545 + * storage.
2546 + */
2547 + rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
2548 ++ pqi_free_interrupts(ctrl_info);
2549 + pqi_reset(ctrl_info);
2550 + if (rc == 0)
2551 + return;
2552 +diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
2553 +index 14da8cc2246a..0346630b67c8 100644
2554 +--- a/drivers/staging/erofs/unzip_vle.c
2555 ++++ b/drivers/staging/erofs/unzip_vle.c
2556 +@@ -724,13 +724,18 @@ static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
2557 + struct z_erofs_vle_unzip_io *io = tagptr_unfold_ptr(t);
2558 + bool background = tagptr_unfold_tags(t);
2559 +
2560 +- if (atomic_add_return(bios, &io->pending_bios))
2561 ++ if (!background) {
2562 ++ unsigned long flags;
2563 ++
2564 ++ spin_lock_irqsave(&io->u.wait.lock, flags);
2565 ++ if (!atomic_add_return(bios, &io->pending_bios))
2566 ++ wake_up_locked(&io->u.wait);
2567 ++ spin_unlock_irqrestore(&io->u.wait.lock, flags);
2568 + return;
2569 ++ }
2570 +
2571 +- if (background)
2572 ++ if (!atomic_add_return(bios, &io->pending_bios))
2573 + queue_work(z_erofs_workqueue, &io->u.work);
2574 +- else
2575 +- wake_up(&io->u.wait);
2576 + }
2577 +
2578 + static inline void z_erofs_vle_read_endio(struct bio *bio)
2579 +diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
2580 +index cb0461a10808..93424db5f002 100644
2581 +--- a/drivers/target/target_core_spc.c
2582 ++++ b/drivers/target/target_core_spc.c
2583 +@@ -108,12 +108,17 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
2584 +
2585 + buf[7] = 0x2; /* CmdQue=1 */
2586 +
2587 +- memcpy(&buf[8], "LIO-ORG ", 8);
2588 +- memset(&buf[16], 0x20, 16);
2589 ++ /*
2590 ++ * ASCII data fields described as being left-aligned shall have any
2591 ++ * unused bytes at the end of the field (i.e., highest offset) and the
2592 ++ * unused bytes shall be filled with ASCII space characters (20h).
2593 ++ */
2594 ++ memset(&buf[8], 0x20, 8 + 16 + 4);
2595 ++ memcpy(&buf[8], "LIO-ORG", sizeof("LIO-ORG") - 1);
2596 + memcpy(&buf[16], dev->t10_wwn.model,
2597 +- min_t(size_t, strlen(dev->t10_wwn.model), 16));
2598 ++ strnlen(dev->t10_wwn.model, 16));
2599 + memcpy(&buf[32], dev->t10_wwn.revision,
2600 +- min_t(size_t, strlen(dev->t10_wwn.revision), 4));
2601 ++ strnlen(dev->t10_wwn.revision, 4));
2602 + buf[4] = 31; /* Set additional length to 31 */
2603 +
2604 + return 0;
2605 +@@ -251,7 +256,9 @@ check_t10_vend_desc:
2606 + buf[off] = 0x2; /* ASCII */
2607 + buf[off+1] = 0x1; /* T10 Vendor ID */
2608 + buf[off+2] = 0x0;
2609 +- memcpy(&buf[off+4], "LIO-ORG", 8);
2610 ++ /* left align Vendor ID and pad with spaces */
2611 ++ memset(&buf[off+4], 0x20, 8);
2612 ++ memcpy(&buf[off+4], "LIO-ORG", sizeof("LIO-ORG") - 1);
2613 + /* Extra Byte for NULL Terminator */
2614 + id_len++;
2615 + /* Identifier Length */
2616 +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
2617 +index fc3093d21b96..3f7aad45d215 100644
2618 +--- a/drivers/target/target_core_transport.c
2619 ++++ b/drivers/target/target_core_transport.c
2620 +@@ -224,19 +224,28 @@ void transport_subsystem_check_init(void)
2621 + sub_api_initialized = 1;
2622 + }
2623 +
2624 ++static void target_release_sess_cmd_refcnt(struct percpu_ref *ref)
2625 ++{
2626 ++ struct se_session *sess = container_of(ref, typeof(*sess), cmd_count);
2627 ++
2628 ++ wake_up(&sess->cmd_list_wq);
2629 ++}
2630 ++
2631 + /**
2632 + * transport_init_session - initialize a session object
2633 + * @se_sess: Session object pointer.
2634 + *
2635 + * The caller must have zero-initialized @se_sess before calling this function.
2636 + */
2637 +-void transport_init_session(struct se_session *se_sess)
2638 ++int transport_init_session(struct se_session *se_sess)
2639 + {
2640 + INIT_LIST_HEAD(&se_sess->sess_list);
2641 + INIT_LIST_HEAD(&se_sess->sess_acl_list);
2642 + INIT_LIST_HEAD(&se_sess->sess_cmd_list);
2643 + spin_lock_init(&se_sess->sess_cmd_lock);
2644 + init_waitqueue_head(&se_sess->cmd_list_wq);
2645 ++ return percpu_ref_init(&se_sess->cmd_count,
2646 ++ target_release_sess_cmd_refcnt, 0, GFP_KERNEL);
2647 + }
2648 + EXPORT_SYMBOL(transport_init_session);
2649 +
2650 +@@ -247,6 +256,7 @@ EXPORT_SYMBOL(transport_init_session);
2651 + struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
2652 + {
2653 + struct se_session *se_sess;
2654 ++ int ret;
2655 +
2656 + se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
2657 + if (!se_sess) {
2658 +@@ -254,7 +264,11 @@ struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
2659 + " se_sess_cache\n");
2660 + return ERR_PTR(-ENOMEM);
2661 + }
2662 +- transport_init_session(se_sess);
2663 ++ ret = transport_init_session(se_sess);
2664 ++ if (ret < 0) {
2665 ++ kfree(se_sess);
2666 ++ return ERR_PTR(ret);
2667 ++ }
2668 + se_sess->sup_prot_ops = sup_prot_ops;
2669 +
2670 + return se_sess;
2671 +@@ -581,6 +595,7 @@ void transport_free_session(struct se_session *se_sess)
2672 + sbitmap_queue_free(&se_sess->sess_tag_pool);
2673 + kvfree(se_sess->sess_cmd_map);
2674 + }
2675 ++ percpu_ref_exit(&se_sess->cmd_count);
2676 + kmem_cache_free(se_sess_cache, se_sess);
2677 + }
2678 + EXPORT_SYMBOL(transport_free_session);
2679 +@@ -2724,6 +2739,7 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
2680 + }
2681 + se_cmd->transport_state |= CMD_T_PRE_EXECUTE;
2682 + list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
2683 ++ percpu_ref_get(&se_sess->cmd_count);
2684 + out:
2685 + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2686 +
2687 +@@ -2754,8 +2770,6 @@ static void target_release_cmd_kref(struct kref *kref)
2688 + if (se_sess) {
2689 + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2690 + list_del_init(&se_cmd->se_cmd_list);
2691 +- if (se_sess->sess_tearing_down && list_empty(&se_sess->sess_cmd_list))
2692 +- wake_up(&se_sess->cmd_list_wq);
2693 + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2694 + }
2695 +
2696 +@@ -2763,6 +2777,8 @@ static void target_release_cmd_kref(struct kref *kref)
2697 + se_cmd->se_tfo->release_cmd(se_cmd);
2698 + if (compl)
2699 + complete(compl);
2700 ++
2701 ++ percpu_ref_put(&se_sess->cmd_count);
2702 + }
2703 +
2704 + /**
2705 +@@ -2891,6 +2907,8 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
2706 + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2707 + se_sess->sess_tearing_down = 1;
2708 + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2709 ++
2710 ++ percpu_ref_kill(&se_sess->cmd_count);
2711 + }
2712 + EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
2713 +
2714 +@@ -2905,17 +2923,14 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
2715 +
2716 + WARN_ON_ONCE(!se_sess->sess_tearing_down);
2717 +
2718 +- spin_lock_irq(&se_sess->sess_cmd_lock);
2719 + do {
2720 +- ret = wait_event_lock_irq_timeout(
2721 +- se_sess->cmd_list_wq,
2722 +- list_empty(&se_sess->sess_cmd_list),
2723 +- se_sess->sess_cmd_lock, 180 * HZ);
2724 ++ ret = wait_event_timeout(se_sess->cmd_list_wq,
2725 ++ percpu_ref_is_zero(&se_sess->cmd_count),
2726 ++ 180 * HZ);
2727 + list_for_each_entry(cmd, &se_sess->sess_cmd_list, se_cmd_list)
2728 + target_show_cmd("session shutdown: still waiting for ",
2729 + cmd);
2730 + } while (ret <= 0);
2731 +- spin_unlock_irq(&se_sess->sess_cmd_lock);
2732 + }
2733 + EXPORT_SYMBOL(target_wait_for_sess_cmds);
2734 +
2735 +diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
2736 +index 2718a933c0c6..7cdb5d7f6538 100644
2737 +--- a/drivers/target/target_core_xcopy.c
2738 ++++ b/drivers/target/target_core_xcopy.c
2739 +@@ -480,6 +480,8 @@ static const struct target_core_fabric_ops xcopy_pt_tfo = {
2740 +
2741 + int target_xcopy_setup_pt(void)
2742 + {
2743 ++ int ret;
2744 ++
2745 + xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0);
2746 + if (!xcopy_wq) {
2747 + pr_err("Unable to allocate xcopy_wq\n");
2748 +@@ -497,7 +499,9 @@ int target_xcopy_setup_pt(void)
2749 + INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list);
2750 + INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list);
2751 + memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
2752 +- transport_init_session(&xcopy_pt_sess);
2753 ++ ret = transport_init_session(&xcopy_pt_sess);
2754 ++ if (ret < 0)
2755 ++ return ret;
2756 +
2757 + xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
2758 + xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;
2759 +diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
2760 +index ebd33c0232e6..89ade213a1a9 100644
2761 +--- a/drivers/tty/serial/amba-pl011.c
2762 ++++ b/drivers/tty/serial/amba-pl011.c
2763 +@@ -2780,6 +2780,7 @@ static struct platform_driver arm_sbsa_uart_platform_driver = {
2764 + .name = "sbsa-uart",
2765 + .of_match_table = of_match_ptr(sbsa_uart_of_match),
2766 + .acpi_match_table = ACPI_PTR(sbsa_uart_acpi_match),
2767 ++ .suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
2768 + },
2769 + };
2770 +
2771 +@@ -2808,6 +2809,7 @@ static struct amba_driver pl011_driver = {
2772 + .drv = {
2773 + .name = "uart-pl011",
2774 + .pm = &pl011_dev_pm_ops,
2775 ++ .suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
2776 + },
2777 + .id_table = pl011_ids,
2778 + .probe = pl011_probe,
2779 +diff --git a/drivers/tty/serial/pic32_uart.c b/drivers/tty/serial/pic32_uart.c
2780 +index fd80d999308d..0bdf1687983f 100644
2781 +--- a/drivers/tty/serial/pic32_uart.c
2782 ++++ b/drivers/tty/serial/pic32_uart.c
2783 +@@ -919,6 +919,7 @@ static struct platform_driver pic32_uart_platform_driver = {
2784 + .driver = {
2785 + .name = PIC32_DEV_NAME,
2786 + .of_match_table = of_match_ptr(pic32_serial_dt_ids),
2787 ++ .suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_PIC32),
2788 + },
2789 + };
2790 +
2791 +diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
2792 +index 80bb56facfb6..ad126f51d549 100644
2793 +--- a/drivers/tty/serial/serial_core.c
2794 ++++ b/drivers/tty/serial/serial_core.c
2795 +@@ -205,10 +205,15 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state,
2796 + if (!state->xmit.buf) {
2797 + state->xmit.buf = (unsigned char *) page;
2798 + uart_circ_clear(&state->xmit);
2799 ++ uart_port_unlock(uport, flags);
2800 + } else {
2801 ++ uart_port_unlock(uport, flags);
2802 ++ /*
2803 ++ * Do not free() the page under the port lock, see
2804 ++ * uart_shutdown().
2805 ++ */
2806 + free_page(page);
2807 + }
2808 +- uart_port_unlock(uport, flags);
2809 +
2810 + retval = uport->ops->startup(uport);
2811 + if (retval == 0) {
2812 +@@ -268,6 +273,7 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
2813 + struct uart_port *uport = uart_port_check(state);
2814 + struct tty_port *port = &state->port;
2815 + unsigned long flags = 0;
2816 ++ char *xmit_buf = NULL;
2817 +
2818 + /*
2819 + * Set the TTY IO error marker
2820 +@@ -298,14 +304,18 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
2821 + tty_port_set_suspended(port, 0);
2822 +
2823 + /*
2824 +- * Free the transmit buffer page.
2825 ++ * Do not free() the transmit buffer page under the port lock since
2826 ++ * this can create various circular locking scenarios. For instance,
2827 ++ * console driver may need to allocate/free a debug object, which
2828 ++ * can endup in printk() recursion.
2829 + */
2830 + uart_port_lock(state, flags);
2831 +- if (state->xmit.buf) {
2832 +- free_page((unsigned long)state->xmit.buf);
2833 +- state->xmit.buf = NULL;
2834 +- }
2835 ++ xmit_buf = state->xmit.buf;
2836 ++ state->xmit.buf = NULL;
2837 + uart_port_unlock(uport, flags);
2838 ++
2839 ++ if (xmit_buf)
2840 ++ free_page((unsigned long)xmit_buf);
2841 + }
2842 +
2843 + /**
2844 +diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
2845 +index 6ed74735b58c..87d8dd90d605 100644
2846 +--- a/drivers/tty/serial/xilinx_uartps.c
2847 ++++ b/drivers/tty/serial/xilinx_uartps.c
2848 +@@ -1608,6 +1608,7 @@ static struct platform_driver cdns_uart_platform_driver = {
2849 + .name = CDNS_UART_NAME,
2850 + .of_match_table = cdns_uart_of_match,
2851 + .pm = &cdns_uart_dev_pm_ops,
2852 ++ .suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_XILINX_PS_UART),
2853 + },
2854 + };
2855 +
2856 +diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
2857 +index 67d8a501d994..fea02c7ad4f4 100644
2858 +--- a/drivers/usb/gadget/udc/renesas_usb3.c
2859 ++++ b/drivers/usb/gadget/udc/renesas_usb3.c
2860 +@@ -358,6 +358,7 @@ struct renesas_usb3 {
2861 + bool extcon_host; /* check id and set EXTCON_USB_HOST */
2862 + bool extcon_usb; /* check vbus and set EXTCON_USB */
2863 + bool forced_b_device;
2864 ++ bool start_to_connect;
2865 + };
2866 +
2867 + #define gadget_to_renesas_usb3(_gadget) \
2868 +@@ -476,7 +477,8 @@ static void usb3_init_axi_bridge(struct renesas_usb3 *usb3)
2869 + static void usb3_init_epc_registers(struct renesas_usb3 *usb3)
2870 + {
2871 + usb3_write(usb3, ~0, USB3_USB_INT_STA_1);
2872 +- usb3_enable_irq_1(usb3, USB_INT_1_VBUS_CNG);
2873 ++ if (!usb3->workaround_for_vbus)
2874 ++ usb3_enable_irq_1(usb3, USB_INT_1_VBUS_CNG);
2875 + }
2876 +
2877 + static bool usb3_wakeup_usb2_phy(struct renesas_usb3 *usb3)
2878 +@@ -700,8 +702,7 @@ static void usb3_mode_config(struct renesas_usb3 *usb3, bool host, bool a_dev)
2879 + usb3_set_mode_by_role_sw(usb3, host);
2880 + usb3_vbus_out(usb3, a_dev);
2881 + /* for A-Peripheral or forced B-device mode */
2882 +- if ((!host && a_dev) ||
2883 +- (usb3->workaround_for_vbus && usb3->forced_b_device))
2884 ++ if ((!host && a_dev) || usb3->start_to_connect)
2885 + usb3_connect(usb3);
2886 + spin_unlock_irqrestore(&usb3->lock, flags);
2887 + }
2888 +@@ -2432,7 +2433,11 @@ static ssize_t renesas_usb3_b_device_write(struct file *file,
2889 + if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
2890 + return -EFAULT;
2891 +
2892 +- if (!strncmp(buf, "1", 1))
2893 ++ usb3->start_to_connect = false;
2894 ++ if (usb3->workaround_for_vbus && usb3->forced_b_device &&
2895 ++ !strncmp(buf, "2", 1))
2896 ++ usb3->start_to_connect = true;
2897 ++ else if (!strncmp(buf, "1", 1))
2898 + usb3->forced_b_device = true;
2899 + else
2900 + usb3->forced_b_device = false;
2901 +@@ -2440,7 +2445,7 @@ static ssize_t renesas_usb3_b_device_write(struct file *file,
2902 + if (usb3->workaround_for_vbus)
2903 + usb3_disconnect(usb3);
2904 +
2905 +- /* Let this driver call usb3_connect() anyway */
2906 ++ /* Let this driver call usb3_connect() if needed */
2907 + usb3_check_id(usb3);
2908 +
2909 + return count;
2910 +diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c
2911 +index c74cc9c309b1..3457c1fdebd1 100644
2912 +--- a/drivers/usb/typec/tcpm.c
2913 ++++ b/drivers/usb/typec/tcpm.c
2914 +@@ -317,6 +317,9 @@ struct tcpm_port {
2915 + /* Deadline in jiffies to exit src_try_wait state */
2916 + unsigned long max_wait;
2917 +
2918 ++ /* port belongs to a self powered device */
2919 ++ bool self_powered;
2920 ++
2921 + #ifdef CONFIG_DEBUG_FS
2922 + struct dentry *dentry;
2923 + struct mutex logbuffer_lock; /* log buffer access lock */
2924 +@@ -3257,7 +3260,8 @@ static void run_state_machine(struct tcpm_port *port)
2925 + case SRC_HARD_RESET_VBUS_OFF:
2926 + tcpm_set_vconn(port, true);
2927 + tcpm_set_vbus(port, false);
2928 +- tcpm_set_roles(port, false, TYPEC_SOURCE, TYPEC_HOST);
2929 ++ tcpm_set_roles(port, port->self_powered, TYPEC_SOURCE,
2930 ++ TYPEC_HOST);
2931 + tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER);
2932 + break;
2933 + case SRC_HARD_RESET_VBUS_ON:
2934 +@@ -3270,7 +3274,8 @@ static void run_state_machine(struct tcpm_port *port)
2935 + memset(&port->pps_data, 0, sizeof(port->pps_data));
2936 + tcpm_set_vconn(port, false);
2937 + tcpm_set_charge(port, false);
2938 +- tcpm_set_roles(port, false, TYPEC_SINK, TYPEC_DEVICE);
2939 ++ tcpm_set_roles(port, port->self_powered, TYPEC_SINK,
2940 ++ TYPEC_DEVICE);
2941 + /*
2942 + * VBUS may or may not toggle, depending on the adapter.
2943 + * If it doesn't toggle, transition to SNK_HARD_RESET_SINK_ON
2944 +@@ -4415,6 +4420,8 @@ sink:
2945 + return -EINVAL;
2946 + port->operating_snk_mw = mw / 1000;
2947 +
2948 ++ port->self_powered = fwnode_property_read_bool(fwnode, "self-powered");
2949 ++
2950 + return 0;
2951 + }
2952 +
2953 +@@ -4723,6 +4730,7 @@ static int tcpm_copy_caps(struct tcpm_port *port,
2954 + port->typec_caps.prefer_role = tcfg->default_role;
2955 + port->typec_caps.type = tcfg->type;
2956 + port->typec_caps.data = tcfg->data;
2957 ++ port->self_powered = port->tcpc->config->self_powered;
2958 +
2959 + return 0;
2960 + }
2961 +diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
2962 +index b2b283e48439..8fed470bb7e1 100644
2963 +--- a/fs/btrfs/dev-replace.c
2964 ++++ b/fs/btrfs/dev-replace.c
2965 +@@ -800,39 +800,58 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
2966 + case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
2967 + result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NOT_STARTED;
2968 + btrfs_dev_replace_write_unlock(dev_replace);
2969 +- goto leave;
2970 ++ break;
2971 + case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
2972 ++ result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
2973 ++ tgt_device = dev_replace->tgtdev;
2974 ++ src_device = dev_replace->srcdev;
2975 ++ btrfs_dev_replace_write_unlock(dev_replace);
2976 ++ btrfs_scrub_cancel(fs_info);
2977 ++ /* btrfs_dev_replace_finishing() will handle the cleanup part */
2978 ++ btrfs_info_in_rcu(fs_info,
2979 ++ "dev_replace from %s (devid %llu) to %s canceled",
2980 ++ btrfs_dev_name(src_device), src_device->devid,
2981 ++ btrfs_dev_name(tgt_device));
2982 ++ break;
2983 + case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
2984 ++ /*
2985 ++ * Scrub doing the replace isn't running so we need to do the
2986 ++ * cleanup step of btrfs_dev_replace_finishing() here
2987 ++ */
2988 + result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
2989 + tgt_device = dev_replace->tgtdev;
2990 + src_device = dev_replace->srcdev;
2991 + dev_replace->tgtdev = NULL;
2992 + dev_replace->srcdev = NULL;
2993 +- break;
2994 +- }
2995 +- dev_replace->replace_state = BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED;
2996 +- dev_replace->time_stopped = ktime_get_real_seconds();
2997 +- dev_replace->item_needs_writeback = 1;
2998 +- btrfs_dev_replace_write_unlock(dev_replace);
2999 +- btrfs_scrub_cancel(fs_info);
3000 ++ dev_replace->replace_state =
3001 ++ BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED;
3002 ++ dev_replace->time_stopped = ktime_get_real_seconds();
3003 ++ dev_replace->item_needs_writeback = 1;
3004 +
3005 +- trans = btrfs_start_transaction(root, 0);
3006 +- if (IS_ERR(trans)) {
3007 +- mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
3008 +- return PTR_ERR(trans);
3009 +- }
3010 +- ret = btrfs_commit_transaction(trans);
3011 +- WARN_ON(ret);
3012 ++ btrfs_dev_replace_write_unlock(dev_replace);
3013 +
3014 +- btrfs_info_in_rcu(fs_info,
3015 +- "dev_replace from %s (devid %llu) to %s canceled",
3016 +- btrfs_dev_name(src_device), src_device->devid,
3017 +- btrfs_dev_name(tgt_device));
3018 ++ btrfs_scrub_cancel(fs_info);
3019 ++
3020 ++ trans = btrfs_start_transaction(root, 0);
3021 ++ if (IS_ERR(trans)) {
3022 ++ mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
3023 ++ return PTR_ERR(trans);
3024 ++ }
3025 ++ ret = btrfs_commit_transaction(trans);
3026 ++ WARN_ON(ret);
3027 +
3028 +- if (tgt_device)
3029 +- btrfs_destroy_dev_replace_tgtdev(tgt_device);
3030 ++ btrfs_info_in_rcu(fs_info,
3031 ++ "suspended dev_replace from %s (devid %llu) to %s canceled",
3032 ++ btrfs_dev_name(src_device), src_device->devid,
3033 ++ btrfs_dev_name(tgt_device));
3034 ++
3035 ++ if (tgt_device)
3036 ++ btrfs_destroy_dev_replace_tgtdev(tgt_device);
3037 ++ break;
3038 ++ default:
3039 ++ result = -EINVAL;
3040 ++ }
3041 +
3042 +-leave:
3043 + mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
3044 + return result;
3045 + }
3046 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
3047 +index 4f6dc56b4f4d..83b3a626c796 100644
3048 +--- a/fs/btrfs/inode.c
3049 ++++ b/fs/btrfs/inode.c
3050 +@@ -6440,14 +6440,19 @@ fail_dir_item:
3051 + err = btrfs_del_root_ref(trans, key.objectid,
3052 + root->root_key.objectid, parent_ino,
3053 + &local_index, name, name_len);
3054 +-
3055 ++ if (err)
3056 ++ btrfs_abort_transaction(trans, err);
3057 + } else if (add_backref) {
3058 + u64 local_index;
3059 + int err;
3060 +
3061 + err = btrfs_del_inode_ref(trans, root, name, name_len,
3062 + ino, parent_ino, &local_index);
3063 ++ if (err)
3064 ++ btrfs_abort_transaction(trans, err);
3065 + }
3066 ++
3067 ++ /* Return the original error code */
3068 + return ret;
3069 + }
3070 +
3071 +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
3072 +index 223334f08530..0ee1cd4b56fb 100644
3073 +--- a/fs/btrfs/volumes.c
3074 ++++ b/fs/btrfs/volumes.c
3075 +@@ -4768,19 +4768,17 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3076 + /*
3077 + * Use the number of data stripes to figure out how big this chunk
3078 + * is really going to be in terms of logical address space,
3079 +- * and compare that answer with the max chunk size
3080 ++ * and compare that answer with the max chunk size. If it's higher,
3081 ++ * we try to reduce stripe_size.
3082 + */
3083 + if (stripe_size * data_stripes > max_chunk_size) {
3084 +- stripe_size = div_u64(max_chunk_size, data_stripes);
3085 +-
3086 +- /* bump the answer up to a 16MB boundary */
3087 +- stripe_size = round_up(stripe_size, SZ_16M);
3088 +-
3089 + /*
3090 +- * But don't go higher than the limits we found while searching
3091 +- * for free extents
3092 ++ * Reduce stripe_size, round it up to a 16MB boundary again and
3093 ++ * then use it, unless it ends up being even bigger than the
3094 ++ * previous value we had already.
3095 + */
3096 +- stripe_size = min(devices_info[ndevs - 1].max_avail,
3097 ++ stripe_size = min(round_up(div_u64(max_chunk_size,
3098 ++ data_stripes), SZ_16M),
3099 + stripe_size);
3100 + }
3101 +
3102 +@@ -7474,6 +7472,8 @@ int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
3103 + struct btrfs_path *path;
3104 + struct btrfs_root *root = fs_info->dev_root;
3105 + struct btrfs_key key;
3106 ++ u64 prev_devid = 0;
3107 ++ u64 prev_dev_ext_end = 0;
3108 + int ret = 0;
3109 +
3110 + key.objectid = 1;
3111 +@@ -7518,10 +7518,22 @@ int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
3112 + chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
3113 + physical_len = btrfs_dev_extent_length(leaf, dext);
3114 +
3115 ++ /* Check if this dev extent overlaps with the previous one */
3116 ++ if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
3117 ++ btrfs_err(fs_info,
3118 ++"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
3119 ++ devid, physical_offset, prev_dev_ext_end);
3120 ++ ret = -EUCLEAN;
3121 ++ goto out;
3122 ++ }
3123 ++
3124 + ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
3125 + physical_offset, physical_len);
3126 + if (ret < 0)
3127 + goto out;
3128 ++ prev_devid = devid;
3129 ++ prev_dev_ext_end = physical_offset + physical_len;
3130 ++
3131 + ret = btrfs_next_item(root, path);
3132 + if (ret < 0)
3133 + goto out;
3134 +diff --git a/fs/iomap.c b/fs/iomap.c
3135 +index ec15cf2ec696..e57fb1e534c5 100644
3136 +--- a/fs/iomap.c
3137 ++++ b/fs/iomap.c
3138 +@@ -488,16 +488,29 @@ done:
3139 + }
3140 + EXPORT_SYMBOL_GPL(iomap_readpages);
3141 +
3142 ++/*
3143 ++ * iomap_is_partially_uptodate checks whether blocks within a page are
3144 ++ * uptodate or not.
3145 ++ *
3146 ++ * Returns true if all blocks which correspond to a file portion
3147 ++ * we want to read within the page are uptodate.
3148 ++ */
3149 + int
3150 + iomap_is_partially_uptodate(struct page *page, unsigned long from,
3151 + unsigned long count)
3152 + {
3153 + struct iomap_page *iop = to_iomap_page(page);
3154 + struct inode *inode = page->mapping->host;
3155 +- unsigned first = from >> inode->i_blkbits;
3156 +- unsigned last = (from + count - 1) >> inode->i_blkbits;
3157 ++ unsigned len, first, last;
3158 + unsigned i;
3159 +
3160 ++ /* Limit range to one page */
3161 ++ len = min_t(unsigned, PAGE_SIZE - from, count);
3162 ++
3163 ++ /* First and last blocks in range within page */
3164 ++ first = from >> inode->i_blkbits;
3165 ++ last = (from + len - 1) >> inode->i_blkbits;
3166 ++
3167 + if (iop) {
3168 + for (i = first; i <= last; i++)
3169 + if (!test_bit(i, iop->uptodate))
3170 +diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
3171 +index 902a7dd10e5c..bb6ae387469f 100644
3172 +--- a/fs/jffs2/super.c
3173 ++++ b/fs/jffs2/super.c
3174 +@@ -101,7 +101,8 @@ static int jffs2_sync_fs(struct super_block *sb, int wait)
3175 + struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
3176 +
3177 + #ifdef CONFIG_JFFS2_FS_WRITEBUFFER
3178 +- cancel_delayed_work_sync(&c->wbuf_dwork);
3179 ++ if (jffs2_is_writebuffered(c))
3180 ++ cancel_delayed_work_sync(&c->wbuf_dwork);
3181 + #endif
3182 +
3183 + mutex_lock(&c->alloc_sem);
3184 +diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
3185 +index 7642b6712c39..30208233f65b 100644
3186 +--- a/fs/ocfs2/localalloc.c
3187 ++++ b/fs/ocfs2/localalloc.c
3188 +@@ -345,13 +345,18 @@ int ocfs2_load_local_alloc(struct ocfs2_super *osb)
3189 + if (num_used
3190 + || alloc->id1.bitmap1.i_used
3191 + || alloc->id1.bitmap1.i_total
3192 +- || la->la_bm_off)
3193 +- mlog(ML_ERROR, "Local alloc hasn't been recovered!\n"
3194 ++ || la->la_bm_off) {
3195 ++ mlog(ML_ERROR, "inconsistent detected, clean journal with"
3196 ++ " unrecovered local alloc, please run fsck.ocfs2!\n"
3197 + "found = %u, set = %u, taken = %u, off = %u\n",
3198 + num_used, le32_to_cpu(alloc->id1.bitmap1.i_used),
3199 + le32_to_cpu(alloc->id1.bitmap1.i_total),
3200 + OCFS2_LOCAL_ALLOC(alloc)->la_bm_off);
3201 +
3202 ++ status = -EINVAL;
3203 ++ goto bail;
3204 ++ }
3205 ++
3206 + osb->local_alloc_bh = alloc_bh;
3207 + osb->local_alloc_state = OCFS2_LA_ENABLED;
3208 +
3209 +diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
3210 +index 0792595ebcfb..3c777ec80d47 100644
3211 +--- a/fs/pstore/ram_core.c
3212 ++++ b/fs/pstore/ram_core.c
3213 +@@ -496,6 +496,11 @@ static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
3214 + sig ^= PERSISTENT_RAM_SIG;
3215 +
3216 + if (prz->buffer->sig == sig) {
3217 ++ if (buffer_size(prz) == 0) {
3218 ++ pr_debug("found existing empty buffer\n");
3219 ++ return 0;
3220 ++ }
3221 ++
3222 + if (buffer_size(prz) > prz->buffer_size ||
3223 + buffer_start(prz) > buffer_size(prz))
3224 + pr_info("found existing invalid buffer, size %zu, start %zu\n",
3225 +diff --git a/fs/quota/quota.c b/fs/quota/quota.c
3226 +index f0cbf58ad4da..fd5dd806f1b9 100644
3227 +--- a/fs/quota/quota.c
3228 ++++ b/fs/quota/quota.c
3229 +@@ -791,7 +791,8 @@ static int quotactl_cmd_write(int cmd)
3230 + /* Return true if quotactl command is manipulating quota on/off state */
3231 + static bool quotactl_cmd_onoff(int cmd)
3232 + {
3233 +- return (cmd == Q_QUOTAON) || (cmd == Q_QUOTAOFF);
3234 ++ return (cmd == Q_QUOTAON) || (cmd == Q_QUOTAOFF) ||
3235 ++ (cmd == Q_XQUOTAON) || (cmd == Q_XQUOTAOFF);
3236 + }
3237 +
3238 + /*
3239 +diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
3240 +index 7a85e609fc27..d8b8323e80f4 100644
3241 +--- a/fs/userfaultfd.c
3242 ++++ b/fs/userfaultfd.c
3243 +@@ -736,10 +736,18 @@ void mremap_userfaultfd_prep(struct vm_area_struct *vma,
3244 + struct userfaultfd_ctx *ctx;
3245 +
3246 + ctx = vma->vm_userfaultfd_ctx.ctx;
3247 +- if (ctx && (ctx->features & UFFD_FEATURE_EVENT_REMAP)) {
3248 ++
3249 ++ if (!ctx)
3250 ++ return;
3251 ++
3252 ++ if (ctx->features & UFFD_FEATURE_EVENT_REMAP) {
3253 + vm_ctx->ctx = ctx;
3254 + userfaultfd_ctx_get(ctx);
3255 + WRITE_ONCE(ctx->mmap_changing, true);
3256 ++ } else {
3257 ++ /* Drop uffd context if remap feature not enabled */
3258 ++ vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
3259 ++ vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
3260 + }
3261 + }
3262 +
3263 +diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
3264 +index 9a6bc0951cfa..c31157135598 100644
3265 +--- a/include/linux/backing-dev-defs.h
3266 ++++ b/include/linux/backing-dev-defs.h
3267 +@@ -258,6 +258,14 @@ static inline void wb_get(struct bdi_writeback *wb)
3268 + */
3269 + static inline void wb_put(struct bdi_writeback *wb)
3270 + {
3271 ++ if (WARN_ON_ONCE(!wb->bdi)) {
3272 ++ /*
3273 ++ * A driver bug might cause a file to be removed before bdi was
3274 ++ * initialized.
3275 ++ */
3276 ++ return;
3277 ++ }
3278 ++
3279 + if (wb != &wb->bdi->wb)
3280 + percpu_ref_put(&wb->refcnt);
3281 + }
3282 +diff --git a/include/linux/filter.h b/include/linux/filter.h
3283 +index 6791a0ac0139..ec90d5255cf7 100644
3284 +--- a/include/linux/filter.h
3285 ++++ b/include/linux/filter.h
3286 +@@ -665,24 +665,10 @@ static inline u32 bpf_ctx_off_adjust_machine(u32 size)
3287 + return size;
3288 + }
3289 +
3290 +-static inline bool bpf_ctx_narrow_align_ok(u32 off, u32 size_access,
3291 +- u32 size_default)
3292 +-{
3293 +- size_default = bpf_ctx_off_adjust_machine(size_default);
3294 +- size_access = bpf_ctx_off_adjust_machine(size_access);
3295 +-
3296 +-#ifdef __LITTLE_ENDIAN
3297 +- return (off & (size_default - 1)) == 0;
3298 +-#else
3299 +- return (off & (size_default - 1)) + size_access == size_default;
3300 +-#endif
3301 +-}
3302 +-
3303 + static inline bool
3304 + bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
3305 + {
3306 +- return bpf_ctx_narrow_align_ok(off, size, size_default) &&
3307 +- size <= size_default && (size & (size - 1)) == 0;
3308 ++ return size <= size_default && (size & (size - 1)) == 0;
3309 + }
3310 +
3311 + #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
3312 +diff --git a/include/linux/swap.h b/include/linux/swap.h
3313 +index 8e2c11e692ba..77221c16733a 100644
3314 +--- a/include/linux/swap.h
3315 ++++ b/include/linux/swap.h
3316 +@@ -232,7 +232,6 @@ struct swap_info_struct {
3317 + unsigned long flags; /* SWP_USED etc: see above */
3318 + signed short prio; /* swap priority of this type */
3319 + struct plist_node list; /* entry in swap_active_head */
3320 +- struct plist_node avail_lists[MAX_NUMNODES];/* entry in swap_avail_heads */
3321 + signed char type; /* strange name for an index */
3322 + unsigned int max; /* extent of the swap_map */
3323 + unsigned char *swap_map; /* vmalloc'ed array of usage counts */
3324 +@@ -273,6 +272,16 @@ struct swap_info_struct {
3325 + */
3326 + struct work_struct discard_work; /* discard worker */
3327 + struct swap_cluster_list discard_clusters; /* discard clusters list */
3328 ++ struct plist_node avail_lists[0]; /*
3329 ++ * entries in swap_avail_heads, one
3330 ++ * entry per node.
3331 ++ * Must be last as the number of the
3332 ++ * array is nr_node_ids, which is not
3333 ++ * a fixed value so have to allocate
3334 ++ * dynamically.
3335 ++ * And it has to be an array so that
3336 ++ * plist_for_each_* can work.
3337 ++ */
3338 + };
3339 +
3340 + #ifdef CONFIG_64BIT
3341 +diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h
3342 +index 7e7fbfb84e8e..50c74a77db55 100644
3343 +--- a/include/linux/usb/tcpm.h
3344 ++++ b/include/linux/usb/tcpm.h
3345 +@@ -89,6 +89,7 @@ struct tcpc_config {
3346 + enum typec_port_data data;
3347 + enum typec_role default_role;
3348 + bool try_role_hw; /* try.{src,snk} implemented in hardware */
3349 ++ bool self_powered; /* port belongs to a self powered device */
3350 +
3351 + const struct typec_altmode_desc *alt_modes;
3352 + };
3353 +diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
3354 +index 7a4ee7852ca4..2cfd3b4573b0 100644
3355 +--- a/include/target/target_core_base.h
3356 ++++ b/include/target/target_core_base.h
3357 +@@ -602,6 +602,7 @@ struct se_session {
3358 + struct se_node_acl *se_node_acl;
3359 + struct se_portal_group *se_tpg;
3360 + void *fabric_sess_ptr;
3361 ++ struct percpu_ref cmd_count;
3362 + struct list_head sess_list;
3363 + struct list_head sess_acl_list;
3364 + struct list_head sess_cmd_list;
3365 +diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
3366 +index f4147b398431..eb9d0923c55c 100644
3367 +--- a/include/target/target_core_fabric.h
3368 ++++ b/include/target/target_core_fabric.h
3369 +@@ -116,7 +116,7 @@ struct se_session *target_setup_session(struct se_portal_group *,
3370 + struct se_session *, void *));
3371 + void target_remove_session(struct se_session *);
3372 +
3373 +-void transport_init_session(struct se_session *);
3374 ++int transport_init_session(struct se_session *se_sess);
3375 + struct se_session *transport_alloc_session(enum target_prot_op);
3376 + int transport_alloc_session_tags(struct se_session *, unsigned int,
3377 + unsigned int);
3378 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
3379 +index 2954e4b3abd5..341806668f03 100644
3380 +--- a/kernel/bpf/verifier.c
3381 ++++ b/kernel/bpf/verifier.c
3382 +@@ -3285,12 +3285,15 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
3383 + return err;
3384 +
3385 + if (BPF_SRC(insn->code) == BPF_X) {
3386 ++ struct bpf_reg_state *src_reg = regs + insn->src_reg;
3387 ++ struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
3388 ++
3389 + if (BPF_CLASS(insn->code) == BPF_ALU64) {
3390 + /* case: R1 = R2
3391 + * copy register state to dest reg
3392 + */
3393 +- regs[insn->dst_reg] = regs[insn->src_reg];
3394 +- regs[insn->dst_reg].live |= REG_LIVE_WRITTEN;
3395 ++ *dst_reg = *src_reg;
3396 ++ dst_reg->live |= REG_LIVE_WRITTEN;
3397 + } else {
3398 + /* R1 = (u32) R2 */
3399 + if (is_pointer_value(env, insn->src_reg)) {
3400 +@@ -3298,9 +3301,14 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
3401 + "R%d partial copy of pointer\n",
3402 + insn->src_reg);
3403 + return -EACCES;
3404 ++ } else if (src_reg->type == SCALAR_VALUE) {
3405 ++ *dst_reg = *src_reg;
3406 ++ dst_reg->live |= REG_LIVE_WRITTEN;
3407 ++ } else {
3408 ++ mark_reg_unknown(env, regs,
3409 ++ insn->dst_reg);
3410 + }
3411 +- mark_reg_unknown(env, regs, insn->dst_reg);
3412 +- coerce_reg_to_size(&regs[insn->dst_reg], 4);
3413 ++ coerce_reg_to_size(dst_reg, 4);
3414 + }
3415 + } else {
3416 + /* case: R = imm
3417 +@@ -5341,10 +5349,10 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
3418 + int i, cnt, size, ctx_field_size, delta = 0;
3419 + const int insn_cnt = env->prog->len;
3420 + struct bpf_insn insn_buf[16], *insn;
3421 ++ u32 target_size, size_default, off;
3422 + struct bpf_prog *new_prog;
3423 + enum bpf_access_type type;
3424 + bool is_narrower_load;
3425 +- u32 target_size;
3426 +
3427 + if (ops->gen_prologue) {
3428 + cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
3429 +@@ -5421,9 +5429,9 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
3430 + * we will apply proper mask to the result.
3431 + */
3432 + is_narrower_load = size < ctx_field_size;
3433 ++ size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
3434 ++ off = insn->off;
3435 + if (is_narrower_load) {
3436 +- u32 size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
3437 +- u32 off = insn->off;
3438 + u8 size_code;
3439 +
3440 + if (type == BPF_WRITE) {
3441 +@@ -5451,12 +5459,23 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
3442 + }
3443 +
3444 + if (is_narrower_load && size < target_size) {
3445 +- if (ctx_field_size <= 4)
3446 ++ u8 shift = (off & (size_default - 1)) * 8;
3447 ++
3448 ++ if (ctx_field_size <= 4) {
3449 ++ if (shift)
3450 ++ insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
3451 ++ insn->dst_reg,
3452 ++ shift);
3453 + insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
3454 + (1 << size * 8) - 1);
3455 +- else
3456 ++ } else {
3457 ++ if (shift)
3458 ++ insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
3459 ++ insn->dst_reg,
3460 ++ shift);
3461 + insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
3462 + (1 << size * 8) - 1);
3463 ++ }
3464 + }
3465 +
3466 + new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
3467 +diff --git a/mm/page-writeback.c b/mm/page-writeback.c
3468 +index 84ae9bf5858a..ea4fd3af3b4b 100644
3469 +--- a/mm/page-writeback.c
3470 ++++ b/mm/page-writeback.c
3471 +@@ -2156,6 +2156,7 @@ int write_cache_pages(struct address_space *mapping,
3472 + {
3473 + int ret = 0;
3474 + int done = 0;
3475 ++ int error;
3476 + struct pagevec pvec;
3477 + int nr_pages;
3478 + pgoff_t uninitialized_var(writeback_index);
3479 +@@ -2236,25 +2237,31 @@ continue_unlock:
3480 + goto continue_unlock;
3481 +
3482 + trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
3483 +- ret = (*writepage)(page, wbc, data);
3484 +- if (unlikely(ret)) {
3485 +- if (ret == AOP_WRITEPAGE_ACTIVATE) {
3486 ++ error = (*writepage)(page, wbc, data);
3487 ++ if (unlikely(error)) {
3488 ++ /*
3489 ++ * Handle errors according to the type of
3490 ++ * writeback. There's no need to continue for
3491 ++ * background writeback. Just push done_index
3492 ++ * past this page so media errors won't choke
3493 ++ * writeout for the entire file. For integrity
3494 ++ * writeback, we must process the entire dirty
3495 ++ * set regardless of errors because the fs may
3496 ++ * still have state to clear for each page. In
3497 ++ * that case we continue processing and return
3498 ++ * the first error.
3499 ++ */
3500 ++ if (error == AOP_WRITEPAGE_ACTIVATE) {
3501 + unlock_page(page);
3502 +- ret = 0;
3503 +- } else {
3504 +- /*
3505 +- * done_index is set past this page,
3506 +- * so media errors will not choke
3507 +- * background writeout for the entire
3508 +- * file. This has consequences for
3509 +- * range_cyclic semantics (ie. it may
3510 +- * not be suitable for data integrity
3511 +- * writeout).
3512 +- */
3513 ++ error = 0;
3514 ++ } else if (wbc->sync_mode != WB_SYNC_ALL) {
3515 ++ ret = error;
3516 + done_index = page->index + 1;
3517 + done = 1;
3518 + break;
3519 + }
3520 ++ if (!ret)
3521 ++ ret = error;
3522 + }
3523 +
3524 + /*
3525 +diff --git a/mm/swapfile.c b/mm/swapfile.c
3526 +index 67aaf7ae22ff..340ef3177686 100644
3527 +--- a/mm/swapfile.c
3528 ++++ b/mm/swapfile.c
3529 +@@ -2820,8 +2820,9 @@ static struct swap_info_struct *alloc_swap_info(void)
3530 + struct swap_info_struct *p;
3531 + unsigned int type;
3532 + int i;
3533 ++ int size = sizeof(*p) + nr_node_ids * sizeof(struct plist_node);
3534 +
3535 +- p = kvzalloc(sizeof(*p), GFP_KERNEL);
3536 ++ p = kvzalloc(size, GFP_KERNEL);
3537 + if (!p)
3538 + return ERR_PTR(-ENOMEM);
3539 +
3540 +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
3541 +index f12555f23a49..7f800c3480f7 100644
3542 +--- a/net/bluetooth/hci_event.c
3543 ++++ b/net/bluetooth/hci_event.c
3544 +@@ -5668,6 +5668,12 @@ static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
3545 + return true;
3546 + }
3547 +
3548 ++ /* Check if request ended in Command Status - no way to retreive
3549 ++ * any extra parameters in this case.
3550 ++ */
3551 ++ if (hdr->evt == HCI_EV_CMD_STATUS)
3552 ++ return false;
3553 ++
3554 + if (hdr->evt != HCI_EV_CMD_COMPLETE) {
3555 + bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
3556 + hdr->evt);
3557 +diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
3558 +index 5372e2042adf..2cb8da465b98 100644
3559 +--- a/net/bridge/br_forward.c
3560 ++++ b/net/bridge/br_forward.c
3561 +@@ -65,6 +65,7 @@ EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);
3562 +
3563 + int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
3564 + {
3565 ++ skb->tstamp = 0;
3566 + return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING,
3567 + net, sk, skb, NULL, skb->dev,
3568 + br_dev_queue_push_xmit);
3569 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
3570 +index abbbd7fd17fe..589ec5b9ec5f 100644
3571 +--- a/net/core/skbuff.c
3572 ++++ b/net/core/skbuff.c
3573 +@@ -5258,7 +5258,6 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
3574 + unsigned long chunk;
3575 + struct sk_buff *skb;
3576 + struct page *page;
3577 +- gfp_t gfp_head;
3578 + int i;
3579 +
3580 + *errcode = -EMSGSIZE;
3581 +@@ -5268,12 +5267,8 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
3582 + if (npages > MAX_SKB_FRAGS)
3583 + return NULL;
3584 +
3585 +- gfp_head = gfp_mask;
3586 +- if (gfp_head & __GFP_DIRECT_RECLAIM)
3587 +- gfp_head |= __GFP_RETRY_MAYFAIL;
3588 +-
3589 + *errcode = -ENOBUFS;
3590 +- skb = alloc_skb(header_len, gfp_head);
3591 ++ skb = alloc_skb(header_len, gfp_mask);
3592 + if (!skb)
3593 + return NULL;
3594 +
3595 +diff --git a/net/core/sock.c b/net/core/sock.c
3596 +index 5a8a3b76832f..c9668dcb5eb9 100644
3597 +--- a/net/core/sock.c
3598 ++++ b/net/core/sock.c
3599 +@@ -698,6 +698,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
3600 + break;
3601 + case SO_DONTROUTE:
3602 + sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
3603 ++ sk_dst_reset(sk);
3604 + break;
3605 + case SO_BROADCAST:
3606 + sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
3607 +diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
3608 +index 2c8d313ae216..fb1e7f237f53 100644
3609 +--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
3610 ++++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
3611 +@@ -57,17 +57,14 @@ struct clusterip_config {
3612 + enum clusterip_hashmode hash_mode; /* which hashing mode */
3613 + u_int32_t hash_initval; /* hash initialization */
3614 + struct rcu_head rcu;
3615 +-
3616 ++ struct net *net; /* netns for pernet list */
3617 + char ifname[IFNAMSIZ]; /* device ifname */
3618 +- struct notifier_block notifier; /* refresh c->ifindex in it */
3619 + };
3620 +
3621 + #ifdef CONFIG_PROC_FS
3622 + static const struct file_operations clusterip_proc_fops;
3623 + #endif
3624 +
3625 +-static unsigned int clusterip_net_id __read_mostly;
3626 +-
3627 + struct clusterip_net {
3628 + struct list_head configs;
3629 + /* lock protects the configs list */
3630 +@@ -78,16 +75,30 @@ struct clusterip_net {
3631 + #endif
3632 + };
3633 +
3634 ++static unsigned int clusterip_net_id __read_mostly;
3635 ++static inline struct clusterip_net *clusterip_pernet(struct net *net)
3636 ++{
3637 ++ return net_generic(net, clusterip_net_id);
3638 ++}
3639 ++
3640 + static inline void
3641 + clusterip_config_get(struct clusterip_config *c)
3642 + {
3643 + refcount_inc(&c->refcount);
3644 + }
3645 +
3646 +-
3647 + static void clusterip_config_rcu_free(struct rcu_head *head)
3648 + {
3649 +- kfree(container_of(head, struct clusterip_config, rcu));
3650 ++ struct clusterip_config *config;
3651 ++ struct net_device *dev;
3652 ++
3653 ++ config = container_of(head, struct clusterip_config, rcu);
3654 ++ dev = dev_get_by_name(config->net, config->ifname);
3655 ++ if (dev) {
3656 ++ dev_mc_del(dev, config->clustermac);
3657 ++ dev_put(dev);
3658 ++ }
3659 ++ kfree(config);
3660 + }
3661 +
3662 + static inline void
3663 +@@ -101,9 +112,9 @@ clusterip_config_put(struct clusterip_config *c)
3664 + * entry(rule) is removed, remove the config from lists, but don't free it
3665 + * yet, since proc-files could still be holding references */
3666 + static inline void
3667 +-clusterip_config_entry_put(struct net *net, struct clusterip_config *c)
3668 ++clusterip_config_entry_put(struct clusterip_config *c)
3669 + {
3670 +- struct clusterip_net *cn = net_generic(net, clusterip_net_id);
3671 ++ struct clusterip_net *cn = clusterip_pernet(c->net);
3672 +
3673 + local_bh_disable();
3674 + if (refcount_dec_and_lock(&c->entries, &cn->lock)) {
3675 +@@ -118,8 +129,6 @@ clusterip_config_entry_put(struct net *net, struct clusterip_config *c)
3676 + spin_unlock(&cn->lock);
3677 + local_bh_enable();
3678 +
3679 +- unregister_netdevice_notifier(&c->notifier);
3680 +-
3681 + return;
3682 + }
3683 + local_bh_enable();
3684 +@@ -129,7 +138,7 @@ static struct clusterip_config *
3685 + __clusterip_config_find(struct net *net, __be32 clusterip)
3686 + {
3687 + struct clusterip_config *c;
3688 +- struct clusterip_net *cn = net_generic(net, clusterip_net_id);
3689 ++ struct clusterip_net *cn = clusterip_pernet(net);
3690 +
3691 + list_for_each_entry_rcu(c, &cn->configs, list) {
3692 + if (c->clusterip == clusterip)
3693 +@@ -181,32 +190,37 @@ clusterip_netdev_event(struct notifier_block *this, unsigned long event,
3694 + void *ptr)
3695 + {
3696 + struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3697 ++ struct net *net = dev_net(dev);
3698 ++ struct clusterip_net *cn = clusterip_pernet(net);
3699 + struct clusterip_config *c;
3700 +
3701 +- c = container_of(this, struct clusterip_config, notifier);
3702 +- switch (event) {
3703 +- case NETDEV_REGISTER:
3704 +- if (!strcmp(dev->name, c->ifname)) {
3705 +- c->ifindex = dev->ifindex;
3706 +- dev_mc_add(dev, c->clustermac);
3707 +- }
3708 +- break;
3709 +- case NETDEV_UNREGISTER:
3710 +- if (dev->ifindex == c->ifindex) {
3711 +- dev_mc_del(dev, c->clustermac);
3712 +- c->ifindex = -1;
3713 +- }
3714 +- break;
3715 +- case NETDEV_CHANGENAME:
3716 +- if (!strcmp(dev->name, c->ifname)) {
3717 +- c->ifindex = dev->ifindex;
3718 +- dev_mc_add(dev, c->clustermac);
3719 +- } else if (dev->ifindex == c->ifindex) {
3720 +- dev_mc_del(dev, c->clustermac);
3721 +- c->ifindex = -1;
3722 ++ spin_lock_bh(&cn->lock);
3723 ++ list_for_each_entry_rcu(c, &cn->configs, list) {
3724 ++ switch (event) {
3725 ++ case NETDEV_REGISTER:
3726 ++ if (!strcmp(dev->name, c->ifname)) {
3727 ++ c->ifindex = dev->ifindex;
3728 ++ dev_mc_add(dev, c->clustermac);
3729 ++ }
3730 ++ break;
3731 ++ case NETDEV_UNREGISTER:
3732 ++ if (dev->ifindex == c->ifindex) {
3733 ++ dev_mc_del(dev, c->clustermac);
3734 ++ c->ifindex = -1;
3735 ++ }
3736 ++ break;
3737 ++ case NETDEV_CHANGENAME:
3738 ++ if (!strcmp(dev->name, c->ifname)) {
3739 ++ c->ifindex = dev->ifindex;
3740 ++ dev_mc_add(dev, c->clustermac);
3741 ++ } else if (dev->ifindex == c->ifindex) {
3742 ++ dev_mc_del(dev, c->clustermac);
3743 ++ c->ifindex = -1;
3744 ++ }
3745 ++ break;
3746 + }
3747 +- break;
3748 + }
3749 ++ spin_unlock_bh(&cn->lock);
3750 +
3751 + return NOTIFY_DONE;
3752 + }
3753 +@@ -215,30 +229,44 @@ static struct clusterip_config *
3754 + clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
3755 + __be32 ip, const char *iniface)
3756 + {
3757 +- struct clusterip_net *cn = net_generic(net, clusterip_net_id);
3758 ++ struct clusterip_net *cn = clusterip_pernet(net);
3759 + struct clusterip_config *c;
3760 ++ struct net_device *dev;
3761 + int err;
3762 +
3763 ++ if (iniface[0] == '\0') {
3764 ++ pr_info("Please specify an interface name\n");
3765 ++ return ERR_PTR(-EINVAL);
3766 ++ }
3767 ++
3768 + c = kzalloc(sizeof(*c), GFP_ATOMIC);
3769 + if (!c)
3770 + return ERR_PTR(-ENOMEM);
3771 +
3772 +- strcpy(c->ifname, iniface);
3773 +- c->ifindex = -1;
3774 +- c->clusterip = ip;
3775 ++ dev = dev_get_by_name(net, iniface);
3776 ++ if (!dev) {
3777 ++ pr_info("no such interface %s\n", iniface);
3778 ++ kfree(c);
3779 ++ return ERR_PTR(-ENOENT);
3780 ++ }
3781 ++ c->ifindex = dev->ifindex;
3782 ++ strcpy(c->ifname, dev->name);
3783 + memcpy(&c->clustermac, &i->clustermac, ETH_ALEN);
3784 ++ dev_mc_add(dev, c->clustermac);
3785 ++ dev_put(dev);
3786 ++
3787 ++ c->clusterip = ip;
3788 + c->num_total_nodes = i->num_total_nodes;
3789 + clusterip_config_init_nodelist(c, i);
3790 + c->hash_mode = i->hash_mode;
3791 + c->hash_initval = i->hash_initval;
3792 ++ c->net = net;
3793 + refcount_set(&c->refcount, 1);
3794 +
3795 + spin_lock_bh(&cn->lock);
3796 + if (__clusterip_config_find(net, ip)) {
3797 +- spin_unlock_bh(&cn->lock);
3798 +- kfree(c);
3799 +-
3800 +- return ERR_PTR(-EBUSY);
3801 ++ err = -EBUSY;
3802 ++ goto out_config_put;
3803 + }
3804 +
3805 + list_add_rcu(&c->list, &cn->configs);
3806 +@@ -260,22 +288,17 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
3807 + }
3808 + #endif
3809 +
3810 +- c->notifier.notifier_call = clusterip_netdev_event;
3811 +- err = register_netdevice_notifier(&c->notifier);
3812 +- if (!err) {
3813 +- refcount_set(&c->entries, 1);
3814 +- return c;
3815 +- }
3816 ++ refcount_set(&c->entries, 1);
3817 ++ return c;
3818 +
3819 + #ifdef CONFIG_PROC_FS
3820 +- proc_remove(c->pde);
3821 + err:
3822 + #endif
3823 + spin_lock_bh(&cn->lock);
3824 + list_del_rcu(&c->list);
3825 ++out_config_put:
3826 + spin_unlock_bh(&cn->lock);
3827 + clusterip_config_put(c);
3828 +-
3829 + return ERR_PTR(err);
3830 + }
3831 +
3832 +@@ -475,34 +498,20 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
3833 + &e->ip.dst.s_addr);
3834 + return -EINVAL;
3835 + } else {
3836 +- struct net_device *dev;
3837 +-
3838 +- if (e->ip.iniface[0] == '\0') {
3839 +- pr_info("Please specify an interface name\n");
3840 +- return -EINVAL;
3841 +- }
3842 +-
3843 +- dev = dev_get_by_name(par->net, e->ip.iniface);
3844 +- if (!dev) {
3845 +- pr_info("no such interface %s\n",
3846 +- e->ip.iniface);
3847 +- return -ENOENT;
3848 +- }
3849 +- dev_put(dev);
3850 +-
3851 + config = clusterip_config_init(par->net, cipinfo,
3852 + e->ip.dst.s_addr,
3853 + e->ip.iniface);
3854 + if (IS_ERR(config))
3855 + return PTR_ERR(config);
3856 + }
3857 +- }
3858 ++ } else if (memcmp(&config->clustermac, &cipinfo->clustermac, ETH_ALEN))
3859 ++ return -EINVAL;
3860 +
3861 + ret = nf_ct_netns_get(par->net, par->family);
3862 + if (ret < 0) {
3863 + pr_info("cannot load conntrack support for proto=%u\n",
3864 + par->family);
3865 +- clusterip_config_entry_put(par->net, config);
3866 ++ clusterip_config_entry_put(config);
3867 + clusterip_config_put(config);
3868 + return ret;
3869 + }
3870 +@@ -524,7 +533,7 @@ static void clusterip_tg_destroy(const struct xt_tgdtor_param *par)
3871 +
3872 + /* if no more entries are referencing the config, remove it
3873 + * from the list and destroy the proc entry */
3874 +- clusterip_config_entry_put(par->net, cipinfo->config);
3875 ++ clusterip_config_entry_put(cipinfo->config);
3876 +
3877 + clusterip_config_put(cipinfo->config);
3878 +
3879 +@@ -806,7 +815,7 @@ static const struct file_operations clusterip_proc_fops = {
3880 +
3881 + static int clusterip_net_init(struct net *net)
3882 + {
3883 +- struct clusterip_net *cn = net_generic(net, clusterip_net_id);
3884 ++ struct clusterip_net *cn = clusterip_pernet(net);
3885 + int ret;
3886 +
3887 + INIT_LIST_HEAD(&cn->configs);
3888 +@@ -831,13 +840,12 @@ static int clusterip_net_init(struct net *net)
3889 +
3890 + static void clusterip_net_exit(struct net *net)
3891 + {
3892 +- struct clusterip_net *cn = net_generic(net, clusterip_net_id);
3893 ++ struct clusterip_net *cn = clusterip_pernet(net);
3894 + #ifdef CONFIG_PROC_FS
3895 + proc_remove(cn->procdir);
3896 + cn->procdir = NULL;
3897 + #endif
3898 + nf_unregister_net_hook(net, &cip_arp_ops);
3899 +- WARN_ON_ONCE(!list_empty(&cn->configs));
3900 + }
3901 +
3902 + static struct pernet_operations clusterip_net_ops = {
3903 +@@ -847,6 +855,10 @@ static struct pernet_operations clusterip_net_ops = {
3904 + .size = sizeof(struct clusterip_net),
3905 + };
3906 +
3907 ++struct notifier_block cip_netdev_notifier = {
3908 ++ .notifier_call = clusterip_netdev_event
3909 ++};
3910 ++
3911 + static int __init clusterip_tg_init(void)
3912 + {
3913 + int ret;
3914 +@@ -859,11 +871,17 @@ static int __init clusterip_tg_init(void)
3915 + if (ret < 0)
3916 + goto cleanup_subsys;
3917 +
3918 ++ ret = register_netdevice_notifier(&cip_netdev_notifier);
3919 ++ if (ret < 0)
3920 ++ goto unregister_target;
3921 ++
3922 + pr_info("ClusterIP Version %s loaded successfully\n",
3923 + CLUSTERIP_VERSION);
3924 +
3925 + return 0;
3926 +
3927 ++unregister_target:
3928 ++ xt_unregister_target(&clusterip_tg_reg);
3929 + cleanup_subsys:
3930 + unregister_pernet_subsys(&clusterip_net_ops);
3931 + return ret;
3932 +@@ -873,6 +891,7 @@ static void __exit clusterip_tg_exit(void)
3933 + {
3934 + pr_info("ClusterIP Version %s unloading\n", CLUSTERIP_VERSION);
3935 +
3936 ++ unregister_netdevice_notifier(&cip_netdev_notifier);
3937 + xt_unregister_target(&clusterip_tg_reg);
3938 + unregister_pernet_subsys(&clusterip_net_ops);
3939 +
3940 +diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
3941 +index 9a4261e50272..506b2ae07bb3 100644
3942 +--- a/net/ipv6/af_inet6.c
3943 ++++ b/net/ipv6/af_inet6.c
3944 +@@ -309,6 +309,7 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
3945 +
3946 + /* Check if the address belongs to the host. */
3947 + if (addr_type == IPV6_ADDR_MAPPED) {
3948 ++ struct net_device *dev = NULL;
3949 + int chk_addr_ret;
3950 +
3951 + /* Binding to v4-mapped address on a v6-only socket
3952 +@@ -319,9 +320,20 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
3953 + goto out;
3954 + }
3955 +
3956 ++ rcu_read_lock();
3957 ++ if (sk->sk_bound_dev_if) {
3958 ++ dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
3959 ++ if (!dev) {
3960 ++ err = -ENODEV;
3961 ++ goto out_unlock;
3962 ++ }
3963 ++ }
3964 ++
3965 + /* Reproduce AF_INET checks to make the bindings consistent */
3966 + v4addr = addr->sin6_addr.s6_addr32[3];
3967 +- chk_addr_ret = inet_addr_type(net, v4addr);
3968 ++ chk_addr_ret = inet_addr_type_dev_table(net, dev, v4addr);
3969 ++ rcu_read_unlock();
3970 ++
3971 + if (!inet_can_nonlocal_bind(net, inet) &&
3972 + v4addr != htonl(INADDR_ANY) &&
3973 + chk_addr_ret != RTN_LOCAL &&
3974 +diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
3975 +index c00b6a2e8e3c..13ade5782847 100644
3976 +--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
3977 ++++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
3978 +@@ -219,10 +219,6 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
3979 + struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
3980 + u32 ip;
3981 +
3982 +- /* MAC can be src only */
3983 +- if (!(opt->flags & IPSET_DIM_TWO_SRC))
3984 +- return 0;
3985 +-
3986 + ip = ntohl(ip4addr(skb, opt->flags & IPSET_DIM_ONE_SRC));
3987 + if (ip < map->first_ip || ip > map->last_ip)
3988 + return -IPSET_ERR_BITMAP_RANGE;
3989 +@@ -233,7 +229,11 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
3990 + return -EINVAL;
3991 +
3992 + e.id = ip_to_id(map, ip);
3993 +- memcpy(e.ether, eth_hdr(skb)->h_source, ETH_ALEN);
3994 ++
3995 ++ if (opt->flags & IPSET_DIM_ONE_SRC)
3996 ++ ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
3997 ++ else
3998 ++ ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
3999 +
4000 + return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
4001 + }
4002 +diff --git a/net/netfilter/ipset/ip_set_hash_ipmac.c b/net/netfilter/ipset/ip_set_hash_ipmac.c
4003 +index 1ab5ed2f6839..fd87de3ed55b 100644
4004 +--- a/net/netfilter/ipset/ip_set_hash_ipmac.c
4005 ++++ b/net/netfilter/ipset/ip_set_hash_ipmac.c
4006 +@@ -103,7 +103,11 @@ hash_ipmac4_kadt(struct ip_set *set, const struct sk_buff *skb,
4007 + (skb_mac_header(skb) + ETH_HLEN) > skb->data)
4008 + return -EINVAL;
4009 +
4010 +- memcpy(e.ether, eth_hdr(skb)->h_source, ETH_ALEN);
4011 ++ if (opt->flags & IPSET_DIM_ONE_SRC)
4012 ++ ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
4013 ++ else
4014 ++ ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
4015 ++
4016 + if (ether_addr_equal(e.ether, invalid_ether))
4017 + return -EINVAL;
4018 +
4019 +@@ -211,15 +215,15 @@ hash_ipmac6_kadt(struct ip_set *set, const struct sk_buff *skb,
4020 + };
4021 + struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
4022 +
4023 +- /* MAC can be src only */
4024 +- if (!(opt->flags & IPSET_DIM_TWO_SRC))
4025 +- return 0;
4026 +-
4027 + if (skb_mac_header(skb) < skb->head ||
4028 + (skb_mac_header(skb) + ETH_HLEN) > skb->data)
4029 + return -EINVAL;
4030 +
4031 +- memcpy(e.ether, eth_hdr(skb)->h_source, ETH_ALEN);
4032 ++ if (opt->flags & IPSET_DIM_ONE_SRC)
4033 ++ ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
4034 ++ else
4035 ++ ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
4036 ++
4037 + if (ether_addr_equal(e.ether, invalid_ether))
4038 + return -EINVAL;
4039 +
4040 +diff --git a/net/netfilter/ipset/ip_set_hash_mac.c b/net/netfilter/ipset/ip_set_hash_mac.c
4041 +index f9d5a2a1e3d0..4fe5f243d0a3 100644
4042 +--- a/net/netfilter/ipset/ip_set_hash_mac.c
4043 ++++ b/net/netfilter/ipset/ip_set_hash_mac.c
4044 +@@ -81,15 +81,15 @@ hash_mac4_kadt(struct ip_set *set, const struct sk_buff *skb,
4045 + struct hash_mac4_elem e = { { .foo[0] = 0, .foo[1] = 0 } };
4046 + struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
4047 +
4048 +- /* MAC can be src only */
4049 +- if (!(opt->flags & IPSET_DIM_ONE_SRC))
4050 +- return 0;
4051 +-
4052 + if (skb_mac_header(skb) < skb->head ||
4053 + (skb_mac_header(skb) + ETH_HLEN) > skb->data)
4054 + return -EINVAL;
4055 +
4056 +- ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
4057 ++ if (opt->flags & IPSET_DIM_ONE_SRC)
4058 ++ ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
4059 ++ else
4060 ++ ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
4061 ++
4062 + if (is_zero_ether_addr(e.ether))
4063 + return -EINVAL;
4064 + return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
4065 +diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c
4066 +index 904e775d1a44..cf40a8284a38 100644
4067 +--- a/samples/bpf/bpf_load.c
4068 ++++ b/samples/bpf/bpf_load.c
4069 +@@ -55,6 +55,23 @@ static int populate_prog_array(const char *event, int prog_fd)
4070 + return 0;
4071 + }
4072 +
4073 ++static int write_kprobe_events(const char *val)
4074 ++{
4075 ++ int fd, ret, flags;
4076 ++
4077 ++ if ((val != NULL) && (val[0] == '\0'))
4078 ++ flags = O_WRONLY | O_TRUNC;
4079 ++ else
4080 ++ flags = O_WRONLY | O_APPEND;
4081 ++
4082 ++ fd = open("/sys/kernel/debug/tracing/kprobe_events", flags);
4083 ++
4084 ++ ret = write(fd, val, strlen(val));
4085 ++ close(fd);
4086 ++
4087 ++ return ret;
4088 ++}
4089 ++
4090 + static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
4091 + {
4092 + bool is_socket = strncmp(event, "socket", 6) == 0;
4093 +@@ -166,10 +183,9 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
4094 +
4095 + #ifdef __x86_64__
4096 + if (strncmp(event, "sys_", 4) == 0) {
4097 +- snprintf(buf, sizeof(buf),
4098 +- "echo '%c:__x64_%s __x64_%s' >> /sys/kernel/debug/tracing/kprobe_events",
4099 +- is_kprobe ? 'p' : 'r', event, event);
4100 +- err = system(buf);
4101 ++ snprintf(buf, sizeof(buf), "%c:__x64_%s __x64_%s",
4102 ++ is_kprobe ? 'p' : 'r', event, event);
4103 ++ err = write_kprobe_events(buf);
4104 + if (err >= 0) {
4105 + need_normal_check = false;
4106 + event_prefix = "__x64_";
4107 +@@ -177,10 +193,9 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
4108 + }
4109 + #endif
4110 + if (need_normal_check) {
4111 +- snprintf(buf, sizeof(buf),
4112 +- "echo '%c:%s %s' >> /sys/kernel/debug/tracing/kprobe_events",
4113 +- is_kprobe ? 'p' : 'r', event, event);
4114 +- err = system(buf);
4115 ++ snprintf(buf, sizeof(buf), "%c:%s %s",
4116 ++ is_kprobe ? 'p' : 'r', event, event);
4117 ++ err = write_kprobe_events(buf);
4118 + if (err < 0) {
4119 + printf("failed to create kprobe '%s' error '%s'\n",
4120 + event, strerror(errno));
4121 +@@ -520,7 +535,7 @@ static int do_load_bpf_file(const char *path, fixup_map_cb fixup_map)
4122 + return 1;
4123 +
4124 + /* clear all kprobes */
4125 +- i = system("echo \"\" > /sys/kernel/debug/tracing/kprobe_events");
4126 ++ i = write_kprobe_events("");
4127 +
4128 + /* scan over all elf sections to get license and map info */
4129 + for (i = 1; i < ehdr.e_shnum; i++) {
4130 +diff --git a/scripts/kconfig/zconf.l b/scripts/kconfig/zconf.l
4131 +index 25bd2b89fe3f..c2f577d71964 100644
4132 +--- a/scripts/kconfig/zconf.l
4133 ++++ b/scripts/kconfig/zconf.l
4134 +@@ -73,7 +73,7 @@ static void warn_ignored_character(char chr)
4135 + {
4136 + fprintf(stderr,
4137 + "%s:%d:warning: ignoring unsupported character '%c'\n",
4138 +- zconf_curname(), zconf_lineno(), chr);
4139 ++ current_file->name, yylineno, chr);
4140 + }
4141 + %}
4142 +
4143 +@@ -221,6 +221,8 @@ n [A-Za-z0-9_-]
4144 + }
4145 + <<EOF>> {
4146 + BEGIN(INITIAL);
4147 ++ yylval.string = text;
4148 ++ return T_WORD_QUOTE;
4149 + }
4150 + }
4151 +
4152 +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
4153 +index fe251c6f09f1..3c3878f0d2fa 100644
4154 +--- a/security/selinux/hooks.c
4155 ++++ b/security/selinux/hooks.c
4156 +@@ -2934,7 +2934,7 @@ static int selinux_sb_kern_mount(struct super_block *sb, int flags, void *data)
4157 + return rc;
4158 +
4159 + /* Allow all mounts performed by the kernel */
4160 +- if (flags & MS_KERNMOUNT)
4161 ++ if (flags & (MS_KERNMOUNT | MS_SUBMOUNT))
4162 + return 0;
4163 +
4164 + ad.type = LSM_AUDIT_DATA_DENTRY;
4165 +diff --git a/sound/firewire/Kconfig b/sound/firewire/Kconfig
4166 +index 529d9f405fa9..0cb65d0864cc 100644
4167 +--- a/sound/firewire/Kconfig
4168 ++++ b/sound/firewire/Kconfig
4169 +@@ -41,6 +41,7 @@ config SND_OXFW
4170 + * Mackie(Loud) U.420/U.420d
4171 + * TASCAM FireOne
4172 + * Stanton Controllers & Systems 1 Deck/Mixer
4173 ++ * APOGEE duet FireWire
4174 +
4175 + To compile this driver as a module, choose M here: the module
4176 + will be called snd-oxfw.
4177 +diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
4178 +index 93676354f87f..de4af8a41ff0 100644
4179 +--- a/sound/firewire/bebob/bebob.c
4180 ++++ b/sound/firewire/bebob/bebob.c
4181 +@@ -434,7 +434,7 @@ static const struct ieee1394_device_id bebob_id_table[] = {
4182 + /* Apogee Electronics, DA/AD/DD-16X (X-FireWire card) */
4183 + SND_BEBOB_DEV_ENTRY(VEN_APOGEE, 0x00010048, &spec_normal),
4184 + /* Apogee Electronics, Ensemble */
4185 +- SND_BEBOB_DEV_ENTRY(VEN_APOGEE, 0x00001eee, &spec_normal),
4186 ++ SND_BEBOB_DEV_ENTRY(VEN_APOGEE, 0x01eeee, &spec_normal),
4187 + /* ESI, Quatafire610 */
4188 + SND_BEBOB_DEV_ENTRY(VEN_ESI, 0x00010064, &spec_normal),
4189 + /* AcousticReality, eARMasterOne */
4190 +diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
4191 +index 2ea8be6c8584..5f82a375725a 100644
4192 +--- a/sound/firewire/oxfw/oxfw.c
4193 ++++ b/sound/firewire/oxfw/oxfw.c
4194 +@@ -20,6 +20,7 @@
4195 + #define VENDOR_LACIE 0x00d04b
4196 + #define VENDOR_TASCAM 0x00022e
4197 + #define OUI_STANTON 0x001260
4198 ++#define OUI_APOGEE 0x0003db
4199 +
4200 + #define MODEL_SATELLITE 0x00200f
4201 +
4202 +@@ -436,6 +437,13 @@ static const struct ieee1394_device_id oxfw_id_table[] = {
4203 + .vendor_id = OUI_STANTON,
4204 + .model_id = 0x002000,
4205 + },
4206 ++ // APOGEE, duet FireWire
4207 ++ {
4208 ++ .match_flags = IEEE1394_MATCH_VENDOR_ID |
4209 ++ IEEE1394_MATCH_MODEL_ID,
4210 ++ .vendor_id = OUI_APOGEE,
4211 ++ .model_id = 0x01dddd,
4212 ++ },
4213 + { }
4214 + };
4215 + MODULE_DEVICE_TABLE(ieee1394, oxfw_id_table);
4216 +diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c
4217 +index 3135e9eafd18..7f376b63a166 100644
4218 +--- a/sound/soc/amd/acp-pcm-dma.c
4219 ++++ b/sound/soc/amd/acp-pcm-dma.c
4220 +@@ -1147,18 +1147,21 @@ static int acp_dma_new(struct snd_soc_pcm_runtime *rtd)
4221 + struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd,
4222 + DRV_NAME);
4223 + struct audio_drv_data *adata = dev_get_drvdata(component->dev);
4224 ++ struct device *parent = component->dev->parent;
4225 +
4226 + switch (adata->asic_type) {
4227 + case CHIP_STONEY:
4228 + ret = snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
4229 + SNDRV_DMA_TYPE_DEV,
4230 +- NULL, ST_MIN_BUFFER,
4231 ++ parent,
4232 ++ ST_MIN_BUFFER,
4233 + ST_MAX_BUFFER);
4234 + break;
4235 + default:
4236 + ret = snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
4237 + SNDRV_DMA_TYPE_DEV,
4238 +- NULL, MIN_BUFFER,
4239 ++ parent,
4240 ++ MIN_BUFFER,
4241 + MAX_BUFFER);
4242 + break;
4243 + }
4244 +diff --git a/sound/soc/codecs/pcm3168a.c b/sound/soc/codecs/pcm3168a.c
4245 +index 3356c91f55b0..e3de1ff3b6c2 100644
4246 +--- a/sound/soc/codecs/pcm3168a.c
4247 ++++ b/sound/soc/codecs/pcm3168a.c
4248 +@@ -688,15 +688,22 @@ err_clk:
4249 + }
4250 + EXPORT_SYMBOL_GPL(pcm3168a_probe);
4251 +
4252 +-void pcm3168a_remove(struct device *dev)
4253 ++static void pcm3168a_disable(struct device *dev)
4254 + {
4255 + struct pcm3168a_priv *pcm3168a = dev_get_drvdata(dev);
4256 +
4257 +- pm_runtime_disable(dev);
4258 + regulator_bulk_disable(ARRAY_SIZE(pcm3168a->supplies),
4259 +- pcm3168a->supplies);
4260 ++ pcm3168a->supplies);
4261 + clk_disable_unprepare(pcm3168a->scki);
4262 + }
4263 ++
4264 ++void pcm3168a_remove(struct device *dev)
4265 ++{
4266 ++ pm_runtime_disable(dev);
4267 ++#ifndef CONFIG_PM
4268 ++ pcm3168a_disable(dev);
4269 ++#endif
4270 ++}
4271 + EXPORT_SYMBOL_GPL(pcm3168a_remove);
4272 +
4273 + #ifdef CONFIG_PM
4274 +@@ -751,10 +758,7 @@ static int pcm3168a_rt_suspend(struct device *dev)
4275 +
4276 + regcache_cache_only(pcm3168a->regmap, true);
4277 +
4278 +- regulator_bulk_disable(ARRAY_SIZE(pcm3168a->supplies),
4279 +- pcm3168a->supplies);
4280 +-
4281 +- clk_disable_unprepare(pcm3168a->scki);
4282 ++ pcm3168a_disable(dev);
4283 +
4284 + return 0;
4285 + }
4286 +diff --git a/tools/lib/subcmd/Makefile b/tools/lib/subcmd/Makefile
4287 +index 95563b8e1ad7..ed61fb3a46c0 100644
4288 +--- a/tools/lib/subcmd/Makefile
4289 ++++ b/tools/lib/subcmd/Makefile
4290 +@@ -36,8 +36,6 @@ endif
4291 + CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
4292 +
4293 + CFLAGS += -I$(srctree)/tools/include/
4294 +-CFLAGS += -I$(srctree)/include/uapi
4295 +-CFLAGS += -I$(srctree)/include
4296 +
4297 + SUBCMD_IN := $(OUTPUT)libsubcmd-in.o
4298 +
4299 +diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
4300 +index e30d20fb482d..f00ea77f5f08 100644
4301 +--- a/tools/perf/Makefile.config
4302 ++++ b/tools/perf/Makefile.config
4303 +@@ -294,6 +294,8 @@ ifndef NO_BIONIC
4304 + $(call feature_check,bionic)
4305 + ifeq ($(feature-bionic), 1)
4306 + BIONIC := 1
4307 ++ CFLAGS += -DLACKS_SIGQUEUE_PROTOTYPE
4308 ++ CFLAGS += -DLACKS_OPEN_MEMSTREAM_PROTOTYPE
4309 + EXTLIBS := $(filter-out -lrt,$(EXTLIBS))
4310 + EXTLIBS := $(filter-out -lpthread,$(EXTLIBS))
4311 + endif
4312 +diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
4313 +index db0ba8caf5a2..ba8ecaf52200 100644
4314 +--- a/tools/perf/arch/x86/util/intel-pt.c
4315 ++++ b/tools/perf/arch/x86/util/intel-pt.c
4316 +@@ -524,10 +524,21 @@ static int intel_pt_validate_config(struct perf_pmu *intel_pt_pmu,
4317 + struct perf_evsel *evsel)
4318 + {
4319 + int err;
4320 ++ char c;
4321 +
4322 + if (!evsel)
4323 + return 0;
4324 +
4325 ++ /*
4326 ++ * If supported, force pass-through config term (pt=1) even if user
4327 ++ * sets pt=0, which avoids senseless kernel errors.
4328 ++ */
4329 ++ if (perf_pmu__scan_file(intel_pt_pmu, "format/pt", "%c", &c) == 1 &&
4330 ++ !(evsel->attr.config & 1)) {
4331 ++ pr_warning("pt=0 doesn't make sense, forcing pt=1\n");
4332 ++ evsel->attr.config |= 1;
4333 ++ }
4334 ++
4335 + err = intel_pt_val_config_term(intel_pt_pmu, "caps/cycle_thresholds",
4336 + "cyc_thresh", "caps/psb_cyc",
4337 + evsel->attr.config);
4338 +diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
4339 +index d097b5b47eb8..40720150ccd8 100644
4340 +--- a/tools/perf/builtin-stat.c
4341 ++++ b/tools/perf/builtin-stat.c
4342 +@@ -1961,7 +1961,7 @@ static int parse_metric_groups(const struct option *opt,
4343 + return metricgroup__parse_groups(opt, str, &metric_events);
4344 + }
4345 +
4346 +-static const struct option stat_options[] = {
4347 ++static struct option stat_options[] = {
4348 + OPT_BOOLEAN('T', "transaction", &transaction_run,
4349 + "hardware transaction statistics"),
4350 + OPT_CALLBACK('e', "event", &evsel_list, "event",
4351 +@@ -2847,6 +2847,12 @@ int cmd_stat(int argc, const char **argv)
4352 + return -ENOMEM;
4353 +
4354 + parse_events__shrink_config_terms();
4355 ++
4356 ++ /* String-parsing callback-based options would segfault when negated */
4357 ++ set_option_flag(stat_options, 'e', "event", PARSE_OPT_NONEG);
4358 ++ set_option_flag(stat_options, 'M', "metrics", PARSE_OPT_NONEG);
4359 ++ set_option_flag(stat_options, 'G', "cgroup", PARSE_OPT_NONEG);
4360 ++
4361 + argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands,
4362 + (const char **) stat_usage,
4363 + PARSE_OPT_STOP_AT_NON_OPTION);
4364 +diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
4365 +index a827919c6263..775b99833e51 100644
4366 +--- a/tools/perf/builtin-timechart.c
4367 ++++ b/tools/perf/builtin-timechart.c
4368 +@@ -43,6 +43,10 @@
4369 + #include "util/data.h"
4370 + #include "util/debug.h"
4371 +
4372 ++#ifdef LACKS_OPEN_MEMSTREAM_PROTOTYPE
4373 ++FILE *open_memstream(char **ptr, size_t *sizeloc);
4374 ++#endif
4375 ++
4376 + #define SUPPORT_OLD_POWER_EVENTS 1
4377 + #define PWR_EVENT_EXIT -1
4378 +
4379 +diff --git a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
4380 +index 36c903faed0b..71e9737f4614 100644
4381 +--- a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
4382 ++++ b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
4383 +@@ -73,7 +73,7 @@
4384 + },
4385 + {
4386 + "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
4387 +- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS_PS + MEM_LOAD_RETIRED.FB_HIT_PS )",
4388 ++ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
4389 + "MetricGroup": "Memory_Bound;Memory_Lat",
4390 + "MetricName": "Load_Miss_Real_Latency"
4391 + },
4392 +diff --git a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
4393 +index 36c903faed0b..71e9737f4614 100644
4394 +--- a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
4395 ++++ b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
4396 +@@ -73,7 +73,7 @@
4397 + },
4398 + {
4399 + "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
4400 +- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS_PS + MEM_LOAD_RETIRED.FB_HIT_PS )",
4401 ++ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
4402 + "MetricGroup": "Memory_Bound;Memory_Lat",
4403 + "MetricName": "Load_Miss_Real_Latency"
4404 + },
4405 +diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c
4406 +index a467615c5a0e..910e25e64188 100644
4407 +--- a/tools/perf/tests/bp_signal.c
4408 ++++ b/tools/perf/tests/bp_signal.c
4409 +@@ -291,12 +291,20 @@ int test__bp_signal(struct test *test __maybe_unused, int subtest __maybe_unused
4410 +
4411 + bool test__bp_signal_is_supported(void)
4412 + {
4413 +-/*
4414 +- * The powerpc so far does not have support to even create
4415 +- * instruction breakpoint using the perf event interface.
4416 +- * Once it's there we can release this.
4417 +- */
4418 +-#if defined(__powerpc__) || defined(__s390x__)
4419 ++ /*
4420 ++ * PowerPC and S390 do not support creation of instruction
4421 ++ * breakpoints using the perf_event interface.
4422 ++ *
4423 ++ * ARM requires explicit rounding down of the instruction
4424 ++ * pointer in Thumb mode, and then requires the single-step
4425 ++ * to be handled explicitly in the overflow handler to avoid
4426 ++ * stepping into the SIGIO handler and getting stuck on the
4427 ++ * breakpointed instruction.
4428 ++ *
4429 ++ * Just disable the test for these architectures until these
4430 ++ * issues are resolved.
4431 ++ */
4432 ++#if defined(__powerpc__) || defined(__s390x__) || defined(__arm__)
4433 + return false;
4434 + #else
4435 + return true;
4436 +diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
4437 +index ca577658e890..7b5e15cc6b71 100644
4438 +--- a/tools/perf/util/cs-etm.c
4439 ++++ b/tools/perf/util/cs-etm.c
4440 +@@ -1005,7 +1005,7 @@ static int cs_etm__flush(struct cs_etm_queue *etmq)
4441 + }
4442 +
4443 + swap_packet:
4444 +- if (etmq->etm->synth_opts.last_branch) {
4445 ++ if (etm->sample_branches || etm->synth_opts.last_branch) {
4446 + /*
4447 + * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
4448 + * the next incoming packet.
4449 +diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
4450 +index be440df29615..819aa4491b53 100644
4451 +--- a/tools/perf/util/evlist.c
4452 ++++ b/tools/perf/util/evlist.c
4453 +@@ -34,6 +34,10 @@
4454 + #include <linux/log2.h>
4455 + #include <linux/err.h>
4456 +
4457 ++#ifdef LACKS_SIGQUEUE_PROTOTYPE
4458 ++int sigqueue(pid_t pid, int sig, const union sigval value);
4459 ++#endif
4460 ++
4461 + #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
4462 + #define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
4463 +
4464 +diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
4465 +index f8cd3e7c9186..ebb18a9bc460 100644
4466 +--- a/tools/perf/util/parse-events.c
4467 ++++ b/tools/perf/util/parse-events.c
4468 +@@ -2454,7 +2454,7 @@ restart:
4469 + if (!name_only && strlen(syms->alias))
4470 + snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias);
4471 + else
4472 +- strncpy(name, syms->symbol, MAX_NAME_LEN);
4473 ++ strlcpy(name, syms->symbol, MAX_NAME_LEN);
4474 +
4475 + evt_list[evt_i] = strdup(name);
4476 + if (evt_list[evt_i] == NULL)
4477 +diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
4478 +index 1cbada2dc6be..f735ee038713 100644
4479 +--- a/tools/perf/util/svghelper.c
4480 ++++ b/tools/perf/util/svghelper.c
4481 +@@ -334,7 +334,7 @@ static char *cpu_model(void)
4482 + if (file) {
4483 + while (fgets(buf, 255, file)) {
4484 + if (strstr(buf, "model name")) {
4485 +- strncpy(cpu_m, &buf[13], 255);
4486 ++ strlcpy(cpu_m, &buf[13], 255);
4487 + break;
4488 + }
4489 + }
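Both strncpy() replacements above (parse-events.c and svghelper.c) address the same problem: strncpy() leaves the destination unterminated when the source is at least as long as the buffer, and newer compilers warn about exactly this, whereas strlcpy() always NUL-terminates and returns the length it tried to copy. strlcpy() is not guaranteed by every libc, so a fallback along these lines is commonly carried in-tree (my_strlcpy below is an illustrative stand-in, not the helper perf actually uses):

#include <string.h>

/* Always NUL-terminates; returns strlen(src) so callers can detect
 * truncation by checking whether the return value is >= size. */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
        size_t len = strlen(src);

        if (size) {
                size_t copy = len >= size ? size - 1 : len;

                memcpy(dst, src, copy);
                dst[copy] = '\0';
        }
        return len;
}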
4490 +diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
4491 +index fff7fb1285fc..f3f874ba186b 100644
4492 +--- a/tools/testing/selftests/bpf/Makefile
4493 ++++ b/tools/testing/selftests/bpf/Makefile
4494 +@@ -124,6 +124,16 @@ endif
4495 + endif
4496 + endif
4497 +
4498 ++# Have one program compiled without "-target bpf" to test whether libbpf loads
4499 ++# it successfully
4500 ++$(OUTPUT)/test_xdp.o: test_xdp.c
4501 ++ $(CLANG) $(CLANG_FLAGS) \
4502 ++ -O2 -emit-llvm -c $< -o - | \
4503 ++ $(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@
4504 ++ifeq ($(DWARF2BTF),y)
4505 ++ $(BTF_PAHOLE) -J $@
4506 ++endif
4507 ++
4508 + $(OUTPUT)/%.o: %.c
4509 + $(CLANG) $(CLANG_FLAGS) \
4510 + -O2 -target bpf -emit-llvm -c $< -o - | \
4511 +diff --git a/tools/testing/selftests/bpf/test_libbpf.sh b/tools/testing/selftests/bpf/test_libbpf.sh
4512 +index d97dc914cd49..8b1bc96d8e0c 100755
4513 +--- a/tools/testing/selftests/bpf/test_libbpf.sh
4514 ++++ b/tools/testing/selftests/bpf/test_libbpf.sh
4515 +@@ -33,17 +33,11 @@ trap exit_handler 0 2 3 6 9
4516 +
4517 + libbpf_open_file test_l4lb.o
4518 +
4519 +-# TODO: fix libbpf to load noinline functions
4520 +-# [warning] libbpf: incorrect bpf_call opcode
4521 +-#libbpf_open_file test_l4lb_noinline.o
4522 ++# Load a program with BPF-to-BPF calls
4523 ++libbpf_open_file test_l4lb_noinline.o
4524 +
4525 +-# TODO: fix test_xdp_meta.c to load with libbpf
4526 +-# [warning] libbpf: test_xdp_meta.o doesn't provide kernel version
4527 +-#libbpf_open_file test_xdp_meta.o
4528 +-
4529 +-# TODO: fix libbpf to handle .eh_frame
4530 +-# [warning] libbpf: relocation failed: no section(10)
4531 +-#libbpf_open_file ../../../../samples/bpf/tracex3_kern.o
4532 ++# Load a program compiled without the "-target bpf" flag
4533 ++libbpf_open_file test_xdp.o
4534 +
4535 + # Success
4536 + exit 0
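The script now exercises two additional objects: test_l4lb_noinline.o (BPF-to-BPF calls) and test_xdp.o, which the Makefile rule added above builds without "-target bpf". "Loading" here means libbpf can open and parse the ELF object. A hedged sketch of such a check using the public libbpf API (bpf_object__open(), libbpf_get_error(), bpf_object__close()); the helper name and error handling are simplified and are not the actual test_libbpf_open implementation:

#include <stdio.h>
#include <bpf/libbpf.h>   /* may be plain "libbpf.h" depending on header install */

static int try_open(const char *path)
{
        struct bpf_object *obj = bpf_object__open(path);

        if (!obj || libbpf_get_error(obj)) {
                fprintf(stderr, "libbpf failed to open %s\n", path);
                return 1;
        }
        bpf_object__close(obj);
        return 0;
}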
4537 +diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
4538 +index e436b67f2426..9db5a7378f40 100644
4539 +--- a/tools/testing/selftests/bpf/test_verifier.c
4540 ++++ b/tools/testing/selftests/bpf/test_verifier.c
4541 +@@ -2748,6 +2748,19 @@ static struct bpf_test tests[] = {
4542 + .result_unpriv = REJECT,
4543 + .result = ACCEPT,
4544 + },
4545 ++ {
4546 ++ "alu32: mov u32 const",
4547 ++ .insns = {
4548 ++ BPF_MOV32_IMM(BPF_REG_7, 0),
4549 ++ BPF_ALU32_IMM(BPF_AND, BPF_REG_7, 1),
4550 ++ BPF_MOV32_REG(BPF_REG_0, BPF_REG_7),
4551 ++ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4552 ++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
4553 ++ BPF_EXIT_INSN(),
4554 ++ },
4555 ++ .result = ACCEPT,
4556 ++ .retval = 0,
4557 ++ },
4558 + {
4559 + "unpriv: partial copy of pointer",
4560 + .insns = {
4561 +diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
4562 +index 6ae3730c4ee3..76d654ef3234 100644
4563 +--- a/tools/testing/selftests/kselftest_harness.h
4564 ++++ b/tools/testing/selftests/kselftest_harness.h
4565 +@@ -354,7 +354,7 @@
4566 + * ASSERT_EQ(expected, measured): expected == measured
4567 + */
4568 + #define ASSERT_EQ(expected, seen) \
4569 +- __EXPECT(expected, seen, ==, 1)
4570 ++ __EXPECT(expected, #expected, seen, #seen, ==, 1)
4571 +
4572 + /**
4573 + * ASSERT_NE(expected, seen)
4574 +@@ -365,7 +365,7 @@
4575 + * ASSERT_NE(expected, measured): expected != measured
4576 + */
4577 + #define ASSERT_NE(expected, seen) \
4578 +- __EXPECT(expected, seen, !=, 1)
4579 ++ __EXPECT(expected, #expected, seen, #seen, !=, 1)
4580 +
4581 + /**
4582 + * ASSERT_LT(expected, seen)
4583 +@@ -376,7 +376,7 @@
4584 + * ASSERT_LT(expected, measured): expected < measured
4585 + */
4586 + #define ASSERT_LT(expected, seen) \
4587 +- __EXPECT(expected, seen, <, 1)
4588 ++ __EXPECT(expected, #expected, seen, #seen, <, 1)
4589 +
4590 + /**
4591 + * ASSERT_LE(expected, seen)
4592 +@@ -387,7 +387,7 @@
4593 + * ASSERT_LE(expected, measured): expected <= measured
4594 + */
4595 + #define ASSERT_LE(expected, seen) \
4596 +- __EXPECT(expected, seen, <=, 1)
4597 ++ __EXPECT(expected, #expected, seen, #seen, <=, 1)
4598 +
4599 + /**
4600 + * ASSERT_GT(expected, seen)
4601 +@@ -398,7 +398,7 @@
4602 + * ASSERT_GT(expected, measured): expected > measured
4603 + */
4604 + #define ASSERT_GT(expected, seen) \
4605 +- __EXPECT(expected, seen, >, 1)
4606 ++ __EXPECT(expected, #expected, seen, #seen, >, 1)
4607 +
4608 + /**
4609 + * ASSERT_GE(expected, seen)
4610 +@@ -409,7 +409,7 @@
4611 + * ASSERT_GE(expected, measured): expected >= measured
4612 + */
4613 + #define ASSERT_GE(expected, seen) \
4614 +- __EXPECT(expected, seen, >=, 1)
4615 ++ __EXPECT(expected, #expected, seen, #seen, >=, 1)
4616 +
4617 + /**
4618 + * ASSERT_NULL(seen)
4619 +@@ -419,7 +419,7 @@
4620 + * ASSERT_NULL(measured): NULL == measured
4621 + */
4622 + #define ASSERT_NULL(seen) \
4623 +- __EXPECT(NULL, seen, ==, 1)
4624 ++ __EXPECT(NULL, "NULL", seen, #seen, ==, 1)
4625 +
4626 + /**
4627 + * ASSERT_TRUE(seen)
4628 +@@ -429,7 +429,7 @@
4629 + * ASSERT_TRUE(measured): measured != 0
4630 + */
4631 + #define ASSERT_TRUE(seen) \
4632 +- ASSERT_NE(0, seen)
4633 ++ __EXPECT(0, "0", seen, #seen, !=, 1)
4634 +
4635 + /**
4636 + * ASSERT_FALSE(seen)
4637 +@@ -439,7 +439,7 @@
4638 + * ASSERT_FALSE(measured): measured == 0
4639 + */
4640 + #define ASSERT_FALSE(seen) \
4641 +- ASSERT_EQ(0, seen)
4642 ++ __EXPECT(0, "0", seen, #seen, ==, 1)
4643 +
4644 + /**
4645 + * ASSERT_STREQ(expected, seen)
4646 +@@ -472,7 +472,7 @@
4647 + * EXPECT_EQ(expected, measured): expected == measured
4648 + */
4649 + #define EXPECT_EQ(expected, seen) \
4650 +- __EXPECT(expected, seen, ==, 0)
4651 ++ __EXPECT(expected, #expected, seen, #seen, ==, 0)
4652 +
4653 + /**
4654 + * EXPECT_NE(expected, seen)
4655 +@@ -483,7 +483,7 @@
4656 + * EXPECT_NE(expected, measured): expected != measured
4657 + */
4658 + #define EXPECT_NE(expected, seen) \
4659 +- __EXPECT(expected, seen, !=, 0)
4660 ++ __EXPECT(expected, #expected, seen, #seen, !=, 0)
4661 +
4662 + /**
4663 + * EXPECT_LT(expected, seen)
4664 +@@ -494,7 +494,7 @@
4665 + * EXPECT_LT(expected, measured): expected < measured
4666 + */
4667 + #define EXPECT_LT(expected, seen) \
4668 +- __EXPECT(expected, seen, <, 0)
4669 ++ __EXPECT(expected, #expected, seen, #seen, <, 0)
4670 +
4671 + /**
4672 + * EXPECT_LE(expected, seen)
4673 +@@ -505,7 +505,7 @@
4674 + * EXPECT_LE(expected, measured): expected <= measured
4675 + */
4676 + #define EXPECT_LE(expected, seen) \
4677 +- __EXPECT(expected, seen, <=, 0)
4678 ++ __EXPECT(expected, #expected, seen, #seen, <=, 0)
4679 +
4680 + /**
4681 + * EXPECT_GT(expected, seen)
4682 +@@ -516,7 +516,7 @@
4683 + * EXPECT_GT(expected, measured): expected > measured
4684 + */
4685 + #define EXPECT_GT(expected, seen) \
4686 +- __EXPECT(expected, seen, >, 0)
4687 ++ __EXPECT(expected, #expected, seen, #seen, >, 0)
4688 +
4689 + /**
4690 + * EXPECT_GE(expected, seen)
4691 +@@ -527,7 +527,7 @@
4692 + * EXPECT_GE(expected, measured): expected >= measured
4693 + */
4694 + #define EXPECT_GE(expected, seen) \
4695 +- __EXPECT(expected, seen, >=, 0)
4696 ++ __EXPECT(expected, #expected, seen, #seen, >=, 0)
4697 +
4698 + /**
4699 + * EXPECT_NULL(seen)
4700 +@@ -537,7 +537,7 @@
4701 + * EXPECT_NULL(measured): NULL == measured
4702 + */
4703 + #define EXPECT_NULL(seen) \
4704 +- __EXPECT(NULL, seen, ==, 0)
4705 ++ __EXPECT(NULL, "NULL", seen, #seen, ==, 0)
4706 +
4707 + /**
4708 + * EXPECT_TRUE(seen)
4709 +@@ -547,7 +547,7 @@
4710 + * EXPECT_TRUE(measured): 0 != measured
4711 + */
4712 + #define EXPECT_TRUE(seen) \
4713 +- EXPECT_NE(0, seen)
4714 ++ __EXPECT(0, "0", seen, #seen, !=, 0)
4715 +
4716 + /**
4717 + * EXPECT_FALSE(seen)
4718 +@@ -557,7 +557,7 @@
4719 + * EXPECT_FALSE(measured): 0 == measured
4720 + */
4721 + #define EXPECT_FALSE(seen) \
4722 +- EXPECT_EQ(0, seen)
4723 ++ __EXPECT(0, "0", seen, #seen, ==, 0)
4724 +
4725 + /**
4726 + * EXPECT_STREQ(expected, seen)
4727 +@@ -597,7 +597,7 @@
4728 + if (_metadata->passed && _metadata->step < 255) \
4729 + _metadata->step++;
4730 +
4731 +-#define __EXPECT(_expected, _seen, _t, _assert) do { \
4732 ++#define __EXPECT(_expected, _expected_str, _seen, _seen_str, _t, _assert) do { \
4733 + /* Avoid multiple evaluation of the cases */ \
4734 + __typeof__(_expected) __exp = (_expected); \
4735 + __typeof__(_seen) __seen = (_seen); \
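The hunk above changes __EXPECT() to accept pre-stringified copies of both arguments; the hunk below makes the log line print those strings instead of stringifying its own parameters. The underlying reason is a preprocessor rule: an argument that only meets the # operator after passing through another macro has already been macro-expanded, so the logged text can differ from what the test author wrote. Stringifying in the outermost, user-facing macro preserves the original spelling. A self-contained demonstration:

#include <stdio.h>

#define STR(x)           #x
#define STR_AT_INNER(x)  STR(x)   /* x is macro-expanded before # sees it */
#define STR_AT_OUTER(x)  #x       /* stringified exactly as written       */

#define ANSWER 42

int main(void)
{
        puts(STR_AT_INNER(ANSWER));  /* prints "42"     (old behaviour) */
        puts(STR_AT_OUTER(ANSWER));  /* prints "ANSWER" (new behaviour) */
        return 0;
}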
4736 +@@ -606,8 +606,8 @@
4737 + unsigned long long __exp_print = (uintptr_t)__exp; \
4738 + unsigned long long __seen_print = (uintptr_t)__seen; \
4739 + __TH_LOG("Expected %s (%llu) %s %s (%llu)", \
4740 +- #_expected, __exp_print, #_t, \
4741 +- #_seen, __seen_print); \
4742 ++ _expected_str, __exp_print, #_t, \
4743 ++ _seen_str, __seen_print); \
4744 + _metadata->passed = 0; \
4745 + /* Ensure the optional handler is triggered */ \
4746 + _metadata->trigger = 1; \