From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Thu, 15 Sep 2022 10:31:50
Message-Id: 1663237896.8325c14c96a71a8cea99233517e770f90a444b29.mpagano@gentoo
1 commit: 8325c14c96a71a8cea99233517e770f90a444b29
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Thu Sep 15 10:31:36 2022 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Thu Sep 15 10:31:36 2022 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=8325c14c
7
8 Linux patch 5.4.213
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1212_linux-5.4.213.patch | 2998 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 3002 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index daf9aa4b..1f79a793 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -891,6 +891,10 @@ Patch: 1211_linux-5.4.212.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.4.212
23
24 +Patch: 1212_linux-5.4.213.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.4.213
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1212_linux-5.4.213.patch b/1212_linux-5.4.213.patch
33 new file mode 100644
34 index 00000000..522768b7
35 --- /dev/null
36 +++ b/1212_linux-5.4.213.patch
37 @@ -0,0 +1,2998 @@
38 +diff --git a/Makefile b/Makefile
39 +index cecfe23f521f1..4a4c83d2b3f7c 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 5
45 + PATCHLEVEL = 4
46 +-SUBLEVEL = 212
47 ++SUBLEVEL = 213
48 + EXTRAVERSION =
49 + NAME = Kleptomaniac Octopus
50 +
51 +diff --git a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
52 +index eea317b41020d..5e454a694b78a 100644
53 +--- a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
54 ++++ b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
55 +@@ -51,16 +51,6 @@
56 + vin-supply = <&reg_3p3v_s5>;
57 + };
58 +
59 +- reg_3p3v_s0: regulator-3p3v-s0 {
60 +- compatible = "regulator-fixed";
61 +- regulator-name = "V_3V3_S0";
62 +- regulator-min-microvolt = <3300000>;
63 +- regulator-max-microvolt = <3300000>;
64 +- regulator-always-on;
65 +- regulator-boot-on;
66 +- vin-supply = <&reg_3p3v_s5>;
67 +- };
68 +-
69 + reg_3p3v_s5: regulator-3p3v-s5 {
70 + compatible = "regulator-fixed";
71 + regulator-name = "V_3V3_S5";
72 +diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c
73 +index 587543c6c51cb..97c42be71338a 100644
74 +--- a/arch/arm64/kernel/cacheinfo.c
75 ++++ b/arch/arm64/kernel/cacheinfo.c
76 +@@ -45,7 +45,8 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,
77 +
78 + int init_cache_level(unsigned int cpu)
79 + {
80 +- unsigned int ctype, level, leaves, fw_level;
81 ++ unsigned int ctype, level, leaves;
82 ++ int fw_level;
83 + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
84 +
85 + for (level = 1, leaves = 0; level <= MAX_CACHE_LEVEL; level++) {
86 +@@ -63,6 +64,9 @@ int init_cache_level(unsigned int cpu)
87 + else
88 + fw_level = acpi_find_last_cache_level(cpu);
89 +
90 ++ if (fw_level < 0)
91 ++ return fw_level;
92 ++
93 + if (level < fw_level) {
94 + /*
95 + * some external caches not specified in CLIDR_EL1
96 +diff --git a/arch/mips/loongson32/ls1c/board.c b/arch/mips/loongson32/ls1c/board.c
97 +index e9de6da0ce51f..9dcfe9de55b0a 100644
98 +--- a/arch/mips/loongson32/ls1c/board.c
99 ++++ b/arch/mips/loongson32/ls1c/board.c
100 +@@ -15,7 +15,6 @@ static struct platform_device *ls1c_platform_devices[] __initdata = {
101 + static int __init ls1c_platform_init(void)
102 + {
103 + ls1x_serial_set_uartclk(&ls1x_uart_pdev);
104 +- ls1x_rtc_set_extclk(&ls1x_rtc_pdev);
105 +
106 + return platform_add_devices(ls1c_platform_devices,
107 + ARRAY_SIZE(ls1c_platform_devices));
108 +diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
109 +index 951a339369dd5..b59a0c3d36921 100644
110 +--- a/arch/parisc/kernel/head.S
111 ++++ b/arch/parisc/kernel/head.S
112 +@@ -22,7 +22,7 @@
113 + #include <linux/linkage.h>
114 + #include <linux/init.h>
115 +
116 +- .level PA_ASM_LEVEL
117 ++ .level 1.1
118 +
119 + __INITDATA
120 + ENTRY(boot_args)
121 +@@ -69,6 +69,47 @@ $bss_loop:
122 + stw,ma %arg2,4(%r1)
123 + stw,ma %arg3,4(%r1)
124 +
125 ++#if !defined(CONFIG_64BIT) && defined(CONFIG_PA20)
126 ++ /* This 32-bit kernel was compiled for PA2.0 CPUs. Check current CPU
127 ++ * and halt kernel if we detect a PA1.x CPU. */
128 ++ ldi 32,%r10
129 ++ mtctl %r10,%cr11
130 ++ .level 2.0
131 ++ mfctl,w %cr11,%r10
132 ++ .level 1.1
133 ++ comib,<>,n 0,%r10,$cpu_ok
134 ++
135 ++ load32 PA(msg1),%arg0
136 ++ ldi msg1_end-msg1,%arg1
137 ++$iodc_panic:
138 ++ copy %arg0, %r10
139 ++ copy %arg1, %r11
140 ++ load32 PA(init_stack),%sp
141 ++#define MEM_CONS 0x3A0
142 ++ ldw MEM_CONS+32(%r0),%arg0 // HPA
143 ++ ldi ENTRY_IO_COUT,%arg1
144 ++ ldw MEM_CONS+36(%r0),%arg2 // SPA
145 ++ ldw MEM_CONS+8(%r0),%arg3 // layers
146 ++ load32 PA(__bss_start),%r1
147 ++ stw %r1,-52(%sp) // arg4
148 ++ stw %r0,-56(%sp) // arg5
149 ++ stw %r10,-60(%sp) // arg6 = ptr to text
150 ++ stw %r11,-64(%sp) // arg7 = len
151 ++ stw %r0,-68(%sp) // arg8
152 ++ load32 PA(.iodc_panic_ret), %rp
153 ++ ldw MEM_CONS+40(%r0),%r1 // ENTRY_IODC
154 ++ bv,n (%r1)
155 ++.iodc_panic_ret:
156 ++ b . /* wait endless with ... */
157 ++ or %r10,%r10,%r10 /* qemu idle sleep */
158 ++msg1: .ascii "Can't boot kernel which was built for PA8x00 CPUs on this machine.\r\n"
159 ++msg1_end:
160 ++
161 ++$cpu_ok:
162 ++#endif
163 ++
164 ++ .level PA_ASM_LEVEL
165 ++
166 + /* Initialize startup VM. Just map first 16/32 MB of memory */
167 + load32 PA(swapper_pg_dir),%r4
168 + mtctl %r4,%cr24 /* Initialize kernel root pointer */
169 +diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
170 +index 5b905a2f4e4df..4a8f3526f5a53 100644
171 +--- a/arch/powerpc/kernel/systbl.S
172 ++++ b/arch/powerpc/kernel/systbl.S
173 +@@ -25,6 +25,7 @@ sys_call_table:
174 + #include <asm/syscall_table_64.h>
175 + #undef __SYSCALL
176 + #else
177 ++ .p2align 2
178 + #define __SYSCALL(nr, entry) .long entry
179 + #include <asm/syscall_table_32.h>
180 + #undef __SYSCALL
181 +diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
182 +index de8f0bf5f238c..487725e49b961 100644
183 +--- a/arch/s390/include/asm/hugetlb.h
184 ++++ b/arch/s390/include/asm/hugetlb.h
185 +@@ -35,9 +35,11 @@ static inline bool is_hugepage_only_range(struct mm_struct *mm,
186 + static inline int prepare_hugepage_range(struct file *file,
187 + unsigned long addr, unsigned long len)
188 + {
189 +- if (len & ~HPAGE_MASK)
190 ++ struct hstate *h = hstate_file(file);
191 ++
192 ++ if (len & ~huge_page_mask(h))
193 + return -EINVAL;
194 +- if (addr & ~HPAGE_MASK)
195 ++ if (addr & ~huge_page_mask(h))
196 + return -EINVAL;
197 + return 0;
198 + }
199 +diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
200 +index 7e0eb40209177..4df41695caec8 100644
201 +--- a/arch/s390/kernel/vmlinux.lds.S
202 ++++ b/arch/s390/kernel/vmlinux.lds.S
203 +@@ -124,6 +124,7 @@ SECTIONS
204 + /*
205 + * Table with the patch locations to undo expolines
206 + */
207 ++ . = ALIGN(4);
208 + .nospec_call_table : {
209 + __nospec_call_start = . ;
210 + *(.s390_indirect*)
211 +diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
212 +index 1e5df3ccdd5cb..a1ee1a760c3eb 100644
213 +--- a/arch/x86/include/asm/nospec-branch.h
214 ++++ b/arch/x86/include/asm/nospec-branch.h
215 +@@ -44,6 +44,7 @@
216 + * the optimal version — two calls, each with their own speculation
217 + * trap should their return address end up getting used, in a loop.
218 + */
219 ++#ifdef CONFIG_X86_64
220 + #define __FILL_RETURN_BUFFER(reg, nr, sp) \
221 + mov $(nr/2), reg; \
222 + 771: \
223 +@@ -64,6 +65,19 @@
224 + add $(BITS_PER_LONG/8) * nr, sp; \
225 + /* barrier for jnz misprediction */ \
226 + lfence;
227 ++#else
228 ++/*
229 ++ * i386 doesn't unconditionally have LFENCE, as such it can't
230 ++ * do a loop.
231 ++ */
232 ++#define __FILL_RETURN_BUFFER(reg, nr, sp) \
233 ++ .rept nr; \
234 ++ call 772f; \
235 ++ int3; \
236 ++772:; \
237 ++ .endr; \
238 ++ add $(BITS_PER_LONG/8) * nr, sp;
239 ++#endif
240 +
241 + #define __ISSUE_UNBALANCED_RET_GUARD(sp) \
242 + call 881f; \
243 +diff --git a/drivers/android/binder.c b/drivers/android/binder.c
244 +index c273d0df69394..807ee97254795 100644
245 +--- a/drivers/android/binder.c
246 ++++ b/drivers/android/binder.c
247 +@@ -1748,6 +1748,18 @@ static int binder_inc_ref_for_node(struct binder_proc *proc,
248 + }
249 + ret = binder_inc_ref_olocked(ref, strong, target_list);
250 + *rdata = ref->data;
251 ++ if (ret && ref == new_ref) {
252 ++ /*
253 ++ * Cleanup the failed reference here as the target
254 ++ * could now be dead and have already released its
255 ++ * references by now. Calling on the new reference
256 ++ * with strong=0 and a tmp_refs will not decrement
257 ++ * the node. The new_ref gets kfree'd below.
258 ++ */
259 ++ binder_cleanup_ref_olocked(new_ref);
260 ++ ref = NULL;
261 ++ }
262 ++
263 + binder_proc_unlock(proc);
264 + if (new_ref && ref != new_ref)
265 + /*
266 +diff --git a/drivers/base/dd.c b/drivers/base/dd.c
267 +index 4e45c87ed1778..10063d8a1b7d4 100644
268 +--- a/drivers/base/dd.c
269 ++++ b/drivers/base/dd.c
270 +@@ -818,6 +818,11 @@ static int __device_attach_driver(struct device_driver *drv, void *_data)
271 + } else if (ret == -EPROBE_DEFER) {
272 + dev_dbg(dev, "Device match requests probe deferral\n");
273 + driver_deferred_probe_add(dev);
274 ++ /*
275 ++ * Device can't match with a driver right now, so don't attempt
276 ++ * to match or bind with other drivers on the bus.
277 ++ */
278 ++ return ret;
279 + } else if (ret < 0) {
280 + dev_dbg(dev, "Bus failed to match device: %d", ret);
281 + return ret;
282 +@@ -1057,6 +1062,11 @@ static int __driver_attach(struct device *dev, void *data)
283 + } else if (ret == -EPROBE_DEFER) {
284 + dev_dbg(dev, "Device match requests probe deferral\n");
285 + driver_deferred_probe_add(dev);
286 ++ /*
287 ++ * Driver could not match with device, but may match with
288 ++ * another device on the bus.
289 ++ */
290 ++ return 0;
291 + } else if (ret < 0) {
292 + dev_dbg(dev, "Bus failed to match device: %d", ret);
293 + return ret;
294 +diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c
295 +index 1654fd0eedc94..a790a8ca02ff4 100644
296 +--- a/drivers/clk/bcm/clk-raspberrypi.c
297 ++++ b/drivers/clk/bcm/clk-raspberrypi.c
298 +@@ -113,7 +113,7 @@ static unsigned long raspberrypi_fw_pll_get_rate(struct clk_hw *hw,
299 + RPI_FIRMWARE_ARM_CLK_ID,
300 + &val);
301 + if (ret)
302 +- return ret;
303 ++ return 0;
304 +
305 + return val * RPI_FIRMWARE_PLLB_ARM_DIV_RATE;
306 + }
307 +diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
308 +index 13332f89e034b..c002f83adf573 100644
309 +--- a/drivers/clk/clk.c
310 ++++ b/drivers/clk/clk.c
311 +@@ -852,10 +852,9 @@ static void clk_core_unprepare(struct clk_core *core)
312 + if (core->ops->unprepare)
313 + core->ops->unprepare(core->hw);
314 +
315 +- clk_pm_runtime_put(core);
316 +-
317 + trace_clk_unprepare_complete(core);
318 + clk_core_unprepare(core->parent);
319 ++ clk_pm_runtime_put(core);
320 + }
321 +
322 + static void clk_core_unprepare_lock(struct clk_core *core)
323 +diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
324 +index b1395133389ed..b82cc8beac671 100644
325 +--- a/drivers/firmware/efi/capsule-loader.c
326 ++++ b/drivers/firmware/efi/capsule-loader.c
327 +@@ -241,29 +241,6 @@ failed:
328 + return ret;
329 + }
330 +
331 +-/**
332 +- * efi_capsule_flush - called by file close or file flush
333 +- * @file: file pointer
334 +- * @id: not used
335 +- *
336 +- * If a capsule is being partially uploaded then calling this function
337 +- * will be treated as upload termination and will free those completed
338 +- * buffer pages and -ECANCELED will be returned.
339 +- **/
340 +-static int efi_capsule_flush(struct file *file, fl_owner_t id)
341 +-{
342 +- int ret = 0;
343 +- struct capsule_info *cap_info = file->private_data;
344 +-
345 +- if (cap_info->index > 0) {
346 +- pr_err("capsule upload not complete\n");
347 +- efi_free_all_buff_pages(cap_info);
348 +- ret = -ECANCELED;
349 +- }
350 +-
351 +- return ret;
352 +-}
353 +-
354 + /**
355 + * efi_capsule_release - called by file close
356 + * @inode: not used
357 +@@ -276,6 +253,13 @@ static int efi_capsule_release(struct inode *inode, struct file *file)
358 + {
359 + struct capsule_info *cap_info = file->private_data;
360 +
361 ++ if (cap_info->index > 0 &&
362 ++ (cap_info->header.headersize == 0 ||
363 ++ cap_info->count < cap_info->total_size)) {
364 ++ pr_err("capsule upload not complete\n");
365 ++ efi_free_all_buff_pages(cap_info);
366 ++ }
367 ++
368 + kfree(cap_info->pages);
369 + kfree(cap_info->phys);
370 + kfree(file->private_data);
371 +@@ -323,7 +307,6 @@ static const struct file_operations efi_capsule_fops = {
372 + .owner = THIS_MODULE,
373 + .open = efi_capsule_open,
374 + .write = efi_capsule_write,
375 +- .flush = efi_capsule_flush,
376 + .release = efi_capsule_release,
377 + .llseek = no_llseek,
378 + };
379 +diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
380 +index 317f54f19477e..c81d73d5e0159 100644
381 +--- a/drivers/gpio/gpio-pca953x.c
382 ++++ b/drivers/gpio/gpio-pca953x.c
383 +@@ -1198,7 +1198,9 @@ static int pca953x_suspend(struct device *dev)
384 + {
385 + struct pca953x_chip *chip = dev_get_drvdata(dev);
386 +
387 ++ mutex_lock(&chip->i2c_lock);
388 + regcache_cache_only(chip->regmap, true);
389 ++ mutex_unlock(&chip->i2c_lock);
390 +
391 + if (atomic_read(&chip->wakeup_path))
392 + device_set_wakeup_path(dev);
393 +@@ -1221,13 +1223,17 @@ static int pca953x_resume(struct device *dev)
394 + }
395 + }
396 +
397 ++ mutex_lock(&chip->i2c_lock);
398 + regcache_cache_only(chip->regmap, false);
399 + regcache_mark_dirty(chip->regmap);
400 + ret = pca953x_regcache_sync(dev);
401 +- if (ret)
402 ++ if (ret) {
403 ++ mutex_unlock(&chip->i2c_lock);
404 + return ret;
405 ++ }
406 +
407 + ret = regcache_sync(chip->regmap);
408 ++ mutex_unlock(&chip->i2c_lock);
409 + if (ret) {
410 + dev_err(dev, "Failed to restore register map: %d\n", ret);
411 + return ret;
412 +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
413 +index 5906a8951a6c6..685a2df01d096 100644
414 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
415 ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
416 +@@ -2472,7 +2472,8 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
417 +
418 + gfx_v9_0_tiling_mode_table_init(adev);
419 +
420 +- gfx_v9_0_setup_rb(adev);
421 ++ if (adev->gfx.num_gfx_rings)
422 ++ gfx_v9_0_setup_rb(adev);
423 + gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
424 + adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
425 +
426 +diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
427 +index 641f1258f08dc..e60157fe7a7bf 100644
428 +--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
429 ++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
430 +@@ -182,6 +182,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
431 + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
432 + WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);
433 +
434 ++ tmp = mmVM_L2_CNTL3_DEFAULT;
435 + if (adev->gmc.translate_further) {
436 + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
437 + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
438 +diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
439 +index 25a2d80287d67..d6a72f3cb1fbd 100644
440 +--- a/drivers/gpu/drm/drm_gem.c
441 ++++ b/drivers/gpu/drm/drm_gem.c
442 +@@ -167,21 +167,6 @@ void drm_gem_private_object_init(struct drm_device *dev,
443 + }
444 + EXPORT_SYMBOL(drm_gem_private_object_init);
445 +
446 +-static void
447 +-drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
448 +-{
449 +- /*
450 +- * Note: obj->dma_buf can't disappear as long as we still hold a
451 +- * handle reference in obj->handle_count.
452 +- */
453 +- mutex_lock(&filp->prime.lock);
454 +- if (obj->dma_buf) {
455 +- drm_prime_remove_buf_handle_locked(&filp->prime,
456 +- obj->dma_buf);
457 +- }
458 +- mutex_unlock(&filp->prime.lock);
459 +-}
460 +-
461 + /**
462 + * drm_gem_object_handle_free - release resources bound to userspace handles
463 + * @obj: GEM object to clean up.
464 +@@ -255,7 +240,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
465 + else if (dev->driver->gem_close_object)
466 + dev->driver->gem_close_object(obj, file_priv);
467 +
468 +- drm_gem_remove_prime_handles(obj, file_priv);
469 ++ drm_prime_remove_buf_handle(&file_priv->prime, id);
470 + drm_vma_node_revoke(&obj->vma_node, file_priv);
471 +
472 + drm_gem_object_handle_put_unlocked(obj);
473 +diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
474 +index 51a2055c8f18a..41a9a9bae5848 100644
475 +--- a/drivers/gpu/drm/drm_internal.h
476 ++++ b/drivers/gpu/drm/drm_internal.h
477 +@@ -59,8 +59,8 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
478 +
479 + void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
480 + void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
481 +-void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
482 +- struct dma_buf *dma_buf);
483 ++void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
484 ++ uint32_t handle);
485 +
486 + /* drm_drv.c */
487 + struct drm_minor *drm_minor_acquire(unsigned int minor_id);
488 +diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
489 +index 0a2316e0e8121..6b7cf0170f9d1 100644
490 +--- a/drivers/gpu/drm/drm_prime.c
491 ++++ b/drivers/gpu/drm/drm_prime.c
492 +@@ -187,29 +187,33 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
493 + return -ENOENT;
494 + }
495 +
496 +-void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
497 +- struct dma_buf *dma_buf)
498 ++void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
499 ++ uint32_t handle)
500 + {
501 + struct rb_node *rb;
502 +
503 +- rb = prime_fpriv->dmabufs.rb_node;
504 ++ mutex_lock(&prime_fpriv->lock);
505 ++
506 ++ rb = prime_fpriv->handles.rb_node;
507 + while (rb) {
508 + struct drm_prime_member *member;
509 +
510 +- member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
511 +- if (member->dma_buf == dma_buf) {
512 ++ member = rb_entry(rb, struct drm_prime_member, handle_rb);
513 ++ if (member->handle == handle) {
514 + rb_erase(&member->handle_rb, &prime_fpriv->handles);
515 + rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);
516 +
517 +- dma_buf_put(dma_buf);
518 ++ dma_buf_put(member->dma_buf);
519 + kfree(member);
520 +- return;
521 +- } else if (member->dma_buf < dma_buf) {
522 ++ break;
523 ++ } else if (member->handle < handle) {
524 + rb = rb->rb_right;
525 + } else {
526 + rb = rb->rb_left;
527 + }
528 + }
529 ++
530 ++ mutex_unlock(&prime_fpriv->lock);
531 + }
532 +
533 + void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
534 +diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
535 +index 399b1542509f7..d79314992adad 100644
536 +--- a/drivers/gpu/drm/i915/display/intel_quirks.c
537 ++++ b/drivers/gpu/drm/i915/display/intel_quirks.c
538 +@@ -146,6 +146,9 @@ static struct intel_quirk intel_quirks[] = {
539 + /* ASRock ITX*/
540 + { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
541 + { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
542 ++ /* ECS Liva Q2 */
543 ++ { 0x3185, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
544 ++ { 0x3184, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
545 + };
546 +
547 + void intel_init_quirks(struct drm_i915_private *i915)
548 +diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
549 +index 245c20d36f1b2..45ffccdcd50e7 100644
550 +--- a/drivers/gpu/drm/i915/gvt/handlers.c
551 ++++ b/drivers/gpu/drm/i915/gvt/handlers.c
552 +@@ -654,7 +654,7 @@ static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
553 + else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
554 + index = FDI_RX_IMR_TO_PIPE(offset);
555 + else {
556 +- gvt_vgpu_err("Unsupport registers %x\n", offset);
557 ++ gvt_vgpu_err("Unsupported registers %x\n", offset);
558 + return -EINVAL;
559 + }
560 +
561 +diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
562 +index b7b7c1a9164ab..726c881394576 100644
563 +--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
564 ++++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
565 +@@ -97,7 +97,7 @@ static const char * const dsi_8996_bus_clk_names[] = {
566 + static const struct msm_dsi_config msm8996_dsi_cfg = {
567 + .io_offset = DSI_6G_REG_SHIFT,
568 + .reg_cfg = {
569 +- .num = 2,
570 ++ .num = 3,
571 + .regs = {
572 + {"vdda", 18160, 1 }, /* 1.25 V */
573 + {"vcca", 17000, 32 }, /* 0.925 V */
574 +diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
575 +index 60d50643d0b5c..08a95c3a94444 100644
576 +--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
577 ++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
578 +@@ -347,7 +347,7 @@ int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
579 + } else {
580 + timing->shared_timings.clk_pre =
581 + linear_inter(tmax, tmin, pcnt2, 0, false);
582 +- timing->shared_timings.clk_pre_inc_by_2 = 0;
583 ++ timing->shared_timings.clk_pre_inc_by_2 = 0;
584 + }
585 +
586 + timing->ta_go = 3;
587 +diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
588 +index 5d017f0aec665..e892582e847b5 100644
589 +--- a/drivers/gpu/drm/radeon/radeon_device.c
590 ++++ b/drivers/gpu/drm/radeon/radeon_device.c
591 +@@ -1623,6 +1623,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
592 + if (r) {
593 + /* delay GPU reset to resume */
594 + radeon_fence_driver_force_completion(rdev, i);
595 ++ } else {
596 ++ /* finish executing delayed work */
597 ++ flush_delayed_work(&rdev->fence_drv[i].lockup_work);
598 + }
599 + }
600 +
601 +diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
602 +index 3ea4021f267cf..d96e435cc42b1 100644
603 +--- a/drivers/hwmon/gpio-fan.c
604 ++++ b/drivers/hwmon/gpio-fan.c
605 +@@ -391,6 +391,9 @@ static int gpio_fan_set_cur_state(struct thermal_cooling_device *cdev,
606 + if (!fan_data)
607 + return -EINVAL;
608 +
609 ++ if (state >= fan_data->num_speed)
610 ++ return -EINVAL;
611 ++
612 + set_fan_speed(fan_data, state);
613 + return 0;
614 + }
615 +diff --git a/drivers/iio/adc/mcp3911.c b/drivers/iio/adc/mcp3911.c
616 +index dd52f08ec82e2..4e2e8e819b1e7 100644
617 +--- a/drivers/iio/adc/mcp3911.c
618 ++++ b/drivers/iio/adc/mcp3911.c
619 +@@ -38,8 +38,8 @@
620 + #define MCP3911_CHANNEL(x) (MCP3911_REG_CHANNEL0 + x * 3)
621 + #define MCP3911_OFFCAL(x) (MCP3911_REG_OFFCAL_CH0 + x * 6)
622 +
623 +-/* Internal voltage reference in uV */
624 +-#define MCP3911_INT_VREF_UV 1200000
625 ++/* Internal voltage reference in mV */
626 ++#define MCP3911_INT_VREF_MV 1200
627 +
628 + #define MCP3911_REG_READ(reg, id) ((((reg) << 1) | ((id) << 5) | (1 << 0)) & 0xff)
629 + #define MCP3911_REG_WRITE(reg, id) ((((reg) << 1) | ((id) << 5) | (0 << 0)) & 0xff)
630 +@@ -111,6 +111,8 @@ static int mcp3911_read_raw(struct iio_dev *indio_dev,
631 + if (ret)
632 + goto out;
633 +
634 ++ *val = sign_extend32(*val, 23);
635 ++
636 + ret = IIO_VAL_INT;
637 + break;
638 +
639 +@@ -135,11 +137,18 @@ static int mcp3911_read_raw(struct iio_dev *indio_dev,
640 +
641 + *val = ret / 1000;
642 + } else {
643 +- *val = MCP3911_INT_VREF_UV;
644 ++ *val = MCP3911_INT_VREF_MV;
645 + }
646 +
647 +- *val2 = 24;
648 +- ret = IIO_VAL_FRACTIONAL_LOG2;
649 ++ /*
650 ++ * For 24bit Conversion
651 ++ * Raw = ((Voltage)/(Vref) * 2^23 * Gain * 1.5
652 ++ * Voltage = Raw * (Vref)/(2^23 * Gain * 1.5)
653 ++ */
654 ++
655 ++ /* val2 = (2^23 * 1.5) */
656 ++ *val2 = 12582912;
657 ++ ret = IIO_VAL_FRACTIONAL;
658 + break;
659 + }
660 +
661 +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
662 +index de7df5ab06f3b..cf174aa7fe25b 100644
663 +--- a/drivers/infiniband/core/cma.c
664 ++++ b/drivers/infiniband/core/cma.c
665 +@@ -1719,8 +1719,8 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
666 + }
667 +
668 + if (!validate_net_dev(*net_dev,
669 +- (struct sockaddr *)&req->listen_addr_storage,
670 +- (struct sockaddr *)&req->src_addr_storage)) {
671 ++ (struct sockaddr *)&req->src_addr_storage,
672 ++ (struct sockaddr *)&req->listen_addr_storage)) {
673 + id_priv = ERR_PTR(-EHOSTUNREACH);
674 + goto err;
675 + }
676 +diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
677 +index 76a14db7028dd..b9ab3ca3079c7 100644
678 +--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
679 ++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
680 +@@ -89,7 +89,7 @@
681 + #define HNS_ROCE_V2_SCCC_ENTRY_SZ 32
682 + #define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ PAGE_SIZE
683 + #define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE
684 +-#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
685 ++#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFF000
686 + #define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
687 + #define HNS_ROCE_INVALID_LKEY 0x100
688 + #define HNS_ROCE_CMQ_TX_TIMEOUT 30000
689 +diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
690 +index 348c1df69cdc6..3897a3ce02ad0 100644
691 +--- a/drivers/infiniband/hw/mlx5/mad.c
692 ++++ b/drivers/infiniband/hw/mlx5/mad.c
693 +@@ -219,6 +219,12 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u8 port_num,
694 + mdev = dev->mdev;
695 + mdev_port_num = 1;
696 + }
697 ++ if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) {
698 ++ /* set local port to one for Function-Per-Port HCA. */
699 ++ mdev = dev->mdev;
700 ++ mdev_port_num = 1;
701 ++ }
702 ++
703 + /* Declaring support of extended counters */
704 + if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
705 + struct ib_class_port_info cpi = {};
706 +diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
707 +index 424918eb1cd4a..5e6d96bd2eb12 100644
708 +--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
709 ++++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
710 +@@ -29,7 +29,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
711 + dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);
712 +
713 + if (paddr)
714 +- return virt_to_page(paddr);
715 ++ return virt_to_page((void *)paddr);
716 +
717 + return NULL;
718 + }
719 +@@ -523,13 +523,23 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
720 + kunmap(p);
721 + }
722 + } else {
723 +- u64 va = sge->laddr + sge_off;
724 ++ /*
725 ++ * Cast to an uintptr_t to preserve all 64 bits
726 ++ * in sge->laddr.
727 ++ */
728 ++ uintptr_t va = (uintptr_t)(sge->laddr + sge_off);
729 +
730 +- page_array[seg] = virt_to_page(va & PAGE_MASK);
731 ++ /*
732 ++ * virt_to_page() takes a (void *) pointer
733 ++ * so cast to a (void *) meaning it will be 64
734 ++ * bits on a 64 bit platform and 32 bits on a
735 ++ * 32 bit platform.
736 ++ */
737 ++ page_array[seg] = virt_to_page((void *)(va & PAGE_MASK));
738 + if (do_crc)
739 + crypto_shash_update(
740 + c_tx->mpa_crc_hd,
741 +- (void *)(uintptr_t)va,
742 ++ (void *)va,
743 + plen);
744 + }
745 +
746 +diff --git a/drivers/input/joystick/iforce/iforce-serio.c b/drivers/input/joystick/iforce/iforce-serio.c
747 +index f95a81b9fac72..2380546d79782 100644
748 +--- a/drivers/input/joystick/iforce/iforce-serio.c
749 ++++ b/drivers/input/joystick/iforce/iforce-serio.c
750 +@@ -39,7 +39,7 @@ static void iforce_serio_xmit(struct iforce *iforce)
751 +
752 + again:
753 + if (iforce->xmit.head == iforce->xmit.tail) {
754 +- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
755 ++ iforce_clear_xmit_and_wake(iforce);
756 + spin_unlock_irqrestore(&iforce->xmit_lock, flags);
757 + return;
758 + }
759 +@@ -64,7 +64,7 @@ again:
760 + if (test_and_clear_bit(IFORCE_XMIT_AGAIN, iforce->xmit_flags))
761 + goto again;
762 +
763 +- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
764 ++ iforce_clear_xmit_and_wake(iforce);
765 +
766 + spin_unlock_irqrestore(&iforce->xmit_lock, flags);
767 + }
768 +@@ -169,7 +169,7 @@ static irqreturn_t iforce_serio_irq(struct serio *serio,
769 + iforce_serio->cmd_response_len = iforce_serio->len;
770 +
771 + /* Signal that command is done */
772 +- wake_up(&iforce->wait);
773 ++ wake_up_all(&iforce->wait);
774 + } else if (likely(iforce->type)) {
775 + iforce_process_packet(iforce, iforce_serio->id,
776 + iforce_serio->data_in,
777 +diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
778 +index ea58805c480fa..cba92bd590a8d 100644
779 +--- a/drivers/input/joystick/iforce/iforce-usb.c
780 ++++ b/drivers/input/joystick/iforce/iforce-usb.c
781 +@@ -30,7 +30,7 @@ static void __iforce_usb_xmit(struct iforce *iforce)
782 + spin_lock_irqsave(&iforce->xmit_lock, flags);
783 +
784 + if (iforce->xmit.head == iforce->xmit.tail) {
785 +- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
786 ++ iforce_clear_xmit_and_wake(iforce);
787 + spin_unlock_irqrestore(&iforce->xmit_lock, flags);
788 + return;
789 + }
790 +@@ -58,9 +58,9 @@ static void __iforce_usb_xmit(struct iforce *iforce)
791 + XMIT_INC(iforce->xmit.tail, n);
792 +
793 + if ( (n=usb_submit_urb(iforce_usb->out, GFP_ATOMIC)) ) {
794 +- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
795 + dev_warn(&iforce_usb->intf->dev,
796 + "usb_submit_urb failed %d\n", n);
797 ++ iforce_clear_xmit_and_wake(iforce);
798 + }
799 +
800 + /* The IFORCE_XMIT_RUNNING bit is not cleared here. That's intended.
801 +@@ -175,15 +175,15 @@ static void iforce_usb_out(struct urb *urb)
802 + struct iforce *iforce = &iforce_usb->iforce;
803 +
804 + if (urb->status) {
805 +- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
806 + dev_dbg(&iforce_usb->intf->dev, "urb->status %d, exiting\n",
807 + urb->status);
808 ++ iforce_clear_xmit_and_wake(iforce);
809 + return;
810 + }
811 +
812 + __iforce_usb_xmit(iforce);
813 +
814 +- wake_up(&iforce->wait);
815 ++ wake_up_all(&iforce->wait);
816 + }
817 +
818 + static int iforce_usb_probe(struct usb_interface *intf,
819 +diff --git a/drivers/input/joystick/iforce/iforce.h b/drivers/input/joystick/iforce/iforce.h
820 +index 6aa761ebbdf77..9ccb9107ccbef 100644
821 +--- a/drivers/input/joystick/iforce/iforce.h
822 ++++ b/drivers/input/joystick/iforce/iforce.h
823 +@@ -119,6 +119,12 @@ static inline int iforce_get_id_packet(struct iforce *iforce, u8 id,
824 + response_data, response_len);
825 + }
826 +
827 ++static inline void iforce_clear_xmit_and_wake(struct iforce *iforce)
828 ++{
829 ++ clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
830 ++ wake_up_all(&iforce->wait);
831 ++}
832 ++
833 + /* Public functions */
834 + /* iforce-main.c */
835 + int iforce_init_device(struct device *parent, u16 bustype,
836 +diff --git a/drivers/input/misc/rk805-pwrkey.c b/drivers/input/misc/rk805-pwrkey.c
837 +index 3fb64dbda1a21..76873aa005b41 100644
838 +--- a/drivers/input/misc/rk805-pwrkey.c
839 ++++ b/drivers/input/misc/rk805-pwrkey.c
840 +@@ -98,6 +98,7 @@ static struct platform_driver rk805_pwrkey_driver = {
841 + };
842 + module_platform_driver(rk805_pwrkey_driver);
843 +
844 ++MODULE_ALIAS("platform:rk805-pwrkey");
845 + MODULE_AUTHOR("Joseph Chen <chenjh@××××××××××.com>");
846 + MODULE_DESCRIPTION("RK805 PMIC Power Key driver");
847 + MODULE_LICENSE("GPL");
848 +diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
849 +index fb5ddf3864fdd..5812ef3345feb 100644
850 +--- a/drivers/misc/fastrpc.c
851 ++++ b/drivers/misc/fastrpc.c
852 +@@ -1357,7 +1357,12 @@ static int fastrpc_cb_probe(struct platform_device *pdev)
853 + of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions);
854 +
855 + spin_lock_irqsave(&cctx->lock, flags);
856 +- sess = &cctx->session[cctx->sesscount];
857 ++ if (cctx->sesscount >= FASTRPC_MAX_SESSIONS) {
858 ++ dev_err(&pdev->dev, "too many sessions\n");
859 ++ spin_unlock_irqrestore(&cctx->lock, flags);
860 ++ return -ENOSPC;
861 ++ }
862 ++ sess = &cctx->session[cctx->sesscount++];
863 + sess->used = false;
864 + sess->valid = true;
865 + sess->dev = dev;
866 +@@ -1370,13 +1375,12 @@ static int fastrpc_cb_probe(struct platform_device *pdev)
867 + struct fastrpc_session_ctx *dup_sess;
868 +
869 + for (i = 1; i < sessions; i++) {
870 +- if (cctx->sesscount++ >= FASTRPC_MAX_SESSIONS)
871 ++ if (cctx->sesscount >= FASTRPC_MAX_SESSIONS)
872 + break;
873 +- dup_sess = &cctx->session[cctx->sesscount];
874 ++ dup_sess = &cctx->session[cctx->sesscount++];
875 + memcpy(dup_sess, sess, sizeof(*dup_sess));
876 + }
877 + }
878 +- cctx->sesscount++;
879 + spin_unlock_irqrestore(&cctx->lock, flags);
880 + rc = dma_set_mask(dev, DMA_BIT_MASK(32));
881 + if (rc) {
882 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
883 +index 5706abb3c0eaa..10125b02d1543 100644
884 +--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
885 ++++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
886 +@@ -178,6 +178,10 @@ void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset)
887 + "Cannot locate client instance close routine\n");
888 + return;
889 + }
890 ++ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
891 ++ dev_dbg(&pf->pdev->dev, "Client is not open, abort close\n");
892 ++ return;
893 ++ }
894 + cdev->client->ops->close(&cdev->lan_info, cdev->client, reset);
895 + clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
896 + i40e_client_release_qvlist(&cdev->lan_info);
897 +@@ -376,7 +380,6 @@ void i40e_client_subtask(struct i40e_pf *pf)
898 + /* Remove failed client instance */
899 + clear_bit(__I40E_CLIENT_INSTANCE_OPENED,
900 + &cdev->state);
901 +- i40e_client_del_instance(pf);
902 + return;
903 + }
904 + }
905 +diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
906 +index 4a3baa7e01424..0eec05d905eb0 100644
907 +--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
908 ++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
909 +@@ -700,10 +700,10 @@ void mvpp2_dbgfs_cleanup(struct mvpp2 *priv)
910 +
911 + void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name)
912 + {
913 +- struct dentry *mvpp2_dir, *mvpp2_root;
914 ++ static struct dentry *mvpp2_root;
915 ++ struct dentry *mvpp2_dir;
916 + int ret, i;
917 +
918 +- mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL);
919 + if (!mvpp2_root)
920 + mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL);
921 +
922 +diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
923 +index 8157666209798..e4d919de7e3fc 100644
924 +--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
925 ++++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
926 +@@ -1273,7 +1273,7 @@ static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
927 + bool removing;
928 + int err = 0;
929 +
930 +- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
931 ++ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
932 + if (!entry)
933 + return -ENOMEM;
934 +
935 +diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
936 +index 5945ac5f38eea..cb29da961e12b 100644
937 +--- a/drivers/net/ieee802154/adf7242.c
938 ++++ b/drivers/net/ieee802154/adf7242.c
939 +@@ -1310,10 +1310,11 @@ static int adf7242_remove(struct spi_device *spi)
940 +
941 + debugfs_remove_recursive(lp->debugfs_root);
942 +
943 ++ ieee802154_unregister_hw(lp->hw);
944 ++
945 + cancel_delayed_work_sync(&lp->work);
946 + destroy_workqueue(lp->wqueue);
947 +
948 +- ieee802154_unregister_hw(lp->hw);
949 + mutex_destroy(&lp->bmux);
950 + ieee802154_free_hw(lp->hw);
951 +
952 +diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
953 +index ae17d2f9d5347..cc1522550f2c4 100644
954 +--- a/drivers/net/phy/dp83822.c
955 ++++ b/drivers/net/phy/dp83822.c
956 +@@ -198,7 +198,6 @@ static int dp83822_config_intr(struct phy_device *phydev)
957 + return misr_status;
958 +
959 + misr_status |= (DP83822_RX_ERR_HF_INT_EN |
960 +- DP83822_FALSE_CARRIER_HF_INT_EN |
961 + DP83822_ANEG_COMPLETE_INT_EN |
962 + DP83822_DUP_MODE_CHANGE_INT_EN |
963 + DP83822_SPEED_CHANGED_INT_EN |
964 +diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
965 +index ce891ac32388f..b79a8aeab4c79 100644
966 +--- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c
967 ++++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
968 +@@ -2403,7 +2403,7 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
969 + /* Repeat initial/next rate.
970 + * For legacy IL_NUMBER_TRY == 1, this loop will not execute.
971 + * For HT IL_HT_NUMBER_TRY == 3, this executes twice. */
972 +- while (repeat_rate > 0) {
973 ++ while (repeat_rate > 0 && idx < (LINK_QUAL_MAX_RETRY_NUM - 1)) {
974 + if (is_legacy(tbl_type.lq_type)) {
975 + if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
976 + ant_toggle_cnt++;
977 +@@ -2422,8 +2422,6 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
978 + cpu_to_le32(new_rate);
979 + repeat_rate--;
980 + idx++;
981 +- if (idx >= LINK_QUAL_MAX_RETRY_NUM)
982 +- goto out;
983 + }
984 +
985 + il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
986 +@@ -2468,7 +2466,6 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
987 + repeat_rate--;
988 + }
989 +
990 +-out:
991 + lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
992 + lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
993 +
994 +diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
995 +index 2a27ac9aedbaa..3169859cd3906 100644
996 +--- a/drivers/nvme/host/tcp.c
997 ++++ b/drivers/nvme/host/tcp.c
998 +@@ -1074,7 +1074,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
999 + if (result > 0)
1000 + pending = true;
1001 +
1002 +- if (!pending)
1003 ++ if (!pending || !queue->rd_enabled)
1004 + return;
1005 +
1006 + } while (!time_after(jiffies, deadline)); /* quota is exhausted */
1007 +diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
1008 +index ee81d94fe810c..ff206faae775c 100644
1009 +--- a/drivers/nvme/target/core.c
1010 ++++ b/drivers/nvme/target/core.c
1011 +@@ -709,6 +709,8 @@ static void nvmet_set_error(struct nvmet_req *req, u16 status)
1012 +
1013 + static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
1014 + {
1015 ++ struct nvmet_ns *ns = req->ns;
1016 ++
1017 + if (!req->sq->sqhd_disabled)
1018 + nvmet_update_sq_head(req);
1019 + req->cqe->sq_id = cpu_to_le16(req->sq->qid);
1020 +@@ -719,9 +721,9 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
1021 +
1022 + trace_nvmet_req_complete(req);
1023 +
1024 +- if (req->ns)
1025 +- nvmet_put_namespace(req->ns);
1026 + req->ops->queue_response(req);
1027 ++ if (ns)
1028 ++ nvmet_put_namespace(ns);
1029 + }
1030 +
1031 + void nvmet_req_complete(struct nvmet_req *req, u16 status)
1032 +diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
1033 +index 5013568c571e5..6209d58e9492a 100644
1034 +--- a/drivers/parisc/ccio-dma.c
1035 ++++ b/drivers/parisc/ccio-dma.c
1036 +@@ -1378,15 +1378,17 @@ ccio_init_resource(struct resource *res, char *name, void __iomem *ioaddr)
1037 + }
1038 + }
1039 +
1040 +-static void __init ccio_init_resources(struct ioc *ioc)
1041 ++static int __init ccio_init_resources(struct ioc *ioc)
1042 + {
1043 + struct resource *res = ioc->mmio_region;
1044 + char *name = kmalloc(14, GFP_KERNEL);
1045 +-
1046 ++ if (unlikely(!name))
1047 ++ return -ENOMEM;
1048 + snprintf(name, 14, "GSC Bus [%d/]", ioc->hw_path);
1049 +
1050 + ccio_init_resource(res, name, &ioc->ioc_regs->io_io_low);
1051 + ccio_init_resource(res + 1, name, &ioc->ioc_regs->io_io_low_hv);
1052 ++ return 0;
1053 + }
1054 +
1055 + static int new_ioc_area(struct resource *res, unsigned long size,
1056 +@@ -1541,7 +1543,10 @@ static int __init ccio_probe(struct parisc_device *dev)
1057 + return -ENOMEM;
1058 + }
1059 + ccio_ioc_init(ioc);
1060 +- ccio_init_resources(ioc);
1061 ++ if (ccio_init_resources(ioc)) {
1062 ++ kfree(ioc);
1063 ++ return -ENOMEM;
1064 ++ }
1065 + hppa_dma_ops = &ccio_ops;
1066 +
1067 + hba = kzalloc(sizeof(*hba), GFP_KERNEL);
1068 +diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
1069 +index 597cfabc0967c..ee349a16b73a4 100644
1070 +--- a/drivers/platform/x86/pmc_atom.c
1071 ++++ b/drivers/platform/x86/pmc_atom.c
1072 +@@ -244,7 +244,7 @@ static void pmc_power_off(void)
1073 + pm1_cnt_port = acpi_base_addr + PM1_CNT;
1074 +
1075 + pm1_cnt_value = inl(pm1_cnt_port);
1076 +- pm1_cnt_value &= SLEEP_TYPE_MASK;
1077 ++ pm1_cnt_value &= ~SLEEP_TYPE_MASK;
1078 + pm1_cnt_value |= SLEEP_TYPE_S5;
1079 + pm1_cnt_value |= SLEEP_ENABLE;
1080 +
1081 +diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
1082 +index ae2addadb36f2..6ba3f6e7ea4f8 100644
1083 +--- a/drivers/regulator/core.c
1084 ++++ b/drivers/regulator/core.c
1085 +@@ -2486,13 +2486,18 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
1086 + */
1087 + static int _regulator_handle_consumer_enable(struct regulator *regulator)
1088 + {
1089 ++ int ret;
1090 + struct regulator_dev *rdev = regulator->rdev;
1091 +
1092 + lockdep_assert_held_once(&rdev->mutex.base);
1093 +
1094 + regulator->enable_count++;
1095 +- if (regulator->uA_load && regulator->enable_count == 1)
1096 +- return drms_uA_update(rdev);
1097 ++ if (regulator->uA_load && regulator->enable_count == 1) {
1098 ++ ret = drms_uA_update(rdev);
1099 ++ if (ret)
1100 ++ regulator->enable_count--;
1101 ++ return ret;
1102 ++ }
1103 +
1104 + return 0;
1105 + }
1106 +diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
1107 +index b5cee2a2ac66c..8930696021fbd 100644
1108 +--- a/drivers/scsi/lpfc/lpfc_init.c
1109 ++++ b/drivers/scsi/lpfc/lpfc_init.c
1110 +@@ -6537,7 +6537,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
1111 + /* Allocate device driver memory */
1112 + rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
1113 + if (rc)
1114 +- return -ENOMEM;
1115 ++ goto out_destroy_workqueue;
1116 +
1117 + /* IF Type 2 ports get initialized now. */
1118 + if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
1119 +@@ -6911,6 +6911,9 @@ out_free_bsmbx:
1120 + lpfc_destroy_bootstrap_mbox(phba);
1121 + out_free_mem:
1122 + lpfc_mem_free(phba);
1123 ++out_destroy_workqueue:
1124 ++ destroy_workqueue(phba->wq);
1125 ++ phba->wq = NULL;
1126 + return rc;
1127 + }
1128 +
1129 +diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
1130 +index a78a702511faa..944273f60d224 100644
1131 +--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
1132 ++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
1133 +@@ -5182,7 +5182,6 @@ megasas_alloc_fusion_context(struct megasas_instance *instance)
1134 + if (!fusion->log_to_span) {
1135 + dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1136 + __func__, __LINE__);
1137 +- kfree(instance->ctrl_context);
1138 + return -ENOMEM;
1139 + }
1140 + }
1141 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
1142 +index 97c1f242ef0a3..044a00edb5459 100644
1143 +--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
1144 ++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
1145 +@@ -3238,6 +3238,7 @@ static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
1146 + fw_event = list_first_entry(&ioc->fw_event_list,
1147 + struct fw_event_work, list);
1148 + list_del_init(&fw_event->list);
1149 ++ fw_event_work_put(fw_event);
1150 + }
1151 + spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
1152 +
1153 +@@ -3272,7 +3273,6 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
1154 + if (cancel_work_sync(&fw_event->work))
1155 + fw_event_work_put(fw_event);
1156 +
1157 +- fw_event_work_put(fw_event);
1158 + }
1159 + }
1160 +
1161 +diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
1162 +index c6ec7d95bcfcc..722fd54e537cf 100644
1163 +--- a/drivers/soc/bcm/brcmstb/pm/pm-arm.c
1164 ++++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
1165 +@@ -681,13 +681,14 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
1166 + const struct of_device_id *of_id = NULL;
1167 + struct device_node *dn;
1168 + void __iomem *base;
1169 +- int ret, i;
1170 ++ int ret, i, s;
1171 +
1172 + /* AON ctrl registers */
1173 + base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL);
1174 + if (IS_ERR(base)) {
1175 + pr_err("error mapping AON_CTRL\n");
1176 +- return PTR_ERR(base);
1177 ++ ret = PTR_ERR(base);
1178 ++ goto aon_err;
1179 + }
1180 + ctrl.aon_ctrl_base = base;
1181 +
1182 +@@ -697,8 +698,10 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
1183 + /* Assume standard offset */
1184 + ctrl.aon_sram = ctrl.aon_ctrl_base +
1185 + AON_CTRL_SYSTEM_DATA_RAM_OFS;
1186 ++ s = 0;
1187 + } else {
1188 + ctrl.aon_sram = base;
1189 ++ s = 1;
1190 + }
1191 +
1192 + writel_relaxed(0, ctrl.aon_sram + AON_REG_PANIC);
1193 +@@ -708,7 +711,8 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
1194 + (const void **)&ddr_phy_data);
1195 + if (IS_ERR(base)) {
1196 + pr_err("error mapping DDR PHY\n");
1197 +- return PTR_ERR(base);
1198 ++ ret = PTR_ERR(base);
1199 ++ goto ddr_phy_err;
1200 + }
1201 + ctrl.support_warm_boot = ddr_phy_data->supports_warm_boot;
1202 + ctrl.pll_status_offset = ddr_phy_data->pll_status_offset;
1203 +@@ -728,17 +732,20 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
1204 + for_each_matching_node(dn, ddr_shimphy_dt_ids) {
1205 + i = ctrl.num_memc;
1206 + if (i >= MAX_NUM_MEMC) {
1207 ++ of_node_put(dn);
1208 + pr_warn("too many MEMCs (max %d)\n", MAX_NUM_MEMC);
1209 + break;
1210 + }
1211 +
1212 + base = of_io_request_and_map(dn, 0, dn->full_name);
1213 + if (IS_ERR(base)) {
1214 ++ of_node_put(dn);
1215 + if (!ctrl.support_warm_boot)
1216 + break;
1217 +
1218 + pr_err("error mapping DDR SHIMPHY %d\n", i);
1219 +- return PTR_ERR(base);
1220 ++ ret = PTR_ERR(base);
1221 ++ goto ddr_shimphy_err;
1222 + }
1223 + ctrl.memcs[i].ddr_shimphy_base = base;
1224 + ctrl.num_memc++;
1225 +@@ -749,14 +756,18 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
1226 + for_each_matching_node(dn, brcmstb_memc_of_match) {
1227 + base = of_iomap(dn, 0);
1228 + if (!base) {
1229 ++ of_node_put(dn);
1230 + pr_err("error mapping DDR Sequencer %d\n", i);
1231 +- return -ENOMEM;
1232 ++ ret = -ENOMEM;
1233 ++ goto brcmstb_memc_err;
1234 + }
1235 +
1236 + of_id = of_match_node(brcmstb_memc_of_match, dn);
1237 + if (!of_id) {
1238 + iounmap(base);
1239 +- return -EINVAL;
1240 ++ of_node_put(dn);
1241 ++ ret = -EINVAL;
1242 ++ goto brcmstb_memc_err;
1243 + }
1244 +
1245 + ddr_seq_data = of_id->data;
1246 +@@ -776,21 +787,24 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
1247 + dn = of_find_matching_node(NULL, sram_dt_ids);
1248 + if (!dn) {
1249 + pr_err("SRAM not found\n");
1250 +- return -EINVAL;
1251 ++ ret = -EINVAL;
1252 ++ goto brcmstb_memc_err;
1253 + }
1254 +
1255 + ret = brcmstb_init_sram(dn);
1256 + of_node_put(dn);
1257 + if (ret) {
1258 + pr_err("error setting up SRAM for PM\n");
1259 +- return ret;
1260 ++ goto brcmstb_memc_err;
1261 + }
1262 +
1263 + ctrl.pdev = pdev;
1264 +
1265 + ctrl.s3_params = kmalloc(sizeof(*ctrl.s3_params), GFP_KERNEL);
1266 +- if (!ctrl.s3_params)
1267 +- return -ENOMEM;
1268 ++ if (!ctrl.s3_params) {
1269 ++ ret = -ENOMEM;
1270 ++ goto s3_params_err;
1271 ++ }
1272 + ctrl.s3_params_pa = dma_map_single(&pdev->dev, ctrl.s3_params,
1273 + sizeof(*ctrl.s3_params),
1274 + DMA_TO_DEVICE);
1275 +@@ -810,7 +824,21 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
1276 +
1277 + out:
1278 + kfree(ctrl.s3_params);
1279 +-
1280 ++s3_params_err:
1281 ++ iounmap(ctrl.boot_sram);
1282 ++brcmstb_memc_err:
1283 ++ for (i--; i >= 0; i--)
1284 ++ iounmap(ctrl.memcs[i].ddr_ctrl);
1285 ++ddr_shimphy_err:
1286 ++ for (i = 0; i < ctrl.num_memc; i++)
1287 ++ iounmap(ctrl.memcs[i].ddr_shimphy_base);
1288 ++
1289 ++ iounmap(ctrl.memcs[0].ddr_phy_base);
1290 ++ddr_phy_err:
1291 ++ iounmap(ctrl.aon_ctrl_base);
1292 ++ if (s)
1293 ++ iounmap(ctrl.aon_sram);
1294 ++aon_err:
1295 + pr_warn("PM: initialization failed with code %d\n", ret);
1296 +
1297 + return ret;
1298 +diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
1299 +index ff3cb09c57a63..30e965c410ffd 100644
1300 +--- a/drivers/staging/rtl8712/rtl8712_cmd.c
1301 ++++ b/drivers/staging/rtl8712/rtl8712_cmd.c
1302 +@@ -117,34 +117,6 @@ static void r871x_internal_cmd_hdl(struct _adapter *padapter, u8 *pbuf)
1303 + kfree(pdrvcmd->pbuf);
1304 + }
1305 +
1306 +-static u8 read_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
1307 +-{
1308 +- void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
1309 +- struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
1310 +-
1311 +- /* invoke cmd->callback function */
1312 +- pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
1313 +- if (!pcmd_callback)
1314 +- r8712_free_cmd_obj(pcmd);
1315 +- else
1316 +- pcmd_callback(padapter, pcmd);
1317 +- return H2C_SUCCESS;
1318 +-}
1319 +-
1320 +-static u8 write_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
1321 +-{
1322 +- void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
1323 +- struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
1324 +-
1325 +- /* invoke cmd->callback function */
1326 +- pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
1327 +- if (!pcmd_callback)
1328 +- r8712_free_cmd_obj(pcmd);
1329 +- else
1330 +- pcmd_callback(padapter, pcmd);
1331 +- return H2C_SUCCESS;
1332 +-}
1333 +-
1334 + static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
1335 + {
1336 + struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
1337 +@@ -213,14 +185,6 @@ static struct cmd_obj *cmd_hdl_filter(struct _adapter *padapter,
1338 + pcmd_r = NULL;
1339 +
1340 + switch (pcmd->cmdcode) {
1341 +- case GEN_CMD_CODE(_Read_MACREG):
1342 +- read_macreg_hdl(padapter, (u8 *)pcmd);
1343 +- pcmd_r = pcmd;
1344 +- break;
1345 +- case GEN_CMD_CODE(_Write_MACREG):
1346 +- write_macreg_hdl(padapter, (u8 *)pcmd);
1347 +- pcmd_r = pcmd;
1348 +- break;
1349 + case GEN_CMD_CODE(_Read_BBREG):
1350 + read_bbreg_hdl(padapter, (u8 *)pcmd);
1351 + break;
1352 +diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
1353 +index 2ec1af8f7968d..fd074c6ebe0d3 100644
1354 +--- a/drivers/thunderbolt/ctl.c
1355 ++++ b/drivers/thunderbolt/ctl.c
1356 +@@ -388,7 +388,7 @@ static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
1357 +
1358 + static int tb_async_error(const struct ctl_pkg *pkg)
1359 + {
1360 +- const struct cfg_error_pkg *error = (const struct cfg_error_pkg *)pkg;
1361 ++ const struct cfg_error_pkg *error = pkg->buffer;
1362 +
1363 + if (pkg->frame.eof != TB_CFG_PKG_ERROR)
1364 + return false;
1365 +diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
1366 +index 4bdc12908146e..f3f582c3dc874 100644
1367 +--- a/drivers/tty/serial/fsl_lpuart.c
1368 ++++ b/drivers/tty/serial/fsl_lpuart.c
1369 +@@ -1277,9 +1277,9 @@ static int lpuart_config_rs485(struct uart_port *port,
1370 + * Note: UART is assumed to be active high.
1371 + */
1372 + if (rs485->flags & SER_RS485_RTS_ON_SEND)
1373 +- modem &= ~UARTMODEM_TXRTSPOL;
1374 +- else if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
1375 + modem |= UARTMODEM_TXRTSPOL;
1376 ++ else if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
1377 ++ modem &= ~UARTMODEM_TXRTSPOL;
1378 + }
1379 +
1380 + /* Store the new configuration */
1381 +@@ -1981,6 +1981,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
1382 + uart_update_timeout(port, termios->c_cflag, baud);
1383 +
1384 + /* wait transmit engin complete */
1385 ++ lpuart32_write(&sport->port, 0, UARTMODIR);
1386 + lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC);
1387 +
1388 + /* disable transmit and receive */
1389 +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
1390 +index b4f97df8e0000..e00ebda492198 100644
1391 +--- a/drivers/tty/vt/vt.c
1392 ++++ b/drivers/tty/vt/vt.c
1393 +@@ -4587,9 +4587,11 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op)
1394 + console_lock();
1395 + if (vc->vc_mode != KD_TEXT)
1396 + rc = -EINVAL;
1397 +- else if (vc->vc_sw->con_font_set)
1398 ++ else if (vc->vc_sw->con_font_set) {
1399 ++ if (vc_is_sel(vc))
1400 ++ clear_selection();
1401 + rc = vc->vc_sw->con_font_set(vc, &font, op->flags);
1402 +- else
1403 ++ } else
1404 + rc = -ENOSYS;
1405 + console_unlock();
1406 + kfree(font.data);
1407 +@@ -4616,9 +4618,11 @@ static int con_font_default(struct vc_data *vc, struct console_font_op *op)
1408 + console_unlock();
1409 + return -EINVAL;
1410 + }
1411 +- if (vc->vc_sw->con_font_default)
1412 ++ if (vc->vc_sw->con_font_default) {
1413 ++ if (vc_is_sel(vc))
1414 ++ clear_selection();
1415 + rc = vc->vc_sw->con_font_default(vc, &font, s);
1416 +- else
1417 ++ } else
1418 + rc = -ENOSYS;
1419 + console_unlock();
1420 + if (!rc) {
1421 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
1422 +index 5dc8827ede7e8..edb62b49f572e 100644
1423 +--- a/drivers/usb/class/cdc-acm.c
1424 ++++ b/drivers/usb/class/cdc-acm.c
1425 +@@ -1843,6 +1843,9 @@ static const struct usb_device_id acm_ids[] = {
1426 + { USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
1427 + .driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
1428 + },
1429 ++ { USB_DEVICE(0x0c26, 0x0020), /* Icom ICF3400 Serie */
1430 ++ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
1431 ++ },
1432 + { USB_DEVICE(0x0ca6, 0xa050), /* Castles VEGA3000 */
1433 + .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
1434 + },
1435 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
1436 +index 4cf0dc7f330dd..68d860a3fd617 100644
1437 +--- a/drivers/usb/core/hub.c
1438 ++++ b/drivers/usb/core/hub.c
1439 +@@ -5923,6 +5923,11 @@ re_enumerate_no_bos:
1440 + * the reset is over (using their post_reset method).
1441 + *
1442 + * Return: The same as for usb_reset_and_verify_device().
1443 ++ * However, if a reset is already in progress (for instance, if a
1444 ++ * driver doesn't have pre_ or post_reset() callbacks, and while
1445 ++ * being unbound or re-bound during the ongoing reset its disconnect()
1446 ++ * or probe() routine tries to perform a second, nested reset), the
1447 ++ * routine returns -EINPROGRESS.
1448 + *
1449 + * Note:
1450 + * The caller must own the device lock. For example, it's safe to use
1451 +@@ -5956,6 +5961,10 @@ int usb_reset_device(struct usb_device *udev)
1452 + return -EISDIR;
1453 + }
1454 +
1455 ++ if (udev->reset_in_progress)
1456 ++ return -EINPROGRESS;
1457 ++ udev->reset_in_progress = 1;
1458 ++
1459 + port_dev = hub->ports[udev->portnum - 1];
1460 +
1461 + /*
1462 +@@ -6020,6 +6029,7 @@ int usb_reset_device(struct usb_device *udev)
1463 +
1464 + usb_autosuspend_device(udev);
1465 + memalloc_noio_restore(noio_flag);
1466 ++ udev->reset_in_progress = 0;
1467 + return ret;
1468 + }
1469 + EXPORT_SYMBOL_GPL(usb_reset_device);
1470 +diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
1471 +index 34bb6124f1e2f..4f640c0c51b39 100644
1472 +--- a/drivers/usb/dwc2/platform.c
1473 ++++ b/drivers/usb/dwc2/platform.c
1474 +@@ -142,9 +142,9 @@ static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg)
1475 + } else if (hsotg->plat && hsotg->plat->phy_init) {
1476 + ret = hsotg->plat->phy_init(pdev, hsotg->plat->phy_type);
1477 + } else {
1478 +- ret = phy_power_on(hsotg->phy);
1479 ++ ret = phy_init(hsotg->phy);
1480 + if (ret == 0)
1481 +- ret = phy_init(hsotg->phy);
1482 ++ ret = phy_power_on(hsotg->phy);
1483 + }
1484 +
1485 + return ret;
1486 +@@ -176,9 +176,9 @@ static int __dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg)
1487 + } else if (hsotg->plat && hsotg->plat->phy_exit) {
1488 + ret = hsotg->plat->phy_exit(pdev, hsotg->plat->phy_type);
1489 + } else {
1490 +- ret = phy_exit(hsotg->phy);
1491 ++ ret = phy_power_off(hsotg->phy);
1492 + if (ret == 0)
1493 +- ret = phy_power_off(hsotg->phy);
1494 ++ ret = phy_exit(hsotg->phy);
1495 + }
1496 + if (ret)
1497 + return ret;
1498 +diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
1499 +index 8cc81f193e9c1..f4655665a1b5c 100644
1500 +--- a/drivers/usb/dwc3/core.c
1501 ++++ b/drivers/usb/dwc3/core.c
1502 +@@ -694,15 +694,16 @@ static void dwc3_core_exit(struct dwc3 *dwc)
1503 + {
1504 + dwc3_event_buffers_cleanup(dwc);
1505 +
1506 ++ usb_phy_set_suspend(dwc->usb2_phy, 1);
1507 ++ usb_phy_set_suspend(dwc->usb3_phy, 1);
1508 ++ phy_power_off(dwc->usb2_generic_phy);
1509 ++ phy_power_off(dwc->usb3_generic_phy);
1510 ++
1511 + usb_phy_shutdown(dwc->usb2_phy);
1512 + usb_phy_shutdown(dwc->usb3_phy);
1513 + phy_exit(dwc->usb2_generic_phy);
1514 + phy_exit(dwc->usb3_generic_phy);
1515 +
1516 +- usb_phy_set_suspend(dwc->usb2_phy, 1);
1517 +- usb_phy_set_suspend(dwc->usb3_phy, 1);
1518 +- phy_power_off(dwc->usb2_generic_phy);
1519 +- phy_power_off(dwc->usb3_generic_phy);
1520 + clk_bulk_disable_unprepare(dwc->num_clks, dwc->clks);
1521 + reset_control_assert(dwc->reset);
1522 + }
1523 +@@ -1537,16 +1538,16 @@ err5:
1524 + dwc3_debugfs_exit(dwc);
1525 + dwc3_event_buffers_cleanup(dwc);
1526 +
1527 +- usb_phy_shutdown(dwc->usb2_phy);
1528 +- usb_phy_shutdown(dwc->usb3_phy);
1529 +- phy_exit(dwc->usb2_generic_phy);
1530 +- phy_exit(dwc->usb3_generic_phy);
1531 +-
1532 + usb_phy_set_suspend(dwc->usb2_phy, 1);
1533 + usb_phy_set_suspend(dwc->usb3_phy, 1);
1534 + phy_power_off(dwc->usb2_generic_phy);
1535 + phy_power_off(dwc->usb3_generic_phy);
1536 +
1537 ++ usb_phy_shutdown(dwc->usb2_phy);
1538 ++ usb_phy_shutdown(dwc->usb3_phy);
1539 ++ phy_exit(dwc->usb2_generic_phy);
1540 ++ phy_exit(dwc->usb3_generic_phy);
1541 ++
1542 + dwc3_ulpi_exit(dwc);
1543 +
1544 + err4:
1545 +diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
1546 +index 7874b97e33227..aed35276e0e0c 100644
1547 +--- a/drivers/usb/dwc3/dwc3-qcom.c
1548 ++++ b/drivers/usb/dwc3/dwc3-qcom.c
1549 +@@ -190,6 +190,14 @@ static int dwc3_qcom_register_extcon(struct dwc3_qcom *qcom)
1550 + return 0;
1551 + }
1552 +
1553 ++/* Only usable in contexts where the role can not change. */
1554 ++static bool dwc3_qcom_is_host(struct dwc3_qcom *qcom)
1555 ++{
1556 ++ struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3);
1557 ++
1558 ++ return dwc->xhci;
1559 ++}
1560 ++
1561 + static void dwc3_qcom_disable_interrupts(struct dwc3_qcom *qcom)
1562 + {
1563 + if (qcom->hs_phy_irq) {
1564 +@@ -297,7 +305,11 @@ static irqreturn_t qcom_dwc3_resume_irq(int irq, void *data)
1565 + if (qcom->pm_suspended)
1566 + return IRQ_HANDLED;
1567 +
1568 +- if (dwc->xhci)
1569 ++ /*
1570 ++ * This is safe as role switching is done from a freezable workqueue
1571 ++ * and the wakeup interrupts are disabled as part of resume.
1572 ++ */
1573 ++ if (dwc3_qcom_is_host(qcom))
1574 + pm_runtime_resume(&dwc->xhci->dev);
1575 +
1576 + return IRQ_HANDLED;
1577 +diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
1578 +index fa252870c926f..a4eacc5cf58af 100644
1579 +--- a/drivers/usb/dwc3/host.c
1580 ++++ b/drivers/usb/dwc3/host.c
1581 +@@ -9,8 +9,13 @@
1582 +
1583 + #include <linux/platform_device.h>
1584 +
1585 ++#include "../host/xhci-plat.h"
1586 + #include "core.h"
1587 +
1588 ++static const struct xhci_plat_priv dwc3_xhci_plat_priv = {
1589 ++ .quirks = XHCI_SKIP_PHY_INIT,
1590 ++};
1591 ++
1592 + static int dwc3_host_get_irq(struct dwc3 *dwc)
1593 + {
1594 + struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
1595 +@@ -85,6 +90,11 @@ int dwc3_host_init(struct dwc3 *dwc)
1596 + goto err;
1597 + }
1598 +
1599 ++ ret = platform_device_add_data(xhci, &dwc3_xhci_plat_priv,
1600 ++ sizeof(dwc3_xhci_plat_priv));
1601 ++ if (ret)
1602 ++ goto err;
1603 ++
1604 + memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props));
1605 +
1606 + if (dwc->usb3_lpm_capable)
1607 +@@ -128,4 +138,5 @@ err:
1608 + void dwc3_host_exit(struct dwc3 *dwc)
1609 + {
1610 + platform_device_unregister(dwc->xhci);
1611 ++ dwc->xhci = NULL;
1612 + }
1613 +diff --git a/drivers/usb/gadget/function/storage_common.c b/drivers/usb/gadget/function/storage_common.c
1614 +index f7e6c42558eb7..021984921f919 100644
1615 +--- a/drivers/usb/gadget/function/storage_common.c
1616 ++++ b/drivers/usb/gadget/function/storage_common.c
1617 +@@ -294,8 +294,10 @@ EXPORT_SYMBOL_GPL(fsg_lun_fsync_sub);
1618 + void store_cdrom_address(u8 *dest, int msf, u32 addr)
1619 + {
1620 + if (msf) {
1621 +- /* Convert to Minutes-Seconds-Frames */
1622 +- addr >>= 2; /* Convert to 2048-byte frames */
1623 ++ /*
1624 ++ * Convert to Minutes-Seconds-Frames.
1625 ++ * Sector size is already set to 2048 bytes.
1626 ++ */
1627 + addr += 2*75; /* Lead-in occupies 2 seconds */
1628 + dest[3] = addr % 75; /* Frames */
1629 + addr /= 75;
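With the sector size already 2048 bytes, store_cdrom_address() no longer scales the address before the Minutes-Seconds-Frames conversion; only the 2-second (150-frame) lead-in offset remains. A standalone check of that arithmetic with a made-up LBA, runnable outside the kernel:

#include <stdio.h>

int main(void)
{
    unsigned int addr = 4350;            /* LBA in 2048-byte sectors (example value) */

    addr += 2 * 75;                      /* lead-in occupies 2 seconds = 150 frames */
    printf("MSF = %02u:%02u:%02u\n",
           addr / 75 / 60,               /* minutes */
           (addr / 75) % 60,             /* seconds */
           addr % 75);                   /* frames  */
    return 0;                            /* prints "MSF = 01:00:00" */
}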
1630 +diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
1631 +index 9c066d1c512b1..66cb9f08bff10 100644
1632 +--- a/drivers/usb/host/xhci-hub.c
1633 ++++ b/drivers/usb/host/xhci-hub.c
1634 +@@ -566,7 +566,7 @@ struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd)
1635 + * It will release and re-acquire the lock while calling ACPI
1636 + * method.
1637 + */
1638 +-void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd,
1639 ++static void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd,
1640 + u16 index, bool on, unsigned long *flags)
1641 + {
1642 + struct xhci_hub *rhub;
1643 +@@ -1555,6 +1555,17 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
1644 +
1645 + status = bus_state->resuming_ports;
1646 +
1647 ++ /*
1648 ++ * SS devices are only visible to roothub after link training completes.
1649 ++ * Keep polling roothubs for a grace period after xHC start
1650 ++ */
1651 ++ if (xhci->run_graceperiod) {
1652 ++ if (time_before(jiffies, xhci->run_graceperiod))
1653 ++ status = 1;
1654 ++ else
1655 ++ xhci->run_graceperiod = 0;
1656 ++ }
1657 ++
1658 + mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC;
1659 +
1660 + /* For each port, did anything change? If so, set that bit in buf. */
1661 +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
1662 +index 9fe35bb67731e..5ce16a259e612 100644
1663 +--- a/drivers/usb/host/xhci.c
1664 ++++ b/drivers/usb/host/xhci.c
1665 +@@ -149,9 +149,11 @@ int xhci_start(struct xhci_hcd *xhci)
1666 + xhci_err(xhci, "Host took too long to start, "
1667 + "waited %u microseconds.\n",
1668 + XHCI_MAX_HALT_USEC);
1669 +- if (!ret)
1670 ++ if (!ret) {
1671 + /* clear state flags. Including dying, halted or removing */
1672 + xhci->xhc_state = 0;
1673 ++ xhci->run_graceperiod = jiffies + msecs_to_jiffies(500);
1674 ++ }
1675 +
1676 + return ret;
1677 + }
1678 +@@ -775,8 +777,6 @@ static void xhci_stop(struct usb_hcd *hcd)
1679 + void xhci_shutdown(struct usb_hcd *hcd)
1680 + {
1681 + struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1682 +- unsigned long flags;
1683 +- int i;
1684 +
1685 + if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
1686 + usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));
1687 +@@ -792,21 +792,12 @@ void xhci_shutdown(struct usb_hcd *hcd)
1688 + del_timer_sync(&xhci->shared_hcd->rh_timer);
1689 + }
1690 +
1691 +- spin_lock_irqsave(&xhci->lock, flags);
1692 ++ spin_lock_irq(&xhci->lock);
1693 + xhci_halt(xhci);
1694 +-
1695 +- /* Power off USB2 ports*/
1696 +- for (i = 0; i < xhci->usb2_rhub.num_ports; i++)
1697 +- xhci_set_port_power(xhci, xhci->main_hcd, i, false, &flags);
1698 +-
1699 +- /* Power off USB3 ports*/
1700 +- for (i = 0; i < xhci->usb3_rhub.num_ports; i++)
1701 +- xhci_set_port_power(xhci, xhci->shared_hcd, i, false, &flags);
1702 +-
1703 + /* Workaround for spurious wakeups at shutdown with HSW */
1704 + if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
1705 + xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
1706 +- spin_unlock_irqrestore(&xhci->lock, flags);
1707 ++ spin_unlock_irq(&xhci->lock);
1708 +
1709 + xhci_cleanup_msix(xhci);
1710 +
1711 +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
1712 +index 5a6ad776858e3..0dc448630197c 100644
1713 +--- a/drivers/usb/host/xhci.h
1714 ++++ b/drivers/usb/host/xhci.h
1715 +@@ -1814,7 +1814,7 @@ struct xhci_hcd {
1716 +
1717 + /* Host controller watchdog timer structures */
1718 + unsigned int xhc_state;
1719 +-
1720 ++ unsigned long run_graceperiod;
1721 + u32 command;
1722 + struct s3_save s3;
1723 + /* Host controller is dying - not responding to commands. "I'm not dead yet!"
1724 +@@ -2155,8 +2155,6 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
1725 + int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
1726 + int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1);
1727 + struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd);
1728 +-void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd, u16 index,
1729 +- bool on, unsigned long *flags);
1730 +
1731 + void xhci_hc_died(struct xhci_hcd *xhci);
1732 +
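Taken together, the xHCI hunks arm a 500 ms grace period in xhci_start() and keep xhci_hub_status_data() reporting activity until it elapses, so SuperSpeed devices that are still link-training are not missed. The underlying jiffies pattern, reduced to a sketch around an invented my_hcd structure:

#include <linux/jiffies.h>
#include <linux/types.h>

struct my_hcd {
    unsigned long run_graceperiod;       /* 0 = no grace period armed */
};

static void my_start(struct my_hcd *h)
{
    h->run_graceperiod = jiffies + msecs_to_jiffies(500);
}

static bool my_keep_polling(struct my_hcd *h)
{
    if (h->run_graceperiod) {
        if (time_before(jiffies, h->run_graceperiod))
            return true;                 /* still inside the grace window */
        h->run_graceperiod = 0;          /* window elapsed, disarm */
    }
    return false;
}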
1733 +diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
1734 +index f06a09e59d8ba..f37bde88eb5d5 100644
1735 +--- a/drivers/usb/serial/ch341.c
1736 ++++ b/drivers/usb/serial/ch341.c
1737 +@@ -96,7 +96,9 @@ struct ch341_private {
1738 + u8 mcr;
1739 + u8 msr;
1740 + u8 lcr;
1741 ++
1742 + unsigned long quirks;
1743 ++ u8 version;
1744 + };
1745 +
1746 + static void ch341_set_termios(struct tty_struct *tty,
1747 +@@ -175,13 +177,20 @@ static int ch341_set_baudrate_lcr(struct usb_device *dev,
1748 + /*
1749 + * CH341A buffers data until a full endpoint-size packet (32 bytes)
1750 + * has been received unless bit 7 is set.
1751 ++ *
1752 ++ * At least one device with version 0x27 appears to have this bit
1753 ++ * inverted.
1754 + */
1755 +- a |= BIT(7);
1756 ++ if (priv->version > 0x27)
1757 ++ a |= BIT(7);
1758 +
1759 + r = ch341_control_out(dev, CH341_REQ_WRITE_REG, 0x1312, a);
1760 + if (r)
1761 + return r;
1762 +
1763 ++ if (priv->version < 0x30)
1764 ++ return 0;
1765 ++
1766 + r = ch341_control_out(dev, CH341_REQ_WRITE_REG, 0x2518, lcr);
1767 + if (r)
1768 + return r;
1769 +@@ -233,7 +242,9 @@ static int ch341_configure(struct usb_device *dev, struct ch341_private *priv)
1770 + r = ch341_control_in(dev, CH341_REQ_READ_VERSION, 0, 0, buffer, size);
1771 + if (r < 0)
1772 + goto out;
1773 +- dev_dbg(&dev->dev, "Chip version: 0x%02x\n", buffer[0]);
1774 ++
1775 ++ priv->version = buffer[0];
1776 ++ dev_dbg(&dev->dev, "Chip version: 0x%02x\n", priv->version);
1777 +
1778 + r = ch341_control_out(dev, CH341_REQ_SERIAL_INIT, 0, 0);
1779 + if (r < 0)
1780 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
1781 +index d5a1832aa48eb..bc4fd79a13dbe 100644
1782 +--- a/drivers/usb/serial/cp210x.c
1783 ++++ b/drivers/usb/serial/cp210x.c
1784 +@@ -131,6 +131,7 @@ static const struct usb_device_id id_table[] = {
1785 + { USB_DEVICE(0x10C4, 0x83AA) }, /* Mark-10 Digital Force Gauge */
1786 + { USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */
1787 + { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
1788 ++ { USB_DEVICE(0x10C4, 0x8414) }, /* Decagon USB Cable Adapter */
1789 + { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
1790 + { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
1791 + { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
1792 +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
1793 +index d671e096594bb..ed1b26193d7a3 100644
1794 +--- a/drivers/usb/serial/ftdi_sio.c
1795 ++++ b/drivers/usb/serial/ftdi_sio.c
1796 +@@ -1045,6 +1045,8 @@ static const struct usb_device_id id_table_combined[] = {
1797 + /* IDS GmbH devices */
1798 + { USB_DEVICE(IDS_VID, IDS_SI31A_PID) },
1799 + { USB_DEVICE(IDS_VID, IDS_CM31A_PID) },
1800 ++ /* Omron devices */
1801 ++ { USB_DEVICE(OMRON_VID, OMRON_CS1W_CIF31_PID) },
1802 + /* U-Blox devices */
1803 + { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) },
1804 + { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) },
1805 +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
1806 +index 4e92c165c86bf..31c8ccabbbb78 100644
1807 +--- a/drivers/usb/serial/ftdi_sio_ids.h
1808 ++++ b/drivers/usb/serial/ftdi_sio_ids.h
1809 +@@ -661,6 +661,12 @@
1810 + #define INFINEON_TRIBOARD_TC1798_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
1811 + #define INFINEON_TRIBOARD_TC2X7_PID 0x0043 /* DAS JTAG TriBoard TC2X7 V1.0 */
1812 +
1813 ++/*
1814 ++ * Omron corporation (https://www.omron.com)
1815 ++ */
1816 ++ #define OMRON_VID 0x0590
1817 ++ #define OMRON_CS1W_CIF31_PID 0x00b2
1818 ++
1819 + /*
1820 + * Acton Research Corp.
1821 + */
1822 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1823 +index 2317ed357d8ef..cbe8ad3cd61fd 100644
1824 +--- a/drivers/usb/serial/option.c
1825 ++++ b/drivers/usb/serial/option.c
1826 +@@ -253,6 +253,7 @@ static void option_instat_callback(struct urb *urb);
1827 + #define QUECTEL_PRODUCT_BG96 0x0296
1828 + #define QUECTEL_PRODUCT_EP06 0x0306
1829 + #define QUECTEL_PRODUCT_EM05G 0x030a
1830 ++#define QUECTEL_PRODUCT_EM060K 0x030b
1831 + #define QUECTEL_PRODUCT_EM12 0x0512
1832 + #define QUECTEL_PRODUCT_RM500Q 0x0800
1833 + #define QUECTEL_PRODUCT_EC200S_CN 0x6002
1834 +@@ -438,6 +439,8 @@ static void option_instat_callback(struct urb *urb);
1835 + #define CINTERION_PRODUCT_MV31_2_RMNET 0x00b9
1836 + #define CINTERION_PRODUCT_MV32_WA 0x00f1
1837 + #define CINTERION_PRODUCT_MV32_WB 0x00f2
1838 ++#define CINTERION_PRODUCT_MV32_WA_RMNET 0x00f3
1839 ++#define CINTERION_PRODUCT_MV32_WB_RMNET 0x00f4
1840 +
1841 + /* Olivetti products */
1842 + #define OLIVETTI_VENDOR_ID 0x0b3c
1843 +@@ -573,6 +576,10 @@ static void option_instat_callback(struct urb *urb);
1844 + #define WETELECOM_PRODUCT_6802 0x6802
1845 + #define WETELECOM_PRODUCT_WMD300 0x6803
1846 +
1847 ++/* OPPO products */
1848 ++#define OPPO_VENDOR_ID 0x22d9
1849 ++#define OPPO_PRODUCT_R11 0x276c
1850 ++
1851 +
1852 + /* Device flags */
1853 +
1854 +@@ -1138,6 +1145,9 @@ static const struct usb_device_id option_ids[] = {
1855 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
1856 + { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G, 0xff),
1857 + .driver_info = RSVD(6) | ZLP },
1858 ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0x00, 0x40) },
1859 ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x30) },
1860 ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x40) },
1861 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
1862 + .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
1863 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
1864 +@@ -1993,8 +2003,12 @@ static const struct usb_device_id option_ids[] = {
1865 + .driver_info = RSVD(0)},
1866 + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA, 0xff),
1867 + .driver_info = RSVD(3)},
1868 ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA_RMNET, 0xff),
1869 ++ .driver_info = RSVD(0) },
1870 + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB, 0xff),
1871 + .driver_info = RSVD(3)},
1872 ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB_RMNET, 0xff),
1873 ++ .driver_info = RSVD(0) },
1874 + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
1875 + .driver_info = RSVD(4) },
1876 + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
1877 +@@ -2155,6 +2169,7 @@ static const struct usb_device_id option_ids[] = {
1878 + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
1879 + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
1880 + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
1881 ++ { USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
1882 + { } /* Terminating entry */
1883 + };
1884 + MODULE_DEVICE_TABLE(usb, option_ids);
1885 +diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
1886 +index 66e7f5d123c46..6a59950a63a03 100644
1887 +--- a/drivers/usb/storage/unusual_devs.h
1888 ++++ b/drivers/usb/storage/unusual_devs.h
1889 +@@ -2294,6 +2294,13 @@ UNUSUAL_DEV( 0x1e74, 0x4621, 0x0000, 0x0000,
1890 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1891 + US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ),
1892 +
1893 ++/* Reported by Witold Lipieta <witold.lipieta@×××××××××.com> */
1894 ++UNUSUAL_DEV( 0x1fc9, 0x0117, 0x0100, 0x0100,
1895 ++ "NXP Semiconductors",
1896 ++ "PN7462AU",
1897 ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1898 ++ US_FL_IGNORE_RESIDUE ),
1899 ++
1900 + /* Supplied with some Castlewood ORB removable drives */
1901 + UNUSUAL_DEV( 0x2027, 0xa001, 0x0000, 0x9999,
1902 + "Double-H Technology",
1903 +diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
1904 +index 4092248a5936d..0b5cbf5ed1ca6 100644
1905 +--- a/drivers/usb/typec/altmodes/displayport.c
1906 ++++ b/drivers/usb/typec/altmodes/displayport.c
1907 +@@ -87,8 +87,8 @@ static int dp_altmode_configure(struct dp_altmode *dp, u8 con)
1908 + case DP_STATUS_CON_UFP_D:
1909 + case DP_STATUS_CON_BOTH: /* NOTE: First acting as DP source */
1910 + conf |= DP_CONF_UFP_U_AS_UFP_D;
1911 +- pin_assign = DP_CAP_DFP_D_PIN_ASSIGN(dp->alt->vdo) &
1912 +- DP_CAP_UFP_D_PIN_ASSIGN(dp->port->vdo);
1913 ++ pin_assign = DP_CAP_PIN_ASSIGN_UFP_D(dp->alt->vdo) &
1914 ++ DP_CAP_PIN_ASSIGN_DFP_D(dp->port->vdo);
1915 + break;
1916 + default:
1917 + break;
1918 +diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c
1919 +index 80fdd3ee0565f..57b1e011d2d34 100644
1920 +--- a/drivers/video/fbdev/chipsfb.c
1921 ++++ b/drivers/video/fbdev/chipsfb.c
1922 +@@ -430,6 +430,7 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
1923 + err_release_fb:
1924 + framebuffer_release(p);
1925 + err_disable:
1926 ++ pci_disable_device(dp);
1927 + err_out:
1928 + return rc;
1929 + }
1930 +diff --git a/fs/afs/flock.c b/fs/afs/flock.c
1931 +index d5e5a6ddc8478..0fa05998a24d2 100644
1932 +--- a/fs/afs/flock.c
1933 ++++ b/fs/afs/flock.c
1934 +@@ -75,7 +75,7 @@ void afs_lock_op_done(struct afs_call *call)
1935 + if (call->error == 0) {
1936 + spin_lock(&vnode->lock);
1937 + trace_afs_flock_ev(vnode, NULL, afs_flock_timestamp, 0);
1938 +- vnode->locked_at = call->reply_time;
1939 ++ vnode->locked_at = call->issue_time;
1940 + afs_schedule_lock_extension(vnode);
1941 + spin_unlock(&vnode->lock);
1942 + }
1943 +diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
1944 +index 5c2729fc07e52..254580b1dc74c 100644
1945 +--- a/fs/afs/fsclient.c
1946 ++++ b/fs/afs/fsclient.c
1947 +@@ -136,7 +136,7 @@ bad:
1948 +
1949 + static time64_t xdr_decode_expiry(struct afs_call *call, u32 expiry)
1950 + {
1951 +- return ktime_divns(call->reply_time, NSEC_PER_SEC) + expiry;
1952 ++ return ktime_divns(call->issue_time, NSEC_PER_SEC) + expiry;
1953 + }
1954 +
1955 + static void xdr_decode_AFSCallBack(const __be32 **_bp,
1956 +diff --git a/fs/afs/internal.h b/fs/afs/internal.h
1957 +index c3ad582f9fd0e..8d6582713fe72 100644
1958 +--- a/fs/afs/internal.h
1959 ++++ b/fs/afs/internal.h
1960 +@@ -159,7 +159,6 @@ struct afs_call {
1961 + bool need_attention; /* T if RxRPC poked us */
1962 + bool async; /* T if asynchronous */
1963 + bool upgrade; /* T to request service upgrade */
1964 +- bool have_reply_time; /* T if have got reply_time */
1965 + bool intr; /* T if interruptible */
1966 + bool unmarshalling_error; /* T if an unmarshalling error occurred */
1967 + u16 service_id; /* Actual service ID (after upgrade) */
1968 +@@ -173,7 +172,7 @@ struct afs_call {
1969 + } __attribute__((packed));
1970 + __be64 tmp64;
1971 + };
1972 +- ktime_t reply_time; /* Time of first reply packet */
1973 ++ ktime_t issue_time; /* Time of issue of operation */
1974 + };
1975 +
1976 + struct afs_call_type {
1977 +diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
1978 +index 6adab30a83993..49fcce6529a60 100644
1979 +--- a/fs/afs/rxrpc.c
1980 ++++ b/fs/afs/rxrpc.c
1981 +@@ -428,6 +428,7 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
1982 + if (call->max_lifespan)
1983 + rxrpc_kernel_set_max_life(call->net->socket, rxcall,
1984 + call->max_lifespan);
1985 ++ call->issue_time = ktime_get_real();
1986 +
1987 + /* send the request */
1988 + iov[0].iov_base = call->request;
1989 +@@ -532,12 +533,6 @@ static void afs_deliver_to_call(struct afs_call *call)
1990 + return;
1991 + }
1992 +
1993 +- if (!call->have_reply_time &&
1994 +- rxrpc_kernel_get_reply_time(call->net->socket,
1995 +- call->rxcall,
1996 +- &call->reply_time))
1997 +- call->have_reply_time = true;
1998 +-
1999 + ret = call->type->deliver(call);
2000 + state = READ_ONCE(call->state);
2001 + if (ret == 0 && call->unmarshalling_error)
2002 +diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
2003 +index 3b19b009452a2..fa85b359f325b 100644
2004 +--- a/fs/afs/yfsclient.c
2005 ++++ b/fs/afs/yfsclient.c
2006 +@@ -241,8 +241,7 @@ static void xdr_decode_YFSCallBack(const __be32 **_bp,
2007 + struct afs_callback *cb = &scb->callback;
2008 + ktime_t cb_expiry;
2009 +
2010 +- cb_expiry = call->reply_time;
2011 +- cb_expiry = ktime_add(cb_expiry, xdr_to_u64(x->expiration_time) * 100);
2012 ++ cb_expiry = ktime_add(call->issue_time, xdr_to_u64(x->expiration_time) * 100);
2013 + cb->expires_at = ktime_divns(cb_expiry, NSEC_PER_SEC);
2014 + scb->have_cb = true;
2015 + *_bp += xdr_size(x);
2016 +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
2017 +index c7706a769de12..548de841cee53 100644
2018 +--- a/fs/btrfs/volumes.c
2019 ++++ b/fs/btrfs/volumes.c
2020 +@@ -713,15 +713,47 @@ static void pending_bios_fn(struct btrfs_work *work)
2021 + run_scheduled_bios(device);
2022 + }
2023 +
2024 +-static bool device_path_matched(const char *path, struct btrfs_device *device)
2025 ++/*
2026 ++ * Check if the device at @path matches the given btrfs_device.
2027 ++ *
2028 ++ * Returns:
2029 ++ * true If it is the same device.
2030 ++ * false If it is not the same device or on error.
2031 ++ */
2032 ++static bool device_matched(const struct btrfs_device *device, const char *path)
2033 + {
2034 +- int found;
2035 ++ char *device_name;
2036 ++ struct block_device *bdev_old;
2037 ++ struct block_device *bdev_new;
2038 ++
2039 ++ /*
2040 ++ * If we are looking for a device with the matching dev_t, then skip
2041 ++ * device without a name (a missing device).
2042 ++ */
2043 ++ if (!device->name)
2044 ++ return false;
2045 ++
2046 ++ device_name = kzalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL);
2047 ++ if (!device_name)
2048 ++ return false;
2049 +
2050 + rcu_read_lock();
2051 +- found = strcmp(rcu_str_deref(device->name), path);
2052 ++ scnprintf(device_name, BTRFS_PATH_NAME_MAX, "%s", rcu_str_deref(device->name));
2053 + rcu_read_unlock();
2054 +
2055 +- return found == 0;
2056 ++ bdev_old = lookup_bdev(device_name);
2057 ++ kfree(device_name);
2058 ++ if (IS_ERR(bdev_old))
2059 ++ return false;
2060 ++
2061 ++ bdev_new = lookup_bdev(path);
2062 ++ if (IS_ERR(bdev_new))
2063 ++ return false;
2064 ++
2065 ++ if (bdev_old == bdev_new)
2066 ++ return true;
2067 ++
2068 ++ return false;
2069 + }
2070 +
2071 + /*
2072 +@@ -754,9 +786,7 @@ static int btrfs_free_stale_devices(const char *path,
2073 + &fs_devices->devices, dev_list) {
2074 + if (skip_device && skip_device == device)
2075 + continue;
2076 +- if (path && !device->name)
2077 +- continue;
2078 +- if (path && !device_path_matched(path, device))
2079 ++ if (path && !device_matched(device, path))
2080 + continue;
2081 + if (fs_devices->opened) {
2082 + /* for an already deleted device return 0 */
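device_matched() above resolves both the stored device name and the caller-supplied path with lookup_bdev() and compares the resulting block_device pointers, so different spellings of the same node (for example a /dev/mapper symlink versus the underlying /dev/dm-* name) still match. The comparison at its core, as an illustrative helper mirroring the 5.4 lookup_bdev() that returns a pointer, with reference handling elided:

#include <linux/fs.h>
#include <linux/err.h>
#include <linux/types.h>

static bool my_same_bdev(const char *path_a, const char *path_b)
{
    struct block_device *a, *b;

    a = lookup_bdev(path_a);
    if (IS_ERR(a))
        return false;
    b = lookup_bdev(path_b);
    if (IS_ERR(b))
        return false;
    return a == b;                       /* same block_device => same device */
}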
2083 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
2084 +index 6ae281cff0d50..6039b0cdfe04e 100644
2085 +--- a/fs/cifs/smb2ops.c
2086 ++++ b/fs/cifs/smb2ops.c
2087 +@@ -3051,7 +3051,7 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
2088 + static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
2089 + loff_t offset, loff_t len)
2090 + {
2091 +- struct inode *inode;
2092 ++ struct inode *inode = file_inode(file);
2093 + struct cifsFileInfo *cfile = file->private_data;
2094 + struct file_zero_data_information fsctl_buf;
2095 + long rc;
2096 +@@ -3060,14 +3060,12 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
2097 +
2098 + xid = get_xid();
2099 +
2100 +- inode = d_inode(cfile->dentry);
2101 +-
2102 ++ inode_lock(inode);
2103 + /* Need to make file sparse, if not already, before freeing range. */
2104 + /* Consider adding equivalent for compressed since it could also work */
2105 + if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) {
2106 + rc = -EOPNOTSUPP;
2107 +- free_xid(xid);
2108 +- return rc;
2109 ++ goto out;
2110 + }
2111 +
2112 + /*
2113 +@@ -3086,6 +3084,8 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
2114 + true /* is_fctl */, (char *)&fsctl_buf,
2115 + sizeof(struct file_zero_data_information),
2116 + CIFSMaxBufSize, NULL, NULL);
2117 ++out:
2118 ++ inode_unlock(inode);
2119 + free_xid(xid);
2120 + return rc;
2121 + }
2122 +diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
2123 +index e595a29bf46e3..e0f07382ebebc 100644
2124 +--- a/fs/debugfs/inode.c
2125 ++++ b/fs/debugfs/inode.c
2126 +@@ -742,6 +742,28 @@ void debugfs_remove(struct dentry *dentry)
2127 + }
2128 + EXPORT_SYMBOL_GPL(debugfs_remove);
2129 +
2130 ++/**
2131 ++ * debugfs_lookup_and_remove - lookup a directory or file and recursively remove it
2132 ++ * @name: a pointer to a string containing the name of the item to look up.
2133 ++ * @parent: a pointer to the parent dentry of the item.
2134 ++ *
2135 ++ * This is the equivalent of doing something like
2136 ++ * debugfs_remove(debugfs_lookup(..)) but with the proper reference counting
2137 ++ * handled for the directory being looked up.
2138 ++ */
2139 ++void debugfs_lookup_and_remove(const char *name, struct dentry *parent)
2140 ++{
2141 ++ struct dentry *dentry;
2142 ++
2143 ++ dentry = debugfs_lookup(name, parent);
2144 ++ if (!dentry)
2145 ++ return;
2146 ++
2147 ++ debugfs_remove(dentry);
2148 ++ dput(dentry);
2149 ++}
2150 ++EXPORT_SYMBOL_GPL(debugfs_lookup_and_remove);
2151 ++
2152 + /**
2153 + * debugfs_remove_recursive - recursively removes a directory
2154 + * @dentry: a pointer to the dentry of the directory to be removed. If this
2155 +diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
2156 +index 8fab480a8e4e4..0575ad84cc555 100644
2157 +--- a/include/linux/buffer_head.h
2158 ++++ b/include/linux/buffer_head.h
2159 +@@ -136,6 +136,17 @@ BUFFER_FNS(Defer_Completion, defer_completion)
2160 +
2161 + static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
2162 + {
2163 ++ /*
2164 ++ * If somebody else already set this uptodate, they will
2165 ++ * have done the memory barrier, and a reader will thus
2166 ++ * see *some* valid buffer state.
2167 ++ *
2168 ++ * Any other serialization (with IO errors or whatever that
2169 ++ * might clear the bit) has to come from other state (eg BH_Lock).
2170 ++ */
2171 ++ if (test_bit(BH_Uptodate, &bh->b_state))
2172 ++ return;
2173 ++
2174 + /*
2175 + * make it consistent with folio_mark_uptodate
2176 + * pairs with smp_load_acquire in buffer_uptodate
2177 +diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
2178 +index 798f0b9b43aee..7e4f156acc2f7 100644
2179 +--- a/include/linux/debugfs.h
2180 ++++ b/include/linux/debugfs.h
2181 +@@ -85,6 +85,8 @@ struct dentry *debugfs_create_automount(const char *name,
2182 + void debugfs_remove(struct dentry *dentry);
2183 + void debugfs_remove_recursive(struct dentry *dentry);
2184 +
2185 ++void debugfs_lookup_and_remove(const char *name, struct dentry *parent);
2186 ++
2187 + const struct file_operations *debugfs_real_fops(const struct file *filp);
2188 +
2189 + int debugfs_file_get(struct dentry *dentry);
2190 +@@ -216,6 +218,10 @@ static inline void debugfs_remove(struct dentry *dentry)
2191 + static inline void debugfs_remove_recursive(struct dentry *dentry)
2192 + { }
2193 +
2194 ++static inline void debugfs_lookup_and_remove(const char *name,
2195 ++ struct dentry *parent)
2196 ++{ }
2197 ++
2198 + const struct file_operations *debugfs_real_fops(const struct file *filp);
2199 +
2200 + static inline int debugfs_file_get(struct dentry *dentry)
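debugfs_lookup_and_remove() exists because the obvious one-liner leaks: debugfs_lookup() takes a reference on the dentry that debugfs_remove() does not drop. A usage sketch with illustrative driver names:

#include <linux/debugfs.h>

static struct dentry *my_debug_dir;      /* created earlier with debugfs_create_dir() */

static void my_remove_node(const char *name)
{
    /* Old, reference-leaking form:
     *     debugfs_remove(debugfs_lookup(name, my_debug_dir));
     * The new helper performs the lookup, removal and dput() in one call:
     */
    debugfs_lookup_and_remove(name, my_debug_dir);
}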
2201 +diff --git a/include/linux/platform_data/x86/pmc_atom.h b/include/linux/platform_data/x86/pmc_atom.h
2202 +index 022bcea9edec5..99a9b09dc839d 100644
2203 +--- a/include/linux/platform_data/x86/pmc_atom.h
2204 ++++ b/include/linux/platform_data/x86/pmc_atom.h
2205 +@@ -7,6 +7,8 @@
2206 + #ifndef PMC_ATOM_H
2207 + #define PMC_ATOM_H
2208 +
2209 ++#include <linux/bits.h>
2210 ++
2211 + /* ValleyView Power Control Unit PCI Device ID */
2212 + #define PCI_DEVICE_ID_VLV_PMC 0x0F1C
2213 + /* CherryTrail Power Control Unit PCI Device ID */
2214 +@@ -139,9 +141,9 @@
2215 + #define ACPI_MMIO_REG_LEN 0x100
2216 +
2217 + #define PM1_CNT 0x4
2218 +-#define SLEEP_TYPE_MASK 0xFFFFECFF
2219 ++#define SLEEP_TYPE_MASK GENMASK(12, 10)
2220 + #define SLEEP_TYPE_S5 0x1C00
2221 +-#define SLEEP_ENABLE 0x2000
2222 ++#define SLEEP_ENABLE BIT(13)
2223 +
2224 + extern int pmc_atom_read(int offset, u32 *value);
2225 + extern int pmc_atom_write(int offset, u32 value);
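The pmc_atom.h change expresses the sleep-control fields with GENMASK()/BIT() instead of magic numbers; the values can be checked by hand, since GENMASK(12, 10) covers bits 10-12 and BIT(13) is bit 13. A small standalone verification (MY_GENMASK/MY_BIT reimplement the kernel macros for 32-bit values):

#include <stdio.h>

#define MY_BIT(n)        (1U << (n))
#define MY_GENMASK(h, l) (((~0U) >> (31 - (h))) & ~((1U << (l)) - 1U))

int main(void)
{
    printf("SLEEP_TYPE_MASK = 0x%04X\n", MY_GENMASK(12, 10)); /* 0x1C00 */
    printf("SLEEP_ENABLE    = 0x%04X\n", MY_BIT(13));         /* 0x2000 */
    /* SLEEP_TYPE_S5 (0x1C00) sits entirely inside the new mask. */
    return 0;
}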
2226 +diff --git a/include/linux/usb.h b/include/linux/usb.h
2227 +index e656e7b4b1e44..703c7464d8957 100644
2228 +--- a/include/linux/usb.h
2229 ++++ b/include/linux/usb.h
2230 +@@ -580,6 +580,7 @@ struct usb3_lpm_parameters {
2231 + * @devaddr: device address, XHCI: assigned by HW, others: same as devnum
2232 + * @can_submit: URBs may be submitted
2233 + * @persist_enabled: USB_PERSIST enabled for this device
2234 ++ * @reset_in_progress: the device is being reset
2235 + * @have_langid: whether string_langid is valid
2236 + * @authorized: policy has said we can use it;
2237 + * (user space) policy determines if we authorize this device to be
2238 +@@ -665,6 +666,7 @@ struct usb_device {
2239 +
2240 + unsigned can_submit:1;
2241 + unsigned persist_enabled:1;
2242 ++ unsigned reset_in_progress:1;
2243 + unsigned have_langid:1;
2244 + unsigned authorized:1;
2245 + unsigned authenticated:1;
2246 +diff --git a/include/linux/usb/typec_dp.h b/include/linux/usb/typec_dp.h
2247 +index fc4c7edb2e8a4..296909ea04f26 100644
2248 +--- a/include/linux/usb/typec_dp.h
2249 ++++ b/include/linux/usb/typec_dp.h
2250 +@@ -73,6 +73,11 @@ enum {
2251 + #define DP_CAP_USB BIT(7)
2252 + #define DP_CAP_DFP_D_PIN_ASSIGN(_cap_) (((_cap_) & GENMASK(15, 8)) >> 8)
2253 + #define DP_CAP_UFP_D_PIN_ASSIGN(_cap_) (((_cap_) & GENMASK(23, 16)) >> 16)
2254 ++/* Get pin assignment taking plug & receptacle into consideration */
2255 ++#define DP_CAP_PIN_ASSIGN_UFP_D(_cap_) ((_cap_ & DP_CAP_RECEPTACLE) ? \
2256 ++ DP_CAP_UFP_D_PIN_ASSIGN(_cap_) : DP_CAP_DFP_D_PIN_ASSIGN(_cap_))
2257 ++#define DP_CAP_PIN_ASSIGN_DFP_D(_cap_) ((_cap_ & DP_CAP_RECEPTACLE) ? \
2258 ++ DP_CAP_DFP_D_PIN_ASSIGN(_cap_) : DP_CAP_UFP_D_PIN_ASSIGN(_cap_))
2259 +
2260 + /* DisplayPort Status Update VDO bits */
2261 + #define DP_STATUS_CONNECTION(_status_) ((_status_) & 3)
2262 +diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h
2263 +index 236f290224aae..8dfb2526b3aa2 100644
2264 +--- a/kernel/cgroup/cgroup-internal.h
2265 ++++ b/kernel/cgroup/cgroup-internal.h
2266 +@@ -250,9 +250,10 @@ int cgroup_migrate(struct task_struct *leader, bool threadgroup,
2267 +
2268 + int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
2269 + bool threadgroup);
2270 +-struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup)
2271 ++struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
2272 ++ bool *locked)
2273 + __acquires(&cgroup_threadgroup_rwsem);
2274 +-void cgroup_procs_write_finish(struct task_struct *task)
2275 ++void cgroup_procs_write_finish(struct task_struct *task, bool locked)
2276 + __releases(&cgroup_threadgroup_rwsem);
2277 +
2278 + void cgroup_lock_and_drain_offline(struct cgroup *cgrp);
2279 +diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
2280 +index 117d70098cd49..aa7577b189e92 100644
2281 +--- a/kernel/cgroup/cgroup-v1.c
2282 ++++ b/kernel/cgroup/cgroup-v1.c
2283 +@@ -498,12 +498,13 @@ static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of,
2284 + struct task_struct *task;
2285 + const struct cred *cred, *tcred;
2286 + ssize_t ret;
2287 ++ bool locked;
2288 +
2289 + cgrp = cgroup_kn_lock_live(of->kn, false);
2290 + if (!cgrp)
2291 + return -ENODEV;
2292 +
2293 +- task = cgroup_procs_write_start(buf, threadgroup);
2294 ++ task = cgroup_procs_write_start(buf, threadgroup, &locked);
2295 + ret = PTR_ERR_OR_ZERO(task);
2296 + if (ret)
2297 + goto out_unlock;
2298 +@@ -526,7 +527,7 @@ static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of,
2299 + ret = cgroup_attach_task(cgrp, task, threadgroup);
2300 +
2301 + out_finish:
2302 +- cgroup_procs_write_finish(task);
2303 ++ cgroup_procs_write_finish(task, locked);
2304 + out_unlock:
2305 + cgroup_kn_unlock(of->kn);
2306 +
2307 +diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
2308 +index 23f0db2900e4b..d14575c0e4640 100644
2309 +--- a/kernel/cgroup/cgroup.c
2310 ++++ b/kernel/cgroup/cgroup.c
2311 +@@ -30,6 +30,7 @@
2312 +
2313 + #include "cgroup-internal.h"
2314 +
2315 ++#include <linux/cpu.h>
2316 + #include <linux/cred.h>
2317 + #include <linux/errno.h>
2318 + #include <linux/init_task.h>
2319 +@@ -2376,6 +2377,47 @@ int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
2320 + }
2321 + EXPORT_SYMBOL_GPL(task_cgroup_path);
2322 +
2323 ++/**
2324 ++ * cgroup_attach_lock - Lock for ->attach()
2325 ++ * @lock_threadgroup: whether to down_write cgroup_threadgroup_rwsem
2326 ++ *
2327 ++ * cgroup migration sometimes needs to stabilize threadgroups against forks and
2328 ++ * exits by write-locking cgroup_threadgroup_rwsem. However, some ->attach()
2329 ++ * implementations (e.g. cpuset), also need to disable CPU hotplug.
2330 ++ * Unfortunately, letting ->attach() operations acquire cpus_read_lock() can
2331 ++ * lead to deadlocks.
2332 ++ *
2333 ++ * Bringing up a CPU may involve creating and destroying tasks which requires
2334 ++ * read-locking threadgroup_rwsem, so threadgroup_rwsem nests inside
2335 ++ * cpus_read_lock(). If we call an ->attach() which acquires the cpus lock while
2336 ++ * write-locking threadgroup_rwsem, the locking order is reversed and we end up
2337 ++ * waiting for an on-going CPU hotplug operation which in turn is waiting for
2338 ++ * the threadgroup_rwsem to be released to create new tasks. For more details:
2339 ++ *
2340 ++ * http://lkml.kernel.org/r/20220711174629.uehfmqegcwn2lqzu@wubuntu
2341 ++ *
2342 ++ * Resolve the situation by always acquiring cpus_read_lock() before optionally
2343 ++ * write-locking cgroup_threadgroup_rwsem. This allows ->attach() to assume that
2344 ++ * CPU hotplug is disabled on entry.
2345 ++ */
2346 ++static void cgroup_attach_lock(bool lock_threadgroup)
2347 ++{
2348 ++ cpus_read_lock();
2349 ++ if (lock_threadgroup)
2350 ++ percpu_down_write(&cgroup_threadgroup_rwsem);
2351 ++}
2352 ++
2353 ++/**
2354 ++ * cgroup_attach_unlock - Undo cgroup_attach_lock()
2355 ++ * @lock_threadgroup: whether to up_write cgroup_threadgroup_rwsem
2356 ++ */
2357 ++static void cgroup_attach_unlock(bool lock_threadgroup)
2358 ++{
2359 ++ if (lock_threadgroup)
2360 ++ percpu_up_write(&cgroup_threadgroup_rwsem);
2361 ++ cpus_read_unlock();
2362 ++}
2363 ++
2364 + /**
2365 + * cgroup_migrate_add_task - add a migration target task to a migration context
2366 + * @task: target task
2367 +@@ -2856,8 +2898,8 @@ int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
2368 + return ret;
2369 + }
2370 +
2371 +-struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup)
2372 +- __acquires(&cgroup_threadgroup_rwsem)
2373 ++struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
2374 ++ bool *threadgroup_locked)
2375 + {
2376 + struct task_struct *tsk;
2377 + pid_t pid;
2378 +@@ -2865,7 +2907,17 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup)
2379 + if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
2380 + return ERR_PTR(-EINVAL);
2381 +
2382 +- percpu_down_write(&cgroup_threadgroup_rwsem);
2383 ++ /*
2384 ++ * If we migrate a single thread, we don't care about threadgroup
2385 ++ * stability. If the thread is `current`, it won't exit(2) under our
2386 ++ * hands or change PID through exec(2). We exclude
2387 ++ * cgroup_update_dfl_csses and other cgroup_{proc,thread}s_write
2388 ++ * callers by cgroup_mutex.
2389 ++ * Therefore, we can skip the global lock.
2390 ++ */
2391 ++ lockdep_assert_held(&cgroup_mutex);
2392 ++ *threadgroup_locked = pid || threadgroup;
2393 ++ cgroup_attach_lock(*threadgroup_locked);
2394 +
2395 + rcu_read_lock();
2396 + if (pid) {
2397 +@@ -2896,14 +2948,14 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup)
2398 + goto out_unlock_rcu;
2399 +
2400 + out_unlock_threadgroup:
2401 +- percpu_up_write(&cgroup_threadgroup_rwsem);
2402 ++ cgroup_attach_unlock(*threadgroup_locked);
2403 ++ *threadgroup_locked = false;
2404 + out_unlock_rcu:
2405 + rcu_read_unlock();
2406 + return tsk;
2407 + }
2408 +
2409 +-void cgroup_procs_write_finish(struct task_struct *task)
2410 +- __releases(&cgroup_threadgroup_rwsem)
2411 ++void cgroup_procs_write_finish(struct task_struct *task, bool threadgroup_locked)
2412 + {
2413 + struct cgroup_subsys *ss;
2414 + int ssid;
2415 +@@ -2911,7 +2963,8 @@ void cgroup_procs_write_finish(struct task_struct *task)
2416 + /* release reference from cgroup_procs_write_start() */
2417 + put_task_struct(task);
2418 +
2419 +- percpu_up_write(&cgroup_threadgroup_rwsem);
2420 ++ cgroup_attach_unlock(threadgroup_locked);
2421 ++
2422 + for_each_subsys(ss, ssid)
2423 + if (ss->post_attach)
2424 + ss->post_attach();
2425 +@@ -2966,12 +3019,11 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
2426 + struct cgroup_subsys_state *d_css;
2427 + struct cgroup *dsct;
2428 + struct css_set *src_cset;
2429 ++ bool has_tasks;
2430 + int ret;
2431 +
2432 + lockdep_assert_held(&cgroup_mutex);
2433 +
2434 +- percpu_down_write(&cgroup_threadgroup_rwsem);
2435 +-
2436 + /* look up all csses currently attached to @cgrp's subtree */
2437 + spin_lock_irq(&css_set_lock);
2438 + cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
2439 +@@ -2982,6 +3034,15 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
2440 + }
2441 + spin_unlock_irq(&css_set_lock);
2442 +
2443 ++ /*
2444 ++ * We need to write-lock threadgroup_rwsem while migrating tasks.
2445 ++ * However, if there are no source csets for @cgrp, changing its
2446 ++ * controllers will not produce any task migrations and the
2447 ++ * write-locking can be skipped safely.
2448 ++ */
2449 ++ has_tasks = !list_empty(&mgctx.preloaded_src_csets);
2450 ++ cgroup_attach_lock(has_tasks);
2451 ++
2452 + /* NULL dst indicates self on default hierarchy */
2453 + ret = cgroup_migrate_prepare_dst(&mgctx);
2454 + if (ret)
2455 +@@ -3001,7 +3062,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
2456 + ret = cgroup_migrate_execute(&mgctx);
2457 + out_finish:
2458 + cgroup_migrate_finish(&mgctx);
2459 +- percpu_up_write(&cgroup_threadgroup_rwsem);
2460 ++ cgroup_attach_unlock(has_tasks);
2461 + return ret;
2462 + }
2463 +
2464 +@@ -4830,12 +4891,13 @@ static ssize_t cgroup_procs_write(struct kernfs_open_file *of,
2465 + struct task_struct *task;
2466 + const struct cred *saved_cred;
2467 + ssize_t ret;
2468 ++ bool threadgroup_locked;
2469 +
2470 + dst_cgrp = cgroup_kn_lock_live(of->kn, false);
2471 + if (!dst_cgrp)
2472 + return -ENODEV;
2473 +
2474 +- task = cgroup_procs_write_start(buf, true);
2475 ++ task = cgroup_procs_write_start(buf, true, &threadgroup_locked);
2476 + ret = PTR_ERR_OR_ZERO(task);
2477 + if (ret)
2478 + goto out_unlock;
2479 +@@ -4861,7 +4923,7 @@ static ssize_t cgroup_procs_write(struct kernfs_open_file *of,
2480 + ret = cgroup_attach_task(dst_cgrp, task, true);
2481 +
2482 + out_finish:
2483 +- cgroup_procs_write_finish(task);
2484 ++ cgroup_procs_write_finish(task, threadgroup_locked);
2485 + out_unlock:
2486 + cgroup_kn_unlock(of->kn);
2487 +
2488 +@@ -4881,6 +4943,7 @@ static ssize_t cgroup_threads_write(struct kernfs_open_file *of,
2489 + struct task_struct *task;
2490 + const struct cred *saved_cred;
2491 + ssize_t ret;
2492 ++ bool locked;
2493 +
2494 + buf = strstrip(buf);
2495 +
2496 +@@ -4888,7 +4951,7 @@ static ssize_t cgroup_threads_write(struct kernfs_open_file *of,
2497 + if (!dst_cgrp)
2498 + return -ENODEV;
2499 +
2500 +- task = cgroup_procs_write_start(buf, false);
2501 ++ task = cgroup_procs_write_start(buf, false, &locked);
2502 + ret = PTR_ERR_OR_ZERO(task);
2503 + if (ret)
2504 + goto out_unlock;
2505 +@@ -4919,7 +4982,7 @@ static ssize_t cgroup_threads_write(struct kernfs_open_file *of,
2506 + ret = cgroup_attach_task(dst_cgrp, task, false);
2507 +
2508 + out_finish:
2509 +- cgroup_procs_write_finish(task);
2510 ++ cgroup_procs_write_finish(task, locked);
2511 + out_unlock:
2512 + cgroup_kn_unlock(of->kn);
2513 +
2514 +diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
2515 +index b02eca235ba3f..9ba94a9a67aa4 100644
2516 +--- a/kernel/cgroup/cpuset.c
2517 ++++ b/kernel/cgroup/cpuset.c
2518 +@@ -2204,7 +2204,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
2519 + cgroup_taskset_first(tset, &css);
2520 + cs = css_cs(css);
2521 +
2522 +- cpus_read_lock();
2523 ++ lockdep_assert_cpus_held(); /* see cgroup_attach_lock() */
2524 + percpu_down_write(&cpuset_rwsem);
2525 +
2526 + /* prepare for attach */
2527 +@@ -2260,7 +2260,6 @@ static void cpuset_attach(struct cgroup_taskset *tset)
2528 + wake_up(&cpuset_attach_wq);
2529 +
2530 + percpu_up_write(&cpuset_rwsem);
2531 +- cpus_read_unlock();
2532 + }
2533 +
2534 + /* The various types of files and directories in a cpuset file system */
2535 +diff --git a/kernel/kprobes.c b/kernel/kprobes.c
2536 +index 9631ecc8a34c9..6e9f5a10e04ae 100644
2537 +--- a/kernel/kprobes.c
2538 ++++ b/kernel/kprobes.c
2539 +@@ -1596,6 +1596,7 @@ static int check_kprobe_address_safe(struct kprobe *p,
2540 + /* Ensure it is not in reserved area nor out of text */
2541 + if (!(core_kernel_text((unsigned long) p->addr) ||
2542 + is_module_text_address((unsigned long) p->addr)) ||
2543 ++ in_gate_area_no_mm((unsigned long) p->addr) ||
2544 + within_kprobe_blacklist((unsigned long) p->addr) ||
2545 + jump_label_text_reserved(p->addr, p->addr) ||
2546 + find_bug((unsigned long)p->addr)) {
2547 +diff --git a/mm/kmemleak.c b/mm/kmemleak.c
2548 +index 3761c79137b17..d8cde7292bf92 100644
2549 +--- a/mm/kmemleak.c
2550 ++++ b/mm/kmemleak.c
2551 +@@ -1123,7 +1123,7 @@ EXPORT_SYMBOL(kmemleak_no_scan);
2552 + void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
2553 + gfp_t gfp)
2554 + {
2555 +- if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
2556 ++ if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
2557 + kmemleak_alloc(__va(phys), size, min_count, gfp);
2558 + }
2559 + EXPORT_SYMBOL(kmemleak_alloc_phys);
2560 +@@ -1137,7 +1137,7 @@ EXPORT_SYMBOL(kmemleak_alloc_phys);
2561 + */
2562 + void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
2563 + {
2564 +- if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
2565 ++ if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
2566 + kmemleak_free_part(__va(phys), size);
2567 + }
2568 + EXPORT_SYMBOL(kmemleak_free_part_phys);
2569 +@@ -1149,7 +1149,7 @@ EXPORT_SYMBOL(kmemleak_free_part_phys);
2570 + */
2571 + void __ref kmemleak_not_leak_phys(phys_addr_t phys)
2572 + {
2573 +- if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
2574 ++ if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
2575 + kmemleak_not_leak(__va(phys));
2576 + }
2577 + EXPORT_SYMBOL(kmemleak_not_leak_phys);
2578 +@@ -1161,7 +1161,7 @@ EXPORT_SYMBOL(kmemleak_not_leak_phys);
2579 + */
2580 + void __ref kmemleak_ignore_phys(phys_addr_t phys)
2581 + {
2582 +- if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
2583 ++ if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
2584 + kmemleak_ignore(__va(phys));
2585 + }
2586 + EXPORT_SYMBOL(kmemleak_ignore_phys);
2587 +diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
2588 +index 19726d81025d5..01e33724d10c3 100644
2589 +--- a/net/bridge/br_netfilter_hooks.c
2590 ++++ b/net/bridge/br_netfilter_hooks.c
2591 +@@ -384,6 +384,7 @@ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_
2592 + /* - Bridged-and-DNAT'ed traffic doesn't
2593 + * require ip_forwarding. */
2594 + if (rt->dst.dev == dev) {
2595 ++ skb_dst_drop(skb);
2596 + skb_dst_set(skb, &rt->dst);
2597 + goto bridged_dnat;
2598 + }
2599 +@@ -413,6 +414,7 @@ bridged_dnat:
2600 + kfree_skb(skb);
2601 + return 0;
2602 + }
2603 ++ skb_dst_drop(skb);
2604 + skb_dst_set_noref(skb, &rt->dst);
2605 + }
2606 +
2607 +diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
2608 +index e4e0c836c3f51..6b07f30675bb0 100644
2609 +--- a/net/bridge/br_netfilter_ipv6.c
2610 ++++ b/net/bridge/br_netfilter_ipv6.c
2611 +@@ -197,6 +197,7 @@ static int br_nf_pre_routing_finish_ipv6(struct net *net, struct sock *sk, struc
2612 + kfree_skb(skb);
2613 + return 0;
2614 + }
2615 ++ skb_dst_drop(skb);
2616 + skb_dst_set_noref(skb, &rt->dst);
2617 + }
2618 +
2619 +diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
2620 +index ef3e7a3e3a29e..d38c8ca93ba09 100644
2621 +--- a/net/ipv4/fib_frontend.c
2622 ++++ b/net/ipv4/fib_frontend.c
2623 +@@ -399,7 +399,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
2624 + dev_match = dev_match || (res.type == RTN_LOCAL &&
2625 + dev == net->loopback_dev);
2626 + if (dev_match) {
2627 +- ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_HOST;
2628 ++ ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_LINK;
2629 + return ret;
2630 + }
2631 + if (no_addr)
2632 +@@ -411,7 +411,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
2633 + ret = 0;
2634 + if (fib_lookup(net, &fl4, &res, FIB_LOOKUP_IGNORE_LINKSTATE) == 0) {
2635 + if (res.type == RTN_UNICAST)
2636 +- ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_HOST;
2637 ++ ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_LINK;
2638 + }
2639 + return ret;
2640 +
2641 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
2642 +index f4e00ff909da3..0ebba83dbe220 100644
2643 +--- a/net/ipv4/tcp_input.c
2644 ++++ b/net/ipv4/tcp_input.c
2645 +@@ -2384,6 +2384,21 @@ static inline bool tcp_may_undo(const struct tcp_sock *tp)
2646 + return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp));
2647 + }
2648 +
2649 ++static bool tcp_is_non_sack_preventing_reopen(struct sock *sk)
2650 ++{
2651 ++ struct tcp_sock *tp = tcp_sk(sk);
2652 ++
2653 ++ if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
2654 ++ /* Hold old state until something *above* high_seq
2655 ++ * is ACKed. For Reno it is MUST to prevent false
2656 ++ * fast retransmits (RFC2582). SACK TCP is safe. */
2657 ++ if (!tcp_any_retrans_done(sk))
2658 ++ tp->retrans_stamp = 0;
2659 ++ return true;
2660 ++ }
2661 ++ return false;
2662 ++}
2663 ++
2664 + /* People celebrate: "We love our President!" */
2665 + static bool tcp_try_undo_recovery(struct sock *sk)
2666 + {
2667 +@@ -2406,14 +2421,8 @@ static bool tcp_try_undo_recovery(struct sock *sk)
2668 + } else if (tp->rack.reo_wnd_persist) {
2669 + tp->rack.reo_wnd_persist--;
2670 + }
2671 +- if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
2672 +- /* Hold old state until something *above* high_seq
2673 +- * is ACKed. For Reno it is MUST to prevent false
2674 +- * fast retransmits (RFC2582). SACK TCP is safe. */
2675 +- if (!tcp_any_retrans_done(sk))
2676 +- tp->retrans_stamp = 0;
2677 ++ if (tcp_is_non_sack_preventing_reopen(sk))
2678 + return true;
2679 +- }
2680 + tcp_set_ca_state(sk, TCP_CA_Open);
2681 + tp->is_sack_reneg = 0;
2682 + return false;
2683 +@@ -2449,6 +2458,8 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
2684 + NET_INC_STATS(sock_net(sk),
2685 + LINUX_MIB_TCPSPURIOUSRTOS);
2686 + inet_csk(sk)->icsk_retransmits = 0;
2687 ++ if (tcp_is_non_sack_preventing_reopen(sk))
2688 ++ return true;
2689 + if (frto_undo || tcp_is_sack(tp)) {
2690 + tcp_set_ca_state(sk, TCP_CA_Open);
2691 + tp->is_sack_reneg = 0;
2692 +@@ -3484,11 +3495,11 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
2693 +
2694 + /* Then check host-wide RFC 5961 rate limit. */
2695 + now = jiffies / HZ;
2696 +- if (now != challenge_timestamp) {
2697 ++ if (now != READ_ONCE(challenge_timestamp)) {
2698 + u32 ack_limit = READ_ONCE(net->ipv4.sysctl_tcp_challenge_ack_limit);
2699 + u32 half = (ack_limit + 1) >> 1;
2700 +
2701 +- challenge_timestamp = now;
2702 ++ WRITE_ONCE(challenge_timestamp, now);
2703 + WRITE_ONCE(challenge_count, half + prandom_u32_max(ack_limit));
2704 + }
2705 + count = READ_ONCE(challenge_count);
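The challenge-ACK hunk annotates a deliberately lockless shared word: challenge_timestamp is read and written from many CPUs, so the accesses are wrapped in READ_ONCE()/WRITE_ONCE() to keep the compiler from tearing or caching them. The same pattern applied to an invented per-second budget variable:

#include <linux/compiler.h>
#include <linux/jiffies.h>
#include <linux/types.h>

static u32 my_last_refill;               /* shared, intentionally unlocked */

static void my_maybe_refill(void)
{
    u32 now = jiffies / HZ;

    if (now != READ_ONCE(my_last_refill)) {
        WRITE_ONCE(my_last_refill, now); /* racy by design, but no torn access */
        /* ... refill the per-second budget here ... */
    }
}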
2706 +diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
2707 +index 75421a472d25a..f5c448c276fef 100644
2708 +--- a/net/ipv6/seg6.c
2709 ++++ b/net/ipv6/seg6.c
2710 +@@ -125,6 +125,11 @@ static int seg6_genl_sethmac(struct sk_buff *skb, struct genl_info *info)
2711 + goto out_unlock;
2712 + }
2713 +
2714 ++ if (slen > nla_len(info->attrs[SEG6_ATTR_SECRET])) {
2715 ++ err = -EINVAL;
2716 ++ goto out_unlock;
2717 ++ }
2718 ++
2719 + if (hinfo) {
2720 + err = seg6_hmac_info_del(net, hmackeyid);
2721 + if (err)
2722 +diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
2723 +index ea9e73428ed9c..659a589b1fad1 100644
2724 +--- a/net/kcm/kcmsock.c
2725 ++++ b/net/kcm/kcmsock.c
2726 +@@ -1413,12 +1413,6 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
2727 + psock->sk = csk;
2728 + psock->bpf_prog = prog;
2729 +
2730 +- err = strp_init(&psock->strp, csk, &cb);
2731 +- if (err) {
2732 +- kmem_cache_free(kcm_psockp, psock);
2733 +- goto out;
2734 +- }
2735 +-
2736 + write_lock_bh(&csk->sk_callback_lock);
2737 +
2738 + /* Check if sk_user_data is already used by KCM or someone else.
2739 +@@ -1426,13 +1420,18 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
2740 + */
2741 + if (csk->sk_user_data) {
2742 + write_unlock_bh(&csk->sk_callback_lock);
2743 +- strp_stop(&psock->strp);
2744 +- strp_done(&psock->strp);
2745 + kmem_cache_free(kcm_psockp, psock);
2746 + err = -EALREADY;
2747 + goto out;
2748 + }
2749 +
2750 ++ err = strp_init(&psock->strp, csk, &cb);
2751 ++ if (err) {
2752 ++ write_unlock_bh(&csk->sk_callback_lock);
2753 ++ kmem_cache_free(kcm_psockp, psock);
2754 ++ goto out;
2755 ++ }
2756 ++
2757 + psock->save_data_ready = csk->sk_data_ready;
2758 + psock->save_write_space = csk->sk_write_space;
2759 + psock->save_state_change = csk->sk_state_change;
2760 +diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
2761 +index 0e26c83b6b412..d5b8568591d4c 100644
2762 +--- a/net/mac80211/ibss.c
2763 ++++ b/net/mac80211/ibss.c
2764 +@@ -542,6 +542,10 @@ int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata)
2765 +
2766 + sdata_assert_lock(sdata);
2767 +
2768 ++ /* When not connected/joined, sending CSA doesn't make sense. */
2769 ++ if (ifibss->state != IEEE80211_IBSS_MLME_JOINED)
2770 ++ return -ENOLINK;
2771 ++
2772 + /* update cfg80211 bss information with the new channel */
2773 + if (!is_zero_ether_addr(ifibss->bssid)) {
2774 + cbss = cfg80211_get_bss(sdata->local->hw.wiphy,
2775 +diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
2776 +index b8ce84618a55b..c439125ef2b91 100644
2777 +--- a/net/mac802154/rx.c
2778 ++++ b/net/mac802154/rx.c
2779 +@@ -44,7 +44,7 @@ ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata,
2780 +
2781 + switch (mac_cb(skb)->dest.mode) {
2782 + case IEEE802154_ADDR_NONE:
2783 +- if (mac_cb(skb)->dest.mode != IEEE802154_ADDR_NONE)
2784 ++ if (hdr->source.mode != IEEE802154_ADDR_NONE)
2785 + /* FIXME: check if we are PAN coordinator */
2786 + skb->pkt_type = PACKET_OTHERHOST;
2787 + else
2788 +diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
2789 +index e40988a2f22fb..26245419ef4a9 100644
2790 +--- a/net/netfilter/nf_conntrack_irc.c
2791 ++++ b/net/netfilter/nf_conntrack_irc.c
2792 +@@ -185,8 +185,9 @@ static int help(struct sk_buff *skb, unsigned int protoff,
2793 +
2794 + /* dcc_ip can be the internal OR external (NAT'ed) IP */
2795 + tuple = &ct->tuplehash[dir].tuple;
2796 +- if (tuple->src.u3.ip != dcc_ip &&
2797 +- tuple->dst.u3.ip != dcc_ip) {
2798 ++ if ((tuple->src.u3.ip != dcc_ip &&
2799 ++ ct->tuplehash[!dir].tuple.dst.u3.ip != dcc_ip) ||
2800 ++ dcc_port == 0) {
2801 + net_warn_ratelimited("Forged DCC command from %pI4: %pI4:%u\n",
2802 + &tuple->src.u3.ip,
2803 + &dcc_ip, dcc_port);
2804 +diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
2805 +index 52a24d4ef5d8a..2ba61971f6231 100644
2806 +--- a/net/rxrpc/rxkad.c
2807 ++++ b/net/rxrpc/rxkad.c
2808 +@@ -451,7 +451,7 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
2809 + * directly into the target buffer.
2810 + */
2811 + sg = _sg;
2812 +- nsg = skb_shinfo(skb)->nr_frags;
2813 ++ nsg = skb_shinfo(skb)->nr_frags + 1;
2814 + if (nsg <= 4) {
2815 + nsg = 4;
2816 + } else {
2817 +diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
2818 +index 4074c50ac3d73..3aa6b4dcb1c8e 100644
2819 +--- a/net/sched/sch_sfb.c
2820 ++++ b/net/sched/sch_sfb.c
2821 +@@ -135,15 +135,15 @@ static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
2822 + }
2823 + }
2824 +
2825 +-static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
2826 ++static void increment_qlen(const struct sfb_skb_cb *cb, struct sfb_sched_data *q)
2827 + {
2828 + u32 sfbhash;
2829 +
2830 +- sfbhash = sfb_hash(skb, 0);
2831 ++ sfbhash = cb->hashes[0];
2832 + if (sfbhash)
2833 + increment_one_qlen(sfbhash, 0, q);
2834 +
2835 +- sfbhash = sfb_hash(skb, 1);
2836 ++ sfbhash = cb->hashes[1];
2837 + if (sfbhash)
2838 + increment_one_qlen(sfbhash, 1, q);
2839 + }
2840 +@@ -281,8 +281,10 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
2841 + {
2842 +
2843 + struct sfb_sched_data *q = qdisc_priv(sch);
2844 ++ unsigned int len = qdisc_pkt_len(skb);
2845 + struct Qdisc *child = q->qdisc;
2846 + struct tcf_proto *fl;
2847 ++ struct sfb_skb_cb cb;
2848 + int i;
2849 + u32 p_min = ~0;
2850 + u32 minqlen = ~0;
2851 +@@ -399,11 +401,12 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
2852 + }
2853 +
2854 + enqueue:
2855 ++ memcpy(&cb, sfb_skb_cb(skb), sizeof(cb));
2856 + ret = qdisc_enqueue(skb, child, to_free);
2857 + if (likely(ret == NET_XMIT_SUCCESS)) {
2858 +- qdisc_qstats_backlog_inc(sch, skb);
2859 ++ sch->qstats.backlog += len;
2860 + sch->q.qlen++;
2861 +- increment_qlen(skb, q);
2862 ++ increment_qlen(&cb, q);
2863 + } else if (net_xmit_drop_count(ret)) {
2864 + q->stats.childdrop++;
2865 + qdisc_qstats_drop(sch);
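The sch_sfb fix follows the rule that nothing may touch an skb after it has been handed to qdisc_enqueue(): the child qdisc may free it or overwrite its cb[] area. Everything still needed afterwards (length, cached hashes) is copied out first. A compressed sketch of that discipline with made-up names:

#include <net/sch_generic.h>

static int my_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                      struct Qdisc *child, struct sk_buff **to_free)
{
    unsigned int len = qdisc_pkt_len(skb);   /* cache before enqueue */
    int ret = qdisc_enqueue(skb, child, to_free);

    if (ret == NET_XMIT_SUCCESS) {
        sch->qstats.backlog += len;          /* do not dereference skb here */
        sch->q.qlen++;
    }
    return ret;
}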
2866 +diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
2867 +index 5f72f3f916a5a..a7f60bb2dd513 100644
2868 +--- a/net/sched/sch_tbf.c
2869 ++++ b/net/sched/sch_tbf.c
2870 +@@ -297,6 +297,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
2871 + struct nlattr *tb[TCA_TBF_MAX + 1];
2872 + struct tc_tbf_qopt *qopt;
2873 + struct Qdisc *child = NULL;
2874 ++ struct Qdisc *old = NULL;
2875 + struct psched_ratecfg rate;
2876 + struct psched_ratecfg peak;
2877 + u64 max_size;
2878 +@@ -388,7 +389,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
2879 + sch_tree_lock(sch);
2880 + if (child) {
2881 + qdisc_tree_flush_backlog(q->qdisc);
2882 +- qdisc_put(q->qdisc);
2883 ++ old = q->qdisc;
2884 + q->qdisc = child;
2885 + }
2886 + q->limit = qopt->limit;
2887 +@@ -408,6 +409,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
2888 + memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));
2889 +
2890 + sch_tree_unlock(sch);
2891 ++ qdisc_put(old);
2892 + err = 0;
2893 + done:
2894 + return err;
2895 +diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
2896 +index 394491692a078..5d696b7fb47e1 100644
2897 +--- a/net/smc/af_smc.c
2898 ++++ b/net/smc/af_smc.c
2899 +@@ -1093,7 +1093,6 @@ static void smc_listen_out_connected(struct smc_sock *new_smc)
2900 + {
2901 + struct sock *newsmcsk = &new_smc->sk;
2902 +
2903 +- sk_refcnt_debug_inc(newsmcsk);
2904 + if (newsmcsk->sk_state == SMC_INIT)
2905 + newsmcsk->sk_state = SMC_ACTIVE;
2906 +
2907 +diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
2908 +index e7155a7743001..0b9ad3b5ff18a 100644
2909 +--- a/net/tipc/monitor.c
2910 ++++ b/net/tipc/monitor.c
2911 +@@ -130,7 +130,7 @@ static void map_set(u64 *up_map, int i, unsigned int v)
2912 +
2913 + static int map_get(u64 up_map, int i)
2914 + {
2915 +- return (up_map & (1 << i)) >> i;
2916 ++ return (up_map & (1ULL << i)) >> i;
2917 + }
2918 +
2919 + static struct tipc_peer *peer_prev(struct tipc_peer *peer)
2920 +diff --git a/net/wireless/debugfs.c b/net/wireless/debugfs.c
2921 +index 76b845f68ac89..d80b06d669593 100644
2922 +--- a/net/wireless/debugfs.c
2923 ++++ b/net/wireless/debugfs.c
2924 +@@ -65,9 +65,10 @@ static ssize_t ht40allow_map_read(struct file *file,
2925 + {
2926 + struct wiphy *wiphy = file->private_data;
2927 + char *buf;
2928 +- unsigned int offset = 0, buf_size = PAGE_SIZE, i, r;
2929 ++ unsigned int offset = 0, buf_size = PAGE_SIZE, i;
2930 + enum nl80211_band band;
2931 + struct ieee80211_supported_band *sband;
2932 ++ ssize_t r;
2933 +
2934 + buf = kzalloc(buf_size, GFP_KERNEL);
2935 + if (!buf)
2936 +diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c
2937 +index 2ddfe22266517..f73ee0798aeab 100644
2938 +--- a/sound/core/seq/oss/seq_oss_midi.c
2939 ++++ b/sound/core/seq/oss/seq_oss_midi.c
2940 +@@ -267,7 +267,9 @@ snd_seq_oss_midi_clear_all(void)
2941 + void
2942 + snd_seq_oss_midi_setup(struct seq_oss_devinfo *dp)
2943 + {
2944 ++ spin_lock_irq(&register_lock);
2945 + dp->max_mididev = max_midi_devs;
2946 ++ spin_unlock_irq(&register_lock);
2947 + }
2948 +
2949 + /*
2950 +diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
2951 +index cc93157fa9500..0363670a56e7c 100644
2952 +--- a/sound/core/seq/seq_clientmgr.c
2953 ++++ b/sound/core/seq/seq_clientmgr.c
2954 +@@ -121,13 +121,13 @@ struct snd_seq_client *snd_seq_client_use_ptr(int clientid)
2955 + spin_unlock_irqrestore(&clients_lock, flags);
2956 + #ifdef CONFIG_MODULES
2957 + if (!in_interrupt()) {
2958 +- static char client_requested[SNDRV_SEQ_GLOBAL_CLIENTS];
2959 +- static char card_requested[SNDRV_CARDS];
2960 ++ static DECLARE_BITMAP(client_requested, SNDRV_SEQ_GLOBAL_CLIENTS);
2961 ++ static DECLARE_BITMAP(card_requested, SNDRV_CARDS);
2962 ++
2963 + if (clientid < SNDRV_SEQ_GLOBAL_CLIENTS) {
2964 + int idx;
2965 +
2966 +- if (!client_requested[clientid]) {
2967 +- client_requested[clientid] = 1;
2968 ++ if (!test_and_set_bit(clientid, client_requested)) {
2969 + for (idx = 0; idx < 15; idx++) {
2970 + if (seq_client_load[idx] < 0)
2971 + break;
2972 +@@ -142,10 +142,8 @@ struct snd_seq_client *snd_seq_client_use_ptr(int clientid)
2973 + int card = (clientid - SNDRV_SEQ_GLOBAL_CLIENTS) /
2974 + SNDRV_SEQ_CLIENTS_PER_CARD;
2975 + if (card < snd_ecards_limit) {
2976 +- if (! card_requested[card]) {
2977 +- card_requested[card] = 1;
2978 ++ if (!test_and_set_bit(card, card_requested))
2979 + snd_request_card(card);
2980 +- }
2981 + snd_seq_device_load_drivers();
2982 + }
2983 + }
2984 +diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
2985 +index 452b9eaca815b..474347aba50e6 100644
2986 +--- a/sound/drivers/aloop.c
2987 ++++ b/sound/drivers/aloop.c
2988 +@@ -463,17 +463,18 @@ static unsigned int loopback_pos_update(struct loopback_cable *cable)
2989 + cable->streams[SNDRV_PCM_STREAM_PLAYBACK];
2990 + struct loopback_pcm *dpcm_capt =
2991 + cable->streams[SNDRV_PCM_STREAM_CAPTURE];
2992 +- unsigned long delta_play = 0, delta_capt = 0;
2993 ++ unsigned long delta_play = 0, delta_capt = 0, cur_jiffies;
2994 + unsigned int running, count1, count2;
2995 +
2996 ++ cur_jiffies = jiffies;
2997 + running = cable->running ^ cable->pause;
2998 + if (running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) {
2999 +- delta_play = jiffies - dpcm_play->last_jiffies;
3000 ++ delta_play = cur_jiffies - dpcm_play->last_jiffies;
3001 + dpcm_play->last_jiffies += delta_play;
3002 + }
3003 +
3004 + if (running & (1 << SNDRV_PCM_STREAM_CAPTURE)) {
3005 +- delta_capt = jiffies - dpcm_capt->last_jiffies;
3006 ++ delta_capt = cur_jiffies - dpcm_capt->last_jiffies;
3007 + dpcm_capt->last_jiffies += delta_capt;
3008 + }
3009 +
3010 +diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
3011 +index 6530a55fb8780..2cea3d3ee54dc 100644
3012 +--- a/sound/pci/emu10k1/emupcm.c
3013 ++++ b/sound/pci/emu10k1/emupcm.c
3014 +@@ -123,7 +123,7 @@ static int snd_emu10k1_pcm_channel_alloc(struct snd_emu10k1_pcm * epcm, int voic
3015 + epcm->voices[0]->epcm = epcm;
3016 + if (voices > 1) {
3017 + for (i = 1; i < voices; i++) {
3018 +- epcm->voices[i] = &epcm->emu->voices[epcm->voices[0]->number + i];
3019 ++ epcm->voices[i] = &epcm->emu->voices[(epcm->voices[0]->number + i) % NUM_G];
3020 + epcm->voices[i]->epcm = epcm;
3021 + }
3022 + }
3023 +diff --git a/sound/usb/stream.c b/sound/usb/stream.c
3024 +index eff1ac1dc9ba3..d35684e5f07f0 100644
3025 +--- a/sound/usb/stream.c
3026 ++++ b/sound/usb/stream.c
3027 +@@ -1103,7 +1103,7 @@ static int __snd_usb_parse_audio_interface(struct snd_usb_audio *chip,
3028 + * Dallas DS4201 workaround: It presents 5 altsettings, but the last
3029 + * one misses syncpipe, and does not produce any sound.
3030 + */
3031 +- if (chip->usb_id == USB_ID(0x04fa, 0x4201))
3032 ++ if (chip->usb_id == USB_ID(0x04fa, 0x4201) && num >= 4)
3033 + num = 4;
3034 +
3035 + for (i = 0; i < num; i++) {