Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.19 commit in: /
Date: Thu, 08 Sep 2022 10:45:32
Message-Id: 1662633914.036eac410993ee6f8ed2440ba5bf687f2733eda5.mpagano@gentoo
commit: 036eac410993ee6f8ed2440ba5bf687f2733eda5
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Sep 8 10:45:14 2022 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Sep 8 10:45:14 2022 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=036eac41

Linux patch 5.19.8

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1007_linux-5.19.8.patch | 6128 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 6132 insertions(+)

diff --git a/0000_README b/0000_README
index e6423950..d9225608 100644
--- a/0000_README
+++ b/0000_README
@@ -71,6 +71,10 @@ Patch: 1006_linux-5.19.7.patch
From: http://www.kernel.org
Desc: Linux 5.19.7

+Patch: 1007_linux-5.19.8.patch
+From: http://www.kernel.org
+Desc: Linux 5.19.8
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1007_linux-5.19.8.patch b/1007_linux-5.19.8.patch
new file mode 100644
index 00000000..7bcf0c97
--- /dev/null
+++ b/1007_linux-5.19.8.patch
@@ -0,0 +1,6128 @@
+diff --git a/Makefile b/Makefile
+index 3d88923df694e..e361c6230e9e5 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 19
+-SUBLEVEL = 7
++SUBLEVEL = 8
+ EXTRAVERSION =
+ NAME = Superb Owl
+
+diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
+index 889951291cc0f..a11a6e14ba89f 100644
+--- a/arch/arm64/kernel/machine_kexec_file.c
++++ b/arch/arm64/kernel/machine_kexec_file.c
+@@ -47,7 +47,7 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
+ u64 i;
+ phys_addr_t start, end;
+
+- nr_ranges = 1; /* for exclusion of crashkernel region */
++ nr_ranges = 2; /* for exclusion of crashkernel region */
+ for_each_mem_range(i, &start, &end)
+ nr_ranges++;
+
+diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
+index 8dddd34b8ecf1..fabc82f5e935d 100644
+--- a/arch/powerpc/include/asm/firmware.h
++++ b/arch/powerpc/include/asm/firmware.h
+@@ -82,6 +82,8 @@ enum {
+ FW_FEATURE_POWERNV_ALWAYS = 0,
+ FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
+ FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
++ FW_FEATURE_NATIVE_POSSIBLE = 0,
++ FW_FEATURE_NATIVE_ALWAYS = 0,
+ FW_FEATURE_POSSIBLE =
+#ifdef CONFIG_PPC_PSERIES
+ FW_FEATURE_PSERIES_POSSIBLE |
+@@ -91,6 +93,9 @@ enum {
+#endif
+#ifdef CONFIG_PPC_PS3
+ FW_FEATURE_PS3_POSSIBLE |
++#endif
++#ifdef CONFIG_PPC_HASH_MMU_NATIVE
++ FW_FEATURE_NATIVE_ALWAYS |
+#endif
+ 0,
+ FW_FEATURE_ALWAYS =
+@@ -102,6 +107,9 @@ enum {
+#endif
+#ifdef CONFIG_PPC_PS3
+ FW_FEATURE_PS3_ALWAYS &
++#endif
++#ifdef CONFIG_PPC_HASH_MMU_NATIVE
++ FW_FEATURE_NATIVE_ALWAYS &
+#endif
+ FW_FEATURE_POSSIBLE,
+
+diff --git a/arch/powerpc/kernel/rtas_entry.S b/arch/powerpc/kernel/rtas_entry.S
+index 9a434d42e660a..6ce95ddadbcdb 100644
+--- a/arch/powerpc/kernel/rtas_entry.S
++++ b/arch/powerpc/kernel/rtas_entry.S
+@@ -109,8 +109,12 @@ __enter_rtas:
+ * its critical regions (as specified in PAPR+ section 7.2.1). MSR[S]
+ * is not impacted by RFI_TO_KERNEL (only urfid can unset it). So if
+ * MSR[S] is set, it will remain when entering RTAS.
++ * If we're in HV mode, RTAS must also run in HV mode, so extract MSR_HV
++ * from the saved MSR value and insert into the value RTAS will use.
+ */
++ extrdi r0, r6, 1, 63 - MSR_HV_LG
+ LOAD_REG_IMMEDIATE(r6, MSR_ME | MSR_RI)
++ insrdi r6, r0, 1, 63 - MSR_HV_LG
+
+ li r0,0
+ mtmsrd r0,1 /* disable RI before using SRR0/1 */
+diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
+index cb3358886203e..6c1db3b6de2dc 100644
+--- a/arch/powerpc/kernel/systbl.S
++++ b/arch/powerpc/kernel/systbl.S
+@@ -18,6 +18,7 @@
+ .p2align 3
+#define __SYSCALL(nr, entry) .8byte entry
+#else
++ .p2align 2
+#define __SYSCALL(nr, entry) .long entry
+#endif
+
+diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
+index 82cae08976bcd..92074a6c49d43 100644
+--- a/arch/powerpc/platforms/pseries/papr_scm.c
++++ b/arch/powerpc/platforms/pseries/papr_scm.c
+@@ -124,9 +124,6 @@ struct papr_scm_priv {
+
+ /* The bits which needs to be overridden */
+ u64 health_bitmap_inject_mask;
+-
+- /* array to have event_code and stat_id mappings */
+- u8 *nvdimm_events_map;
+ };
+
+ static int papr_scm_pmem_flush(struct nd_region *nd_region,
+@@ -350,6 +347,25 @@ static ssize_t drc_pmem_query_stats(struct papr_scm_priv *p,
+ #ifdef CONFIG_PERF_EVENTS
+ #define to_nvdimm_pmu(_pmu) container_of(_pmu, struct nvdimm_pmu, pmu)
+
++static const char * const nvdimm_events_map[] = {
++ [1] = "CtlResCt",
++ [2] = "CtlResTm",
++ [3] = "PonSecs ",
++ [4] = "MemLife ",
++ [5] = "CritRscU",
++ [6] = "HostLCnt",
++ [7] = "HostSCnt",
++ [8] = "HostSDur",
++ [9] = "HostLDur",
++ [10] = "MedRCnt ",
++ [11] = "MedWCnt ",
++ [12] = "MedRDur ",
++ [13] = "MedWDur ",
++ [14] = "CchRHCnt",
++ [15] = "CchWHCnt",
++ [16] = "FastWCnt",
++};
++
+ static int papr_scm_pmu_get_value(struct perf_event *event, struct device *dev, u64 *count)
+ {
+ struct papr_scm_perf_stat *stat;
+@@ -357,11 +373,15 @@ static int papr_scm_pmu_get_value(struct perf_event *event, struct device *dev,
+ struct papr_scm_priv *p = (struct papr_scm_priv *)dev->driver_data;
+ int rc, size;
+
++ /* Invalid eventcode */
++ if (event->attr.config == 0 || event->attr.config >= ARRAY_SIZE(nvdimm_events_map))
++ return -EINVAL;
++
+ /* Allocate request buffer enough to hold single performance stat */
+ size = sizeof(struct papr_scm_perf_stats) +
+ sizeof(struct papr_scm_perf_stat);
+
+- if (!p || !p->nvdimm_events_map)
++ if (!p)
+ return -EINVAL;
+
+ stats = kzalloc(size, GFP_KERNEL);
+@@ -370,7 +390,7 @@ static int papr_scm_pmu_get_value(struct perf_event *event, struct device *dev,
+
+ stat = &stats->scm_statistic[0];
+ memcpy(&stat->stat_id,
+- &p->nvdimm_events_map[event->attr.config * sizeof(stat->stat_id)],
++ nvdimm_events_map[event->attr.config],
+ sizeof(stat->stat_id));
+ stat->stat_val = 0;
+
+@@ -458,56 +478,6 @@ static void papr_scm_pmu_del(struct perf_event *event, int flags)
+ papr_scm_pmu_read(event);
+ }
+
+-static int papr_scm_pmu_check_events(struct papr_scm_priv *p, struct nvdimm_pmu *nd_pmu)
+-{
+- struct papr_scm_perf_stat *stat;
+- struct papr_scm_perf_stats *stats;
+- u32 available_events;
+- int index, rc = 0;
+-
+- if (!p->stat_buffer_len)
+- return -ENOENT;
+-
+- available_events = (p->stat_buffer_len - sizeof(struct papr_scm_perf_stats))
+- / sizeof(struct papr_scm_perf_stat);
+- if (available_events == 0)
+- return -EOPNOTSUPP;
+-
+- /* Allocate the buffer for phyp where stats are written */
+- stats = kzalloc(p->stat_buffer_len, GFP_KERNEL);
+- if (!stats) {
+- rc = -ENOMEM;
+- return rc;
+- }
+-
+- /* Called to get list of events supported */
+- rc = drc_pmem_query_stats(p, stats, 0);
+- if (rc)
+- goto out;
+-
+- /*
+- * Allocate memory and populate nvdimm_event_map.
+- * Allocate an extra element for NULL entry
+- */
+- p->nvdimm_events_map = kcalloc(available_events + 1,
+- sizeof(stat->stat_id),
+- GFP_KERNEL);
+- if (!p->nvdimm_events_map) {
+- rc = -ENOMEM;
+- goto out;
+- }
+-
+- /* Copy all stat_ids to event map */
+- for (index = 0, stat = stats->scm_statistic;
+- index < available_events; index++, ++stat) {
+- memcpy(&p->nvdimm_events_map[index * sizeof(stat->stat_id)],
+- &stat->stat_id, sizeof(stat->stat_id));
+- }
+-out:
+- kfree(stats);
+- return rc;
+-}
+-
+ static void papr_scm_pmu_register(struct papr_scm_priv *p)
+ {
+ struct nvdimm_pmu *nd_pmu;
+@@ -519,9 +489,10 @@ static void papr_scm_pmu_register(struct papr_scm_priv *p)
+ goto pmu_err_print;
+ }
+
+- rc = papr_scm_pmu_check_events(p, nd_pmu);
+- if (rc)
++ if (!p->stat_buffer_len) {
++ rc = -ENOENT;
+ goto pmu_check_events_err;
++ }
+
+ nd_pmu->pmu.task_ctx_nr = perf_invalid_context;
+ nd_pmu->pmu.name = nvdimm_name(p->nvdimm);
+@@ -539,7 +510,7 @@ static void papr_scm_pmu_register(struct papr_scm_priv *p)
+
+ rc = register_nvdimm_pmu(nd_pmu, p->pdev);
+ if (rc)
+- goto pmu_register_err;
++ goto pmu_check_events_err;
+
+ /*
+ * Set archdata.priv value to nvdimm_pmu structure, to handle the
+@@ -548,8 +519,6 @@ static void papr_scm_pmu_register(struct papr_scm_priv *p)
+ p->pdev->archdata.priv = nd_pmu;
+ return;
+
+-pmu_register_err:
+- kfree(p->nvdimm_events_map);
+ pmu_check_events_err:
+ kfree(nd_pmu);
+ pmu_err_print:
+@@ -1560,7 +1529,6 @@ static int papr_scm_remove(struct platform_device *pdev)
+ unregister_nvdimm_pmu(pdev->archdata.priv);
+
+ pdev->archdata.priv = NULL;
+- kfree(p->nvdimm_events_map);
+ kfree(p->bus_desc.provider_name);
+ kfree(p);
+
+diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi.h b/arch/riscv/include/asm/kvm_vcpu_sbi.h
+index 83d6d4d2b1dff..26a446a34057b 100644
+--- a/arch/riscv/include/asm/kvm_vcpu_sbi.h
++++ b/arch/riscv/include/asm/kvm_vcpu_sbi.h
+@@ -33,4 +33,16 @@ void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
+ u32 type, u64 flags);
+ const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(unsigned long extid);
+
++#ifdef CONFIG_RISCV_SBI_V01
++extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01;
++#endif
++extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_base;
++extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time;
++extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi;
++extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence;
++extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst;
++extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm;
++extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental;
++extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor;
++
+ #endif /* __RISCV_KVM_VCPU_SBI_H__ */
+diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
+index d45e7da3f0d32..f96991d230bfc 100644
+--- a/arch/riscv/kvm/vcpu_sbi.c
++++ b/arch/riscv/kvm/vcpu_sbi.c
+@@ -32,23 +32,13 @@ static int kvm_linux_err_map_sbi(int err)
+ };
+ }
+
+-#ifdef CONFIG_RISCV_SBI_V01
+-extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01;
+-#else
++#ifndef CONFIG_RISCV_SBI_V01
+ static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
+ .extid_start = -1UL,
+ .extid_end = -1UL,
+ .handler = NULL,
+ };
+ #endif
+-extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_base;
+-extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time;
+-extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi;
+-extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence;
+-extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst;
+-extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm;
+-extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental;
+-extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor;
+
+ static const struct kvm_vcpu_sbi_extension *sbi_ext[] = {
+ &vcpu_sbi_ext_v01,
+diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
+index 5e49e4b4a4ccc..86c56616e5dea 100644
+--- a/arch/riscv/mm/pageattr.c
++++ b/arch/riscv/mm/pageattr.c
+@@ -118,10 +118,10 @@ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
+ if (!numpages)
+ return 0;
+
+- mmap_read_lock(&init_mm);
++ mmap_write_lock(&init_mm);
+ ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
+ &masks);
+- mmap_read_unlock(&init_mm);
++ mmap_write_unlock(&init_mm);
+
+ flush_tlb_kernel_range(start, end);
+
+diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
+index f22beda9e6d5c..ccdbccfde148c 100644
+--- a/arch/s390/include/asm/hugetlb.h
++++ b/arch/s390/include/asm/hugetlb.h
+@@ -28,9 +28,11 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ static inline int prepare_hugepage_range(struct file *file,
+ unsigned long addr, unsigned long len)
+ {
+- if (len & ~HPAGE_MASK)
++ struct hstate *h = hstate_file(file);
++
++ if (len & ~huge_page_mask(h))
+ return -EINVAL;
+- if (addr & ~HPAGE_MASK)
++ if (addr & ~huge_page_mask(h))
+ return -EINVAL;
+ return 0;
+ }
+diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
+index 2e526f11b91e2..5ea3830af0ccf 100644
+--- a/arch/s390/kernel/vmlinux.lds.S
++++ b/arch/s390/kernel/vmlinux.lds.S
+@@ -131,6 +131,7 @@ SECTIONS
+ /*
+ * Table with the patch locations to undo expolines
+ */
++ . = ALIGN(4);
+ .nospec_call_table : {
+ __nospec_call_start = . ;
+ *(.s390_indirect*)
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 0aaea87a14597..b09a50e0af29d 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -835,8 +835,7 @@ static bool msr_write_intercepted(struct vcpu_vmx *vmx, u32 msr)
+ if (!(exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS))
+ return true;
+
+- return vmx_test_msr_bitmap_write(vmx->loaded_vmcs->msr_bitmap,
+- MSR_IA32_SPEC_CTRL);
++ return vmx_test_msr_bitmap_write(vmx->loaded_vmcs->msr_bitmap, msr);
+ }
+
+ unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx)
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index bc411d19dac08..55de0d1981e52 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1570,12 +1570,32 @@ static const u32 msr_based_features_all[] = {
+ static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all)];
+ static unsigned int num_msr_based_features;
+
++/*
++ * Some IA32_ARCH_CAPABILITIES bits have dependencies on MSRs that KVM
++ * does not yet virtualize. These include:
++ * 10 - MISC_PACKAGE_CTRLS
++ * 11 - ENERGY_FILTERING_CTL
++ * 12 - DOITM
++ * 18 - FB_CLEAR_CTRL
++ * 21 - XAPIC_DISABLE_STATUS
++ * 23 - OVERCLOCKING_STATUS
++ */
++
++#define KVM_SUPPORTED_ARCH_CAP \
++ (ARCH_CAP_RDCL_NO | ARCH_CAP_IBRS_ALL | ARCH_CAP_RSBA | \
++ ARCH_CAP_SKIP_VMENTRY_L1DFLUSH | ARCH_CAP_SSB_NO | ARCH_CAP_MDS_NO | \
++ ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \
++ ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \
++ ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO)
++
+ static u64 kvm_get_arch_capabilities(void)
+ {
+ u64 data = 0;
+
+- if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
++ if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) {
+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, data);
++ data &= KVM_SUPPORTED_ARCH_CAP;
++ }
+
+ /*
+ * If nx_huge_pages is enabled, KVM's shadow paging will ensure that
+@@ -1623,9 +1643,6 @@ static u64 kvm_get_arch_capabilities(void)
+ */
+ }
+
+- /* Guests don't need to know "Fill buffer clear control" exists */
+- data &= ~ARCH_CAP_FB_CLEAR_CTRL;
+-
+ return data;
+ }
+
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index 54ac94fed0151..8bac11d8e618a 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -1385,6 +1385,18 @@ static int binder_inc_ref_for_node(struct binder_proc *proc,
+ }
+ ret = binder_inc_ref_olocked(ref, strong, target_list);
+ *rdata = ref->data;
++ if (ret && ref == new_ref) {
++ /*
++ * Cleanup the failed reference here as the target
++ * could now be dead and have already released its
++ * references by now. Calling on the new reference
++ * with strong=0 and a tmp_refs will not decrement
++ * the node. The new_ref gets kfree'd below.
++ */
++ binder_cleanup_ref_olocked(new_ref);
++ ref = NULL;
++ }
++
+ binder_proc_unlock(proc);
+ if (new_ref && ref != new_ref)
+ /*
+diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
+index 5d437c0c842cb..53797453a6ee8 100644
+--- a/drivers/android/binder_alloc.c
++++ b/drivers/android/binder_alloc.c
+@@ -322,7 +322,6 @@ static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
+ */
+ if (vma) {
+ vm_start = vma->vm_start;
+- alloc->vma_vm_mm = vma->vm_mm;
+ mmap_assert_write_locked(alloc->vma_vm_mm);
+ } else {
+ mmap_assert_locked(alloc->vma_vm_mm);
+@@ -795,7 +794,6 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
+ binder_insert_free_buffer(alloc, buffer);
+ alloc->free_async_space = alloc->buffer_size / 2;
+ binder_alloc_set_vma(alloc, vma);
+- mmgrab(alloc->vma_vm_mm);
+
+ return 0;
+
+@@ -1091,6 +1089,8 @@ static struct shrinker binder_shrinker = {
+ void binder_alloc_init(struct binder_alloc *alloc)
+ {
+ alloc->pid = current->group_leader->pid;
++ alloc->vma_vm_mm = current->mm;
++ mmgrab(alloc->vma_vm_mm);
+ mutex_init(&alloc->mutex);
+ INIT_LIST_HEAD(&alloc->buffers);
+ }
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index b766968a873ce..2ccbde111c352 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -897,6 +897,11 @@ static int __device_attach_driver(struct device_driver *drv, void *_data)
+ dev_dbg(dev, "Device match requests probe deferral\n");
+ dev->can_match = true;
+ driver_deferred_probe_add(dev);
++ /*
++ * Device can't match with a driver right now, so don't attempt
++ * to match or bind with other drivers on the bus.
++ */
++ return ret;
+ } else if (ret < 0) {
+ dev_dbg(dev, "Bus failed to match device: %d\n", ret);
+ return ret;
+@@ -1136,6 +1141,11 @@ static int __driver_attach(struct device *dev, void *data)
+ dev_dbg(dev, "Device match requests probe deferral\n");
+ dev->can_match = true;
+ driver_deferred_probe_add(dev);
++ /*
++ * Driver could not match with device, but may match with
++ * another device on the bus.
++ */
++ return 0;
+ } else if (ret < 0) {
+ dev_dbg(dev, "Bus failed to match device: %d\n", ret);
+ return ret;
+diff --git a/drivers/base/firmware_loader/sysfs.c b/drivers/base/firmware_loader/sysfs.c
+index 5b0b85b70b6f2..28b9cbb8a6dd3 100644
+--- a/drivers/base/firmware_loader/sysfs.c
++++ b/drivers/base/firmware_loader/sysfs.c
+@@ -93,10 +93,9 @@ static void fw_dev_release(struct device *dev)
+ {
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+
+- if (fw_sysfs->fw_upload_priv) {
+- free_fw_priv(fw_sysfs->fw_priv);
+- kfree(fw_sysfs->fw_upload_priv);
+- }
++ if (fw_sysfs->fw_upload_priv)
++ fw_upload_free(fw_sysfs);
++
+ kfree(fw_sysfs);
+ }
+
+diff --git a/drivers/base/firmware_loader/sysfs.h b/drivers/base/firmware_loader/sysfs.h
+index 5d8ff1675c794..df1d5add698f1 100644
+--- a/drivers/base/firmware_loader/sysfs.h
++++ b/drivers/base/firmware_loader/sysfs.h
+@@ -106,12 +106,17 @@ extern struct device_attribute dev_attr_cancel;
+ extern struct device_attribute dev_attr_remaining_size;
+
+ int fw_upload_start(struct fw_sysfs *fw_sysfs);
++void fw_upload_free(struct fw_sysfs *fw_sysfs);
+ umode_t fw_upload_is_visible(struct kobject *kobj, struct attribute *attr, int n);
+ #else
+ static inline int fw_upload_start(struct fw_sysfs *fw_sysfs)
+ {
+ return 0;
+ }
++
++static inline void fw_upload_free(struct fw_sysfs *fw_sysfs)
++{
++}
+ #endif
+
+ #endif /* __FIRMWARE_SYSFS_H */
+diff --git a/drivers/base/firmware_loader/sysfs_upload.c b/drivers/base/firmware_loader/sysfs_upload.c
+index 87044d52322aa..a0af8f5f13d88 100644
+--- a/drivers/base/firmware_loader/sysfs_upload.c
++++ b/drivers/base/firmware_loader/sysfs_upload.c
+@@ -264,6 +264,15 @@ int fw_upload_start(struct fw_sysfs *fw_sysfs)
+ return 0;
+ }
+
++void fw_upload_free(struct fw_sysfs *fw_sysfs)
++{
++ struct fw_upload_priv *fw_upload_priv = fw_sysfs->fw_upload_priv;
++
++ free_fw_priv(fw_sysfs->fw_priv);
++ kfree(fw_upload_priv->fw_upload);
++ kfree(fw_upload_priv);
++}
++
+ /**
+ * firmware_upload_register() - register for the firmware upload sysfs API
+ * @module: kernel module of this device
+@@ -377,6 +386,7 @@ void firmware_upload_unregister(struct fw_upload *fw_upload)
+ {
+ struct fw_sysfs *fw_sysfs = fw_upload->priv;
+ struct fw_upload_priv *fw_upload_priv = fw_sysfs->fw_upload_priv;
++ struct module *module = fw_upload_priv->module;
+
+ mutex_lock(&fw_upload_priv->lock);
+ if (fw_upload_priv->progress == FW_UPLOAD_PROG_IDLE) {
+@@ -392,6 +402,6 @@ void firmware_upload_unregister(struct fw_upload *fw_upload)
+
+ unregister:
+ device_unregister(&fw_sysfs->dev);
+- module_put(fw_upload_priv->module);
++ module_put(module);
+ }
+ EXPORT_SYMBOL_GPL(firmware_upload_unregister);
+diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
+index bda5c815e4415..a28473470e662 100644
+--- a/drivers/block/xen-blkback/common.h
++++ b/drivers/block/xen-blkback/common.h
+@@ -226,6 +226,9 @@ struct xen_vbd {
+ sector_t size;
+ unsigned int flush_support:1;
+ unsigned int discard_secure:1;
++ /* Connect-time cached feature_persistent parameter value */
++ unsigned int feature_gnt_persistent_parm:1;
++ /* Persistent grants feature negotiation result */
+ unsigned int feature_gnt_persistent:1;
+ unsigned int overflow_max_grants:1;
+ };
+diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
+index ee7ad2fb432d1..c0227dfa46887 100644
+--- a/drivers/block/xen-blkback/xenbus.c
++++ b/drivers/block/xen-blkback/xenbus.c
+@@ -907,7 +907,7 @@ again:
+ xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);
+
+ err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
+- be->blkif->vbd.feature_gnt_persistent);
++ be->blkif->vbd.feature_gnt_persistent_parm);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "writing %s/feature-persistent",
+ dev->nodename);
+@@ -1085,7 +1085,9 @@ static int connect_ring(struct backend_info *be)
+ return -ENOSYS;
+ }
+
+- blkif->vbd.feature_gnt_persistent = feature_persistent &&
++ blkif->vbd.feature_gnt_persistent_parm = feature_persistent;
++ blkif->vbd.feature_gnt_persistent =
++ blkif->vbd.feature_gnt_persistent_parm &&
+ xenbus_read_unsigned(dev->otherend, "feature-persistent", 0);
+
+ blkif->vbd.overflow_max_grants = 0;
+diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
+index 4e763701b3720..1f85750f981e5 100644
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -213,6 +213,9 @@ struct blkfront_info
+ unsigned int feature_fua:1;
+ unsigned int feature_discard:1;
+ unsigned int feature_secdiscard:1;
++ /* Connect-time cached feature_persistent parameter */
++ unsigned int feature_persistent_parm:1;
++ /* Persistent grants feature negotiation result */
+ unsigned int feature_persistent:1;
+ unsigned int bounce:1;
+ unsigned int discard_granularity;
+@@ -1756,6 +1759,12 @@ abort_transaction:
+ return err;
+ }
+
++/* Enable the persistent grants feature. */
++static bool feature_persistent = true;
++module_param(feature_persistent, bool, 0644);
++MODULE_PARM_DESC(feature_persistent,
++ "Enables the persistent grants feature");
++
+ /* Common code used when first setting up, and when resuming. */
+ static int talk_to_blkback(struct xenbus_device *dev,
+ struct blkfront_info *info)
+@@ -1847,8 +1856,9 @@ again:
+ message = "writing protocol";
+ goto abort_transaction;
+ }
++ info->feature_persistent_parm = feature_persistent;
+ err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
+- info->feature_persistent);
++ info->feature_persistent_parm);
+ if (err)
+ dev_warn(&dev->dev,
+ "writing persistent grants feature to xenbus");
+@@ -1916,12 +1926,6 @@ static int negotiate_mq(struct blkfront_info *info)
+ return 0;
+ }
+
+-/* Enable the persistent grants feature. */
+-static bool feature_persistent = true;
+-module_param(feature_persistent, bool, 0644);
+-MODULE_PARM_DESC(feature_persistent,
+- "Enables the persistent grants feature");
+-
+ /*
+ * Entry point to this code when a new device is created. Allocate the basic
+ * structures and the ring buffer for communication with the backend, and
+@@ -2281,7 +2285,7 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
+ if (xenbus_read_unsigned(info->xbdev->otherend, "feature-discard", 0))
+ blkfront_setup_discard(info);
+
+- if (feature_persistent)
++ if (info->feature_persistent_parm)
+ info->feature_persistent =
+ !!xenbus_read_unsigned(info->xbdev->otherend,
+ "feature-persistent", 0);
+diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c
+index 73518009a0f20..4df921d1e21ca 100644
+--- a/drivers/clk/bcm/clk-raspberrypi.c
++++ b/drivers/clk/bcm/clk-raspberrypi.c
+@@ -203,7 +203,7 @@ static unsigned long raspberrypi_fw_get_rate(struct clk_hw *hw,
+ ret = raspberrypi_clock_property(rpi->firmware, data,
+ RPI_FIRMWARE_GET_CLOCK_RATE, &val);
+ if (ret)
+- return ret;
++ return 0;
+
+ return val;
+ }
+@@ -220,7 +220,7 @@ static int raspberrypi_fw_set_rate(struct clk_hw *hw, unsigned long rate,
+ ret = raspberrypi_clock_property(rpi->firmware, data,
+ RPI_FIRMWARE_SET_CLOCK_RATE, &_rate);
+ if (ret)
+- dev_err_ratelimited(rpi->dev, "Failed to change %s frequency: %d",
++ dev_err_ratelimited(rpi->dev, "Failed to change %s frequency: %d\n",
+ clk_hw_get_name(hw), ret);
+
+ return ret;
+@@ -288,7 +288,7 @@ static struct clk_hw *raspberrypi_clk_register(struct raspberrypi_clk *rpi,
+ RPI_FIRMWARE_GET_MIN_CLOCK_RATE,
+ &min_rate);
+ if (ret) {
+- dev_err(rpi->dev, "Failed to get clock %d min freq: %d",
++ dev_err(rpi->dev, "Failed to get clock %d min freq: %d\n",
+ id, ret);
+ return ERR_PTR(ret);
+ }
+@@ -344,8 +344,13 @@ static int raspberrypi_discover_clocks(struct raspberrypi_clk *rpi,
+ struct rpi_firmware_get_clocks_response *clks;
+ int ret;
+
++ /*
++ * The firmware doesn't guarantee that the last element of
++ * RPI_FIRMWARE_GET_CLOCKS is zeroed. So allocate an additional
++ * zero element as sentinel.
++ */
+ clks = devm_kcalloc(rpi->dev,
+- RPI_FIRMWARE_NUM_CLK_ID, sizeof(*clks),
++ RPI_FIRMWARE_NUM_CLK_ID + 1, sizeof(*clks),
+ GFP_KERNEL);
+ if (!clks)
+ return -ENOMEM;
+@@ -360,7 +365,7 @@ static int raspberrypi_discover_clocks(struct raspberrypi_clk *rpi,
+ struct raspberrypi_clk_variant *variant;
+
+ if (clks->id > RPI_FIRMWARE_NUM_CLK_ID) {
+- dev_err(rpi->dev, "Unknown clock id: %u", clks->id);
++ dev_err(rpi->dev, "Unknown clock id: %u\n", clks->id);
+ return -EINVAL;
+ }
+
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
+index f00d4c1158d72..f246d66f8261f 100644
+--- a/drivers/clk/clk.c
++++ b/drivers/clk/clk.c
+@@ -840,10 +840,9 @@ static void clk_core_unprepare(struct clk_core *core)
+ if (core->ops->unprepare)
+ core->ops->unprepare(core->hw);
+
+- clk_pm_runtime_put(core);
+-
+ trace_clk_unprepare_complete(core);
+ clk_core_unprepare(core->parent);
++ clk_pm_runtime_put(core);
+ }
+
+ static void clk_core_unprepare_lock(struct clk_core *core)
+diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
+index 3463579220b51..121d8610beb15 100644
+--- a/drivers/clk/ti/clk.c
++++ b/drivers/clk/ti/clk.c
+@@ -143,6 +143,7 @@ static struct device_node *ti_find_clock_provider(struct device_node *from,
+ continue;
+
+ if (!strncmp(n, tmp, strlen(tmp))) {
++ of_node_get(np);
+ found = true;
+ break;
+ }
+diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
+index 205acb2c744de..e3885c90a3acb 100644
+--- a/drivers/dma-buf/dma-resv.c
++++ b/drivers/dma-buf/dma-resv.c
+@@ -295,7 +295,8 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
+ enum dma_resv_usage old_usage;
+
+ dma_resv_list_entry(fobj, i, obj, &old, &old_usage);
+- if ((old->context == fence->context && old_usage >= usage) ||
++ if ((old->context == fence->context && old_usage >= usage &&
++ dma_fence_is_later(fence, old)) ||
+ dma_fence_is_signaled(old)) {
+ dma_resv_list_set(fobj, i, fence, usage);
+ dma_fence_put(old);
+diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
+index ecd7d169470b0..2925f4d8cef36 100644
+--- a/drivers/gpio/gpio-pca953x.c
++++ b/drivers/gpio/gpio-pca953x.c
+@@ -1175,7 +1175,9 @@ static int pca953x_suspend(struct device *dev)
+ {
+ struct pca953x_chip *chip = dev_get_drvdata(dev);
+
++ mutex_lock(&chip->i2c_lock);
+ regcache_cache_only(chip->regmap, true);
++ mutex_unlock(&chip->i2c_lock);
+
+ if (atomic_read(&chip->wakeup_path))
+ device_set_wakeup_path(dev);
+@@ -1198,13 +1200,17 @@ static int pca953x_resume(struct device *dev)
+ }
+ }
+
++ mutex_lock(&chip->i2c_lock);
+ regcache_cache_only(chip->regmap, false);
+ regcache_mark_dirty(chip->regmap);
+ ret = pca953x_regcache_sync(dev);
+- if (ret)
++ if (ret) {
++ mutex_unlock(&chip->i2c_lock);
+ return ret;
++ }
+
+ ret = regcache_sync(chip->regmap);
++ mutex_unlock(&chip->i2c_lock);
+ if (ret) {
+ dev_err(dev, "Failed to restore register map: %d\n", ret);
+ return ret;
+diff --git a/drivers/gpio/gpio-realtek-otto.c b/drivers/gpio/gpio-realtek-otto.c
+index 63dcf42f7c206..d6418f89d3f63 100644
+--- a/drivers/gpio/gpio-realtek-otto.c
++++ b/drivers/gpio/gpio-realtek-otto.c
+@@ -46,10 +46,20 @@
+ * @lock: Lock for accessing the IRQ registers and values
+ * @intr_mask: Mask for interrupts lines
+ * @intr_type: Interrupt type selection
++ * @bank_read: Read a bank setting as a single 32-bit value
++ * @bank_write: Write a bank setting as a single 32-bit value
++ * @imr_line_pos: Bit shift of an IRQ line's IMR value.
++ *
++ * The DIR, DATA, and ISR registers consist of four 8-bit port values, packed
++ * into a single 32-bit register. Use @bank_read (@bank_write) to get (assign)
++ * a value from (to) these registers. The IMR register consists of four 16-bit
++ * port values, packed into two 32-bit registers. Use @imr_line_pos to get the
++ * bit shift of the 2-bit field for a line's IMR settings. Shifts larger than
++ * 32 overflow into the second register.
+ *
+ * Because the interrupt mask register (IMR) combines the function of IRQ type
+ * selection and masking, two extra values are stored. @intr_mask is used to
+- * mask/unmask the interrupts for a GPIO port, and @intr_type is used to store
++ * mask/unmask the interrupts for a GPIO line, and @intr_type is used to store
+ * the selected interrupt types. The logical AND of these values is written to
+ * IMR on changes.
+ */
+@@ -59,10 +69,11 @@ struct realtek_gpio_ctrl {
+ void __iomem *cpumask_base;
+ struct cpumask cpu_irq_maskable;
+ raw_spinlock_t lock;
+- u16 intr_mask[REALTEK_GPIO_PORTS_PER_BANK];
+- u16 intr_type[REALTEK_GPIO_PORTS_PER_BANK];
+- unsigned int (*port_offset_u8)(unsigned int port);
+- unsigned int (*port_offset_u16)(unsigned int port);
++ u8 intr_mask[REALTEK_GPIO_MAX];
++ u8 intr_type[REALTEK_GPIO_MAX];
++ u32 (*bank_read)(void __iomem *reg);
++ void (*bank_write)(void __iomem *reg, u32 value);
++ unsigned int (*line_imr_pos)(unsigned int line);
+ };
+
+ /* Expand with more flags as devices with other quirks are added */
+@@ -101,14 +112,22 @@ static struct realtek_gpio_ctrl *irq_data_to_ctrl(struct irq_data *data)
+ * port. The two interrupt mask registers store two bits per GPIO, so use u16
+ * values.
+ */
+-static unsigned int realtek_gpio_port_offset_u8(unsigned int port)
++static u32 realtek_gpio_bank_read_swapped(void __iomem *reg)
+ {
+- return port;
++ return ioread32be(reg);
+ }
+
+-static unsigned int realtek_gpio_port_offset_u16(unsigned int port)
++static void realtek_gpio_bank_write_swapped(void __iomem *reg, u32 value)
+ {
+- return 2 * port;
++ iowrite32be(value, reg);
++}
++
++static unsigned int realtek_gpio_line_imr_pos_swapped(unsigned int line)
++{
++ unsigned int port_pin = line % 8;
++ unsigned int port = line / 8;
++
++ return 2 * (8 * (port ^ 1) + port_pin);
+ }
+
+ /*
+@@ -119,66 +138,67 @@ static unsigned int realtek_gpio_port_offset_u16(unsigned int port)
+ * per GPIO, so use u16 values. The first register contains ports 1 and 0, the
+ * second ports 3 and 2.
+ */
+-static unsigned int realtek_gpio_port_offset_u8_rev(unsigned int port)
++static u32 realtek_gpio_bank_read(void __iomem *reg)
+ {
+- return 3 - port;
++ return ioread32(reg);
+ }
+
+-static unsigned int realtek_gpio_port_offset_u16_rev(unsigned int port)
++static void realtek_gpio_bank_write(void __iomem *reg, u32 value)
+ {
+- return 2 * (port ^ 1);
++ iowrite32(value, reg);
+ }
+
+-static void realtek_gpio_write_imr(struct realtek_gpio_ctrl *ctrl,
+- unsigned int port, u16 irq_type, u16 irq_mask)
++static unsigned int realtek_gpio_line_imr_pos(unsigned int line)
+ {
+- iowrite16(irq_type & irq_mask,
+- ctrl->base + REALTEK_GPIO_REG_IMR + ctrl->port_offset_u16(port));
++ return 2 * line;
+ }
+
+-static void realtek_gpio_clear_isr(struct realtek_gpio_ctrl *ctrl,
+- unsigned int port, u8 mask)
++static void realtek_gpio_clear_isr(struct realtek_gpio_ctrl *ctrl, u32 mask)
+ {
+- iowrite8(mask, ctrl->base + REALTEK_GPIO_REG_ISR + ctrl->port_offset_u8(port));
++ ctrl->bank_write(ctrl->base + REALTEK_GPIO_REG_ISR, mask);
+ }
+
+-static u8 realtek_gpio_read_isr(struct realtek_gpio_ctrl *ctrl, unsigned int port)
++static u32 realtek_gpio_read_isr(struct realtek_gpio_ctrl *ctrl)
+ {
+- return ioread8(ctrl->base + REALTEK_GPIO_REG_ISR + ctrl->port_offset_u8(port));
++ return ctrl->bank_read(ctrl->base + REALTEK_GPIO_REG_ISR);
+ }
+
+-/* Set the rising and falling edge mask bits for a GPIO port pin */
+-static u16 realtek_gpio_imr_bits(unsigned int pin, u16 value)
++/* Set the rising and falling edge mask bits for a GPIO pin */
++static void realtek_gpio_update_line_imr(struct realtek_gpio_ctrl *ctrl, unsigned int line)
+ {
+- return (value & REALTEK_GPIO_IMR_LINE_MASK) << 2 * pin;
++ void __iomem *reg = ctrl->base + REALTEK_GPIO_REG_IMR;
++ unsigned int line_shift = ctrl->line_imr_pos(line);
++ unsigned int shift = line_shift % 32;
++ u32 irq_type = ctrl->intr_type[line];
++ u32 irq_mask = ctrl->intr_mask[line];
++ u32 reg_val;
++
++ reg += 4 * (line_shift / 32);
++ reg_val = ioread32(reg);
++ reg_val &= ~(REALTEK_GPIO_IMR_LINE_MASK << shift);
++ reg_val |= (irq_type & irq_mask & REALTEK_GPIO_IMR_LINE_MASK) << shift;
++ iowrite32(reg_val, reg);
+ }
+
+ static void realtek_gpio_irq_ack(struct irq_data *data)
+ {
+ struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
+ irq_hw_number_t line = irqd_to_hwirq(data);
+- unsigned int port = line / 8;
+- unsigned int port_pin = line % 8;
+
+- realtek_gpio_clear_isr(ctrl, port, BIT(port_pin));
++ realtek_gpio_clear_isr(ctrl, BIT(line));
+ }
+
+ static void realtek_gpio_irq_unmask(struct irq_data *data)
+ {
+ struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
+ unsigned int line = irqd_to_hwirq(data);
+- unsigned int port = line / 8;
+- unsigned int port_pin = line % 8;
+ unsigned long flags;
+- u16 m;
+
+ gpiochip_enable_irq(&ctrl->gc, line);
+
+ raw_spin_lock_irqsave(&ctrl->lock, flags);
+- m = ctrl->intr_mask[port];
+- m |= realtek_gpio_imr_bits(port_pin, REALTEK_GPIO_IMR_LINE_MASK);
+- ctrl->intr_mask[port] = m;
+- realtek_gpio_write_imr(ctrl, port, ctrl->intr_type[port], m);
++ ctrl->intr_mask[line] = REALTEK_GPIO_IMR_LINE_MASK;
++ realtek_gpio_update_line_imr(ctrl, line);
+ raw_spin_unlock_irqrestore(&ctrl->lock, flags);
+ }
+
+@@ -186,16 +206,11 @@ static void realtek_gpio_irq_mask(struct irq_data *data)
+ {
+ struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
+ unsigned int line = irqd_to_hwirq(data);
+- unsigned int port = line / 8;
+- unsigned int port_pin = line % 8;
+ unsigned long flags;
+- u16 m;
+
+ raw_spin_lock_irqsave(&ctrl->lock, flags);
+- m = ctrl->intr_mask[port];
+- m &= ~realtek_gpio_imr_bits(port_pin, REALTEK_GPIO_IMR_LINE_MASK);
+- ctrl->intr_mask[port] = m;
+- realtek_gpio_write_imr(ctrl, port, ctrl->intr_type[port], m);
++ ctrl->intr_mask[line] = 0;
++ realtek_gpio_update_line_imr(ctrl, line);
+ raw_spin_unlock_irqrestore(&ctrl->lock, flags);
+
+ gpiochip_disable_irq(&ctrl->gc, line);
+@@ -205,10 +220,8 @@ static int realtek_gpio_irq_set_type(struct irq_data *data, unsigned int flow_ty
+ {
+ struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
+ unsigned int line = irqd_to_hwirq(data);
+- unsigned int port = line / 8;
+- unsigned int port_pin = line % 8;
+ unsigned long flags;
+- u16 type, t;
++ u8 type;
+
+ switch (flow_type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_FALLING:
+@@ -227,11 +240,8 @@ static int realtek_gpio_irq_set_type(struct irq_data *data, unsigned int flow_ty
+ irq_set_handler_locked(data, handle_edge_irq);
+
+ raw_spin_lock_irqsave(&ctrl->lock, flags);
+- t = ctrl->intr_type[port];
+- t &= ~realtek_gpio_imr_bits(port_pin, REALTEK_GPIO_IMR_LINE_MASK);
+- t |= realtek_gpio_imr_bits(port_pin, type);
+- ctrl->intr_type[port] = t;
+- realtek_gpio_write_imr(ctrl, port, t, ctrl->intr_mask[port]);
++ ctrl->intr_type[line] = type;
++ realtek_gpio_update_line_imr(ctrl, line);
+ raw_spin_unlock_irqrestore(&ctrl->lock, flags);
+
+ return 0;
+@@ -242,28 +252,21 @@ static void realtek_gpio_irq_handler(struct irq_desc *desc)
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct realtek_gpio_ctrl *ctrl = gpiochip_get_data(gc);
+ struct irq_chip *irq_chip = irq_desc_get_chip(desc);
+- unsigned int lines_done;
+- unsigned int port_pin_count;
+ unsigned long status;
+ int offset;
+
+ chained_irq_enter(irq_chip, desc);
+
+- for (lines_done = 0; lines_done < gc->ngpio; lines_done += 8) {
+- status = realtek_gpio_read_isr(ctrl, lines_done / 8);
+- port_pin_count = min(gc->ngpio - lines_done, 8U);
+- for_each_set_bit(offset, &status, port_pin_count)
+- generic_handle_domain_irq(gc->irq.domain, offset + lines_done);
+- }
++ status = realtek_gpio_read_isr(ctrl);
++ for_each_set_bit(offset, &status, gc->ngpio)
++ generic_handle_domain_irq(gc->irq.domain, offset);
+
+ chained_irq_exit(irq_chip, desc);
+ }
+
+-static inline void __iomem *realtek_gpio_irq_cpu_mask(struct realtek_gpio_ctrl *ctrl,
+- unsigned int port, int cpu)
++static inline void __iomem *realtek_gpio_irq_cpu_mask(struct realtek_gpio_ctrl *ctrl, int cpu)
+ {
+- return ctrl->cpumask_base + ctrl->port_offset_u8(port) +
+- REALTEK_GPIO_PORTS_PER_BANK * cpu;
++ return ctrl->cpumask_base + REALTEK_GPIO_PORTS_PER_BANK * cpu;
+ }
+
+ static int realtek_gpio_irq_set_affinity(struct irq_data *data,
+@@ -271,12 +274,10 @@ static int realtek_gpio_irq_set_affinity(struct irq_data *data,
+ {
+ struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
+ unsigned int line = irqd_to_hwirq(data);
+- unsigned int port = line / 8;
+- unsigned int port_pin = line % 8;
+ void __iomem *irq_cpu_mask;
+ unsigned long flags;
+ int cpu;
+- u8 v;
++ u32 v;
+
+ if (!ctrl->cpumask_base)
+ return -ENXIO;
+@@ -284,15 +285,15 @@ static int realtek_gpio_irq_set_affinity(struct irq_data *data,
+ raw_spin_lock_irqsave(&ctrl->lock, flags);
+
+ for_each_cpu(cpu, &ctrl->cpu_irq_maskable) {
+- irq_cpu_mask = realtek_gpio_irq_cpu_mask(ctrl, port, cpu);
+- v = ioread8(irq_cpu_mask);
++ irq_cpu_mask = realtek_gpio_irq_cpu_mask(ctrl, cpu);
++ v = ctrl->bank_read(irq_cpu_mask);
+
+ if (cpumask_test_cpu(cpu, dest))
+- v |= BIT(port_pin);
++ v |= BIT(line);
+ else
+- v &= ~BIT(port_pin);
++ v &= ~BIT(line);
+
+- iowrite8(v, irq_cpu_mask);
++ ctrl->bank_write(irq_cpu_mask, v);
+ }
+
+ raw_spin_unlock_irqrestore(&ctrl->lock, flags);
+@@ -305,16 +306,17 @@ static int realtek_gpio_irq_set_affinity(struct irq_data *data,
+ static int realtek_gpio_irq_init(struct gpio_chip *gc)
+ {
+ struct realtek_gpio_ctrl *ctrl = gpiochip_get_data(gc);
+- unsigned int port;
++ u32 mask_all = GENMASK(gc->ngpio - 1, 0);
++ unsigned int line;
+ int cpu;
+
+- for (port = 0; (port * 8) < gc->ngpio; port++) {
+- realtek_gpio_write_imr(ctrl, port, 0, 0);
+- realtek_gpio_clear_isr(ctrl, port, GENMASK(7, 0));
++ for (line = 0; line < gc->ngpio; line++)
++ realtek_gpio_update_line_imr(ctrl, line);
+
+- for_each_cpu(cpu, &ctrl->cpu_irq_maskable)
+- iowrite8(GENMASK(7, 0), realtek_gpio_irq_cpu_mask(ctrl, port, cpu));
+- }
++ realtek_gpio_clear_isr(ctrl, mask_all);
++
++ for_each_cpu(cpu, &ctrl->cpu_irq_maskable)
++ ctrl->bank_write(realtek_gpio_irq_cpu_mask(ctrl, cpu), mask_all);
+
+ return 0;
+ }
+@@ -387,12 +389,14 @@ static int realtek_gpio_probe(struct platform_device *pdev)
+
+ if (dev_flags & GPIO_PORTS_REVERSED) {
+ bgpio_flags = 0;
+- ctrl->port_offset_u8 = realtek_gpio_port_offset_u8_rev;
+- ctrl->port_offset_u16 = realtek_gpio_port_offset_u16_rev;
++ ctrl->bank_read = realtek_gpio_bank_read;
++ ctrl->bank_write = realtek_gpio_bank_write;
++ ctrl->line_imr_pos = realtek_gpio_line_imr_pos;
+ } else {
+ bgpio_flags = BGPIOF_BIG_ENDIAN_BYTE_ORDER;
+- ctrl->port_offset_u8 = realtek_gpio_port_offset_u8;
+- ctrl->port_offset_u16 = realtek_gpio_port_offset_u16;
++ ctrl->bank_read = realtek_gpio_bank_read_swapped;
++ ctrl->bank_write = realtek_gpio_bank_write_swapped;
++ ctrl->line_imr_pos = realtek_gpio_line_imr_pos_swapped;
+ }
+
+ err = bgpio_init(&ctrl->gc, dev, 4,
+diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
+index c8e1fc53a881f..3e200a2e4ba29 100644
+--- a/drivers/gpu/drm/i915/display/intel_backlight.c
++++ b/drivers/gpu/drm/i915/display/intel_backlight.c
+@@ -15,6 +15,7 @@
+ #include "intel_dsi_dcs_backlight.h"
+ #include "intel_panel.h"
+ #include "intel_pci_config.h"
++#include "intel_pps.h"
+
+ /**
+ * scale - scale values from one range to another
+@@ -970,26 +971,24 @@ int intel_backlight_device_register(struct intel_connector *connector)
+ if (!name)
+ return -ENOMEM;
+
+- bd = backlight_device_register(name, connector->base.kdev, connector,
+- &intel_backlight_device_ops, &props);
+-
+- /*
+- * Using the same name independent of the drm device or connector
+- * prevents registration of multiple backlight devices in the
+- * driver. However, we need to use the default name for backward
+- * compatibility. Use unique names for subsequent backlight devices as a
+- * fallback when the default name already exists.
+- */
+- if (IS_ERR(bd) && PTR_ERR(bd) == -EEXIST) {
++ bd = backlight_device_get_by_name(name);
++ if (bd) {
++ put_device(&bd->dev);
++ /*
++ * Using the same name independent of the drm device or connector
++ * prevents registration of multiple backlight devices in the
++ * driver. However, we need to use the default name for backward
++ * compatibility. Use unique names for subsequent backlight devices as a
++ * fallback when the default name already exists.
++ */
+ kfree(name);
+ name = kasprintf(GFP_KERNEL, "card%d-%s-backlight",
+ i915->drm.primary->index, connector->base.name);
+ if (!name)
+ return -ENOMEM;
+-
+- bd = backlight_device_register(name, connector->base.kdev, connector,
+- &intel_backlight_device_ops, &props);
+ }
++ bd = backlight_device_register(name, connector->base.kdev, connector,
++ &intel_backlight_device_ops, &props);
+
+ if (IS_ERR(bd)) {
+ drm_err(&i915->drm,
+@@ -1771,9 +1770,13 @@ void intel_backlight_init_funcs(struct intel_panel *panel)
+ panel->backlight.pwm_funcs = &i9xx_pwm_funcs;
+ }
+
+- if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
+- intel_dp_aux_init_backlight_funcs(connector) == 0)
+- return;
++ if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) {
++ if (intel_dp_aux_init_backlight_funcs(connector) == 0)
++ return;
++
++ if (!(dev_priv->quirks & QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
++ connector->panel.backlight.power = intel_pps_backlight_power;
++ }
+
+ /* We're using a standard PWM backlight interface */
+ panel->backlight.funcs = &pwm_bl_funcs;
+diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
+index 37bd7b17f3d0b..f2fad199e2e0b 100644
+--- a/drivers/gpu/drm/i915/display/intel_bw.c
++++ b/drivers/gpu/drm/i915/display/intel_bw.c
+@@ -404,15 +404,17 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
+ int clpchgroup;
+ int j;
+
+- if (i < num_groups - 1)
+- bi_next = &dev_priv->max_bw[i + 1];
+-
+ clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i;
+
+- if (i < num_groups - 1 && clpchgroup < clperchgroup)
+- bi_next->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;
+- else
+- bi_next->num_planes = 0;
++ if (i < num_groups - 1) {
++ bi_next = &dev_priv->max_bw[i + 1];
++
++ if (clpchgroup < clperchgroup)
++ bi_next->num_planes = (ipqdepth - clpchgroup) /
++ clpchgroup + 1;
++ else
++ bi_next->num_planes = 0;
++ }
+
+ bi->num_qgv_points = qi.num_points;
+ bi->num_psf_gv_points = qi.num_psf_points;
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index ff67899522cf7..41aaa6c98114f 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -5248,8 +5248,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
+
+ intel_panel_init(intel_connector);
+
+- if (!(dev_priv->quirks & QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
+- intel_connector->panel.backlight.power = intel_pps_backlight_power;
+ intel_backlight_setup(intel_connector, pipe);
+
+ intel_edp_add_properties(intel_dp);
+diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
+index c8488f5ebd044..e415cd7c0b84b 100644
+--- a/drivers/gpu/drm/i915/display/intel_quirks.c
++++ b/drivers/gpu/drm/i915/display/intel_quirks.c
+@@ -191,6 +191,9 @@ static struct intel_quirk intel_quirks[] = {
+ /* ASRock ITX*/
+ { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+ { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
++ /* ECS Liva Q2 */
++ { 0x3185, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
++ { 0x3184, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
+ };
+
+ void intel_init_quirks(struct drm_i915_private *i915)
+diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c
+index 2b10b96b17b5b..933648cc90ff9 100644
+--- a/drivers/gpu/drm/i915/gt/intel_migrate.c
++++ b/drivers/gpu/drm/i915/gt/intel_migrate.c
+@@ -638,9 +638,9 @@ static int emit_copy(struct i915_request *rq,
+ return 0;
+ }
+
+-static int scatter_list_length(struct scatterlist *sg)
++static u64 scatter_list_length(struct scatterlist *sg)
+ {
+- int len = 0;
++ u64 len = 0;
+
+ while (sg && sg_dma_len(sg)) {
+ len += sg_dma_len(sg);
+@@ -650,28 +650,26 @@ static int scatter_list_length(struct scatterlist *sg)
+ return len;
+ }
+
+-static void
++static int
+ calculate_chunk_sz(struct drm_i915_private *i915, bool src_is_lmem,
+- int *src_sz, u32 bytes_to_cpy, u32 ccs_bytes_to_cpy)
++ u64 bytes_to_cpy, u64 ccs_bytes_to_cpy)
+ {
+- if (ccs_bytes_to_cpy) {
+- if (!src_is_lmem)
+- /*
+- * When CHUNK_SZ is passed all the pages upto CHUNK_SZ
+- * will be taken for the blt. in Flat-ccs supported
+- * platform Smem obj will have more pages than required
+- * for main meory hence limit it to the required size
+- * for main memory
+- */
+- *src_sz = min_t(int, bytes_to_cpy, CHUNK_SZ);
+- } else { /* ccs handling is not required */
+- *src_sz = CHUNK_SZ;
+- }
++ if (ccs_bytes_to_cpy && !src_is_lmem)
++ /*
++ * When CHUNK_SZ is passed all the pages upto CHUNK_SZ
++ * will be taken for the blt. in Flat-ccs supported
++ * platform Smem obj will have more pages than required
++ * for main meory hence limit it to the required size
++ * for main memory
++ */
++ return min_t(u64, bytes_to_cpy, CHUNK_SZ);
++ else
++ return CHUNK_SZ;
+ }
+
+-static void get_ccs_sg_sgt(struct sgt_dma *it, u32 bytes_to_cpy)
++static void get_ccs_sg_sgt(struct sgt_dma *it, u64 bytes_to_cpy)
+ {
+- u32 len;
++ u64 len;
+
+ do {
+ GEM_BUG_ON(!it->sg || !sg_dma_len(it->sg));
+@@ -702,12 +700,12 @@ intel_context_migrate_copy(struct intel_context *ce,
+ {
+ struct sgt_dma it_src = sg_sgt(src), it_dst = sg_sgt(dst), it_ccs;
+ struct drm_i915_private *i915 = ce->engine->i915;
+- u32 ccs_bytes_to_cpy = 0, bytes_to_cpy;
++ u64 ccs_bytes_to_cpy = 0, bytes_to_cpy;
+ enum i915_cache_level ccs_cache_level;
+ u32 src_offset, dst_offset;
+ u8 src_access, dst_access;
+ struct i915_request *rq;
+- int src_sz, dst_sz;
++ u64 src_sz, dst_sz;
+ bool ccs_is_src, overwrite_ccs;
+ int err;
+
+@@ -790,8 +788,8 @@ intel_context_migrate_copy(struct intel_context *ce,
+ if (err)
+ goto out_rq;
+
+- calculate_chunk_sz(i915, src_is_lmem, &src_sz,
+- bytes_to_cpy, ccs_bytes_to_cpy);
++ src_sz = calculate_chunk_sz(i915, src_is_lmem,
++ bytes_to_cpy, ccs_bytes_to_cpy);
+
+ len = emit_pte(rq, &it_src, src_cache_level, src_is_lmem,
+ src_offset, src_sz);
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+index 2d9f5f1c79d3a..26a051ef119df 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+@@ -4010,6 +4010,13 @@ static inline void guc_init_lrc_mapping(struct intel_guc *guc)
+ /* make sure all descriptors are clean... */
+ xa_destroy(&guc->context_lookup);
+
++ /*
++ * A reset might have occurred while we had a pending stalled request,
++ * so make sure we clean that up.
++ */
++ guc->stalled_request = NULL;
++ guc->submission_stall_reason = STALL_NONE;
++
+ /*
+ * Some contexts might have been pinned before we enabled GuC
+ * submission, so we need to add them to the GuC bookeeping.
+diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
+index beea5895e4992..73e74a6a76037 100644
+--- a/drivers/gpu/drm/i915/gvt/handlers.c
++++ b/drivers/gpu/drm/i915/gvt/handlers.c
+@@ -905,7 +905,7 @@ static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
+ else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
+ index = FDI_RX_IMR_TO_PIPE(offset);
+ else {
+- gvt_vgpu_err("Unsupport registers %x\n", offset);
++ gvt_vgpu_err("Unsupported registers %x\n", offset);
+ return -EINVAL;
+ }
+
+diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
+index 72dac1718f3e7..6163aeaee9b98 100644
+--- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
++++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
+@@ -1074,7 +1074,8 @@ static int iterate_skl_plus_mmio(struct intel_gvt_mmio_table_iter *iter)
+ MMIO_D(GEN8_HDC_CHICKEN1);
+ MMIO_D(GEN9_WM_CHICKEN3);
+
+- if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
++ if (IS_KABYLAKE(dev_priv) ||
++ IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv))
+ MMIO_D(GAMT_CHKN_BIT_REG);
+ if (!IS_BROXTON(dev_priv))
+ MMIO_D(GEN9_CTX_PREEMPT_REG);
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index 5735915facc51..7d5803f2343a9 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -6560,7 +6560,10 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
+ enum plane_id plane_id;
+ u8 slices;
+
+- skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
++ memset(&crtc_state->wm.skl.optimal, 0,
++ sizeof(crtc_state->wm.skl.optimal));
++ if (crtc_state->hw.active)
++ skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
+ crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;
+
+ memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe]));
+@@ -6571,6 +6574,9 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
+ struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+
++ if (!crtc_state->hw.active)
++ continue;
++
+ skl_ddb_get_hw_plane_state(dev_priv, crtc->pipe,
+ plane_id, ddb, ddb_y);
+
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+index 9b4df3084366b..d98c7f7da7c08 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+@@ -1998,6 +1998,12 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
+
+ intf_cfg.stream_sel = 0; /* Don't care value for video mode */
+ intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
++
++ if (phys_enc->hw_intf)
++ intf_cfg.intf = phys_enc->hw_intf->idx;
++ if (phys_enc->hw_wb)
++ intf_cfg.wb = phys_enc->hw_wb->idx;
++
+ if (phys_enc->hw_pp->merge_3d)
+ intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;
+
+diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
+index 703249384e7c7..45aa06a31a9fd 100644
+--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
++++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
+@@ -1214,7 +1214,7 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
+ if (ret)
+ return ret;
+
+- dp_ctrl_train_pattern_set(ctrl, pattern | DP_RECOVERED_CLOCK_OUT_EN);
++ dp_ctrl_train_pattern_set(ctrl, pattern);
+
+ for (tries = 0; tries <= maximum_retries; tries++) {
+ drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd);
+diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+index 2c23324a2296b..72c018e26f47f 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
++++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+@@ -109,7 +109,7 @@ static const char * const dsi_8996_bus_clk_names[] = {
+ static const struct msm_dsi_config msm8996_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .reg_cfg = {
+- .num = 2,
++ .num = 3,
+ .regs = {
+ {"vdda", 18160, 1 }, /* 1.25 V */
+ {"vcca", 17000, 32 }, /* 0.925 V */
+@@ -148,7 +148,7 @@ static const char * const dsi_sdm660_bus_clk_names[] = {
+ static const struct msm_dsi_config sdm660_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .reg_cfg = {
+- .num = 2,
++ .num = 1,
+ .regs = {
+ {"vdda", 12560, 4 }, /* 1.2 V */
+ },
+diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+index a39de3bdc7faf..56dfa2d24be1f 100644
+--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+@@ -347,7 +347,7 @@ int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
+ } else {
+ timing->shared_timings.clk_pre =
+ linear_inter(tmax, tmin, pcnt2, 0, false);
+- timing->shared_timings.clk_pre_inc_by_2 = 0;
++ timing->shared_timings.clk_pre_inc_by_2 = 0;
+ }
+
+ timing->ta_go = 3;
+diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
+index 14ab9a627d8b0..7c0314d6566af 100644
+--- a/drivers/gpu/drm/msm/msm_drv.c
++++ b/drivers/gpu/drm/msm/msm_drv.c
+@@ -424,6 +424,8 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
+ }
+ }
+
++ drm_helper_move_panel_connectors_to_head(ddev);
++
+ ddev->mode_config.funcs = &mode_config_funcs;
+ ddev->mode_config.helper_private = &mode_config_helper_funcs;
+
+diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
+index ea94bc18e72eb..89cc93eb67557 100644
+--- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c
++++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
+@@ -213,6 +213,8 @@ void msm_devfreq_init(struct msm_gpu *gpu)
1516 +
1517 + if (IS_ERR(df->devfreq)) {
1518 + DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
1519 ++ dev_pm_qos_remove_request(&df->idle_freq);
1520 ++ dev_pm_qos_remove_request(&df->boost_freq);
1521 + df->devfreq = NULL;
1522 + return;
1523 + }
1524 +diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
1525 +index befe989ca7b94..fbf3f5a4ecb67 100644
1526 +--- a/drivers/hwmon/gpio-fan.c
1527 ++++ b/drivers/hwmon/gpio-fan.c
1528 +@@ -391,6 +391,9 @@ static int gpio_fan_set_cur_state(struct thermal_cooling_device *cdev,
1529 + if (!fan_data)
1530 + return -EINVAL;
1531 +
1532 ++ if (state >= fan_data->num_speed)
1533 ++ return -EINVAL;
1534 ++
1535 + set_fan_speed(fan_data, state);
1536 + return 0;
1537 + }
1538 +diff --git a/drivers/iio/adc/ad7292.c b/drivers/iio/adc/ad7292.c
1539 +index 92c68d467c505..a2f9fda25ff34 100644
1540 +--- a/drivers/iio/adc/ad7292.c
1541 ++++ b/drivers/iio/adc/ad7292.c
1542 +@@ -287,10 +287,8 @@ static int ad7292_probe(struct spi_device *spi)
1543 +
1544 + ret = devm_add_action_or_reset(&spi->dev,
1545 + ad7292_regulator_disable, st);
1546 +- if (ret) {
1547 +- regulator_disable(st->reg);
1548 ++ if (ret)
1549 + return ret;
1550 +- }
1551 +
1552 + ret = regulator_get_voltage(st->reg);
1553 + if (ret < 0)
1554 +diff --git a/drivers/iio/adc/mcp3911.c b/drivers/iio/adc/mcp3911.c
1555 +index 1cb4590fe4125..890af7dca62de 100644
1556 +--- a/drivers/iio/adc/mcp3911.c
1557 ++++ b/drivers/iio/adc/mcp3911.c
1558 +@@ -40,8 +40,8 @@
1559 + #define MCP3911_CHANNEL(x) (MCP3911_REG_CHANNEL0 + x * 3)
1560 + #define MCP3911_OFFCAL(x) (MCP3911_REG_OFFCAL_CH0 + x * 6)
1561 +
1562 +-/* Internal voltage reference in uV */
1563 +-#define MCP3911_INT_VREF_UV 1200000
1564 ++/* Internal voltage reference in mV */
1565 ++#define MCP3911_INT_VREF_MV 1200
1566 +
1567 + #define MCP3911_REG_READ(reg, id) ((((reg) << 1) | ((id) << 5) | (1 << 0)) & 0xff)
1568 + #define MCP3911_REG_WRITE(reg, id) ((((reg) << 1) | ((id) << 5) | (0 << 0)) & 0xff)
1569 +@@ -113,6 +113,8 @@ static int mcp3911_read_raw(struct iio_dev *indio_dev,
1570 + if (ret)
1571 + goto out;
1572 +
1573 ++ *val = sign_extend32(*val, 23);
1574 ++
1575 + ret = IIO_VAL_INT;
1576 + break;
1577 +
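
A note on the sign_extend32() call added above: the MCP3911 returns 24-bit
two's-complement samples, so the raw register value has to be sign-extended
from bit 23 before it can be reported as a signed integer. A minimal
freestanding sketch of the same idiom (not the driver code; it assumes an
arithmetic right shift on signed values, as the kernel helper effectively
does):

	#include <stdint.h>

	/* Sign-extend `value`, whose sign bit sits at position `index`;
	 * for a 24-bit ADC sample, index = 23. */
	static int32_t sign_extend_bits(uint32_t value, unsigned int index)
	{
		unsigned int shift = 31 - index;

		return (int32_t)(value << shift) >> shift;
	}

	/* Example: sign_extend_bits(0x800000, 23) == -8388608, the most
	 * negative 24-bit code; 0x7fffff stays +8388607. */
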
1578 +@@ -137,11 +139,18 @@ static int mcp3911_read_raw(struct iio_dev *indio_dev,
1579 +
1580 + *val = ret / 1000;
1581 + } else {
1582 +- *val = MCP3911_INT_VREF_UV;
1583 ++ *val = MCP3911_INT_VREF_MV;
1584 + }
1585 +
1586 +- *val2 = 24;
1587 +- ret = IIO_VAL_FRACTIONAL_LOG2;
1588 ++ /*
1589 ++	 * For 24-bit conversion:
1590 ++	 * Raw = (Voltage / Vref) * 2^23 * Gain * 1.5
1591 ++	 * Voltage = Raw * Vref / (2^23 * Gain * 1.5)
1592 ++ */
1593 ++
1594 ++ /* val2 = (2^23 * 1.5) */
1595 ++ *val2 = 12582912;
1596 ++ ret = IIO_VAL_FRACTIONAL;
1597 + break;
1598 + }
1599 +
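
As a sanity check on the new scale (back-of-the-envelope, assuming gain = 1
and the internal reference): *val = 1200 (the reference in mV) and
*val2 = 2^23 * 1.5 = 12582912, so IIO_VAL_FRACTIONAL reports
1200 / 12582912, roughly 9.54e-5 mV per LSB, and a full-scale code of 2^23
corresponds to 2^23 * 1200 / 12582912 = 1200 / 1.5 = 800 mV, which matches
Voltage = Raw * Vref / (2^23 * Gain * 1.5).
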
1600 +@@ -208,7 +217,14 @@ static int mcp3911_config(struct mcp3911 *adc)
1601 + u32 configreg;
1602 + int ret;
1603 +
1604 +- device_property_read_u32(dev, "device-addr", &adc->dev_addr);
1605 ++ ret = device_property_read_u32(dev, "microchip,device-addr", &adc->dev_addr);
1606 ++
1607 ++ /*
1608 ++	 * Fall back to "device-addr" due to the historical mismatch between
1609 ++ * dt-bindings and implementation
1610 ++ */
1611 ++ if (ret)
1612 ++ device_property_read_u32(dev, "device-addr", &adc->dev_addr);
1613 + if (adc->dev_addr > 3) {
1614 + dev_err(&adc->spi->dev,
1615 + "invalid device address (%i). Must be in range 0-3.\n",
1616 +diff --git a/drivers/iio/light/cm3605.c b/drivers/iio/light/cm3605.c
1617 +index 50d34a98839c0..a68b95a79d482 100644
1618 +--- a/drivers/iio/light/cm3605.c
1619 ++++ b/drivers/iio/light/cm3605.c
1620 +@@ -226,8 +226,10 @@ static int cm3605_probe(struct platform_device *pdev)
1621 + }
1622 +
1623 + irq = platform_get_irq(pdev, 0);
1624 +- if (irq < 0)
1625 +- return dev_err_probe(dev, irq, "failed to get irq\n");
1626 ++ if (irq < 0) {
1627 ++ ret = dev_err_probe(dev, irq, "failed to get irq\n");
1628 ++ goto out_disable_aset;
1629 ++ }
1630 +
1631 + ret = devm_request_threaded_irq(dev, irq, cm3605_prox_irq,
1632 + NULL, 0, "cm3605", indio_dev);
1633 +diff --git a/drivers/input/joystick/iforce/iforce-serio.c b/drivers/input/joystick/iforce/iforce-serio.c
1634 +index f95a81b9fac72..2380546d79782 100644
1635 +--- a/drivers/input/joystick/iforce/iforce-serio.c
1636 ++++ b/drivers/input/joystick/iforce/iforce-serio.c
1637 +@@ -39,7 +39,7 @@ static void iforce_serio_xmit(struct iforce *iforce)
1638 +
1639 + again:
1640 + if (iforce->xmit.head == iforce->xmit.tail) {
1641 +- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
1642 ++ iforce_clear_xmit_and_wake(iforce);
1643 + spin_unlock_irqrestore(&iforce->xmit_lock, flags);
1644 + return;
1645 + }
1646 +@@ -64,7 +64,7 @@ again:
1647 + if (test_and_clear_bit(IFORCE_XMIT_AGAIN, iforce->xmit_flags))
1648 + goto again;
1649 +
1650 +- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
1651 ++ iforce_clear_xmit_and_wake(iforce);
1652 +
1653 + spin_unlock_irqrestore(&iforce->xmit_lock, flags);
1654 + }
1655 +@@ -169,7 +169,7 @@ static irqreturn_t iforce_serio_irq(struct serio *serio,
1656 + iforce_serio->cmd_response_len = iforce_serio->len;
1657 +
1658 + /* Signal that command is done */
1659 +- wake_up(&iforce->wait);
1660 ++ wake_up_all(&iforce->wait);
1661 + } else if (likely(iforce->type)) {
1662 + iforce_process_packet(iforce, iforce_serio->id,
1663 + iforce_serio->data_in,
1664 +diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
1665 +index ea58805c480fa..cba92bd590a8d 100644
1666 +--- a/drivers/input/joystick/iforce/iforce-usb.c
1667 ++++ b/drivers/input/joystick/iforce/iforce-usb.c
1668 +@@ -30,7 +30,7 @@ static void __iforce_usb_xmit(struct iforce *iforce)
1669 + spin_lock_irqsave(&iforce->xmit_lock, flags);
1670 +
1671 + if (iforce->xmit.head == iforce->xmit.tail) {
1672 +- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
1673 ++ iforce_clear_xmit_and_wake(iforce);
1674 + spin_unlock_irqrestore(&iforce->xmit_lock, flags);
1675 + return;
1676 + }
1677 +@@ -58,9 +58,9 @@ static void __iforce_usb_xmit(struct iforce *iforce)
1678 + XMIT_INC(iforce->xmit.tail, n);
1679 +
1680 + if ( (n=usb_submit_urb(iforce_usb->out, GFP_ATOMIC)) ) {
1681 +- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
1682 + dev_warn(&iforce_usb->intf->dev,
1683 + "usb_submit_urb failed %d\n", n);
1684 ++ iforce_clear_xmit_and_wake(iforce);
1685 + }
1686 +
1687 + /* The IFORCE_XMIT_RUNNING bit is not cleared here. That's intended.
1688 +@@ -175,15 +175,15 @@ static void iforce_usb_out(struct urb *urb)
1689 + struct iforce *iforce = &iforce_usb->iforce;
1690 +
1691 + if (urb->status) {
1692 +- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
1693 + dev_dbg(&iforce_usb->intf->dev, "urb->status %d, exiting\n",
1694 + urb->status);
1695 ++ iforce_clear_xmit_and_wake(iforce);
1696 + return;
1697 + }
1698 +
1699 + __iforce_usb_xmit(iforce);
1700 +
1701 +- wake_up(&iforce->wait);
1702 ++ wake_up_all(&iforce->wait);
1703 + }
1704 +
1705 + static int iforce_usb_probe(struct usb_interface *intf,
1706 +diff --git a/drivers/input/joystick/iforce/iforce.h b/drivers/input/joystick/iforce/iforce.h
1707 +index 6aa761ebbdf77..9ccb9107ccbef 100644
1708 +--- a/drivers/input/joystick/iforce/iforce.h
1709 ++++ b/drivers/input/joystick/iforce/iforce.h
1710 +@@ -119,6 +119,12 @@ static inline int iforce_get_id_packet(struct iforce *iforce, u8 id,
1711 + response_data, response_len);
1712 + }
1713 +
1714 ++static inline void iforce_clear_xmit_and_wake(struct iforce *iforce)
1715 ++{
1716 ++ clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
1717 ++ wake_up_all(&iforce->wait);
1718 ++}
1719 ++
1720 + /* Public functions */
1721 + /* iforce-main.c */
1722 + int iforce_init_device(struct device *parent, u16 bustype,
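
The helper above captures the invariant behind this set of fixes: every path
that clears IFORCE_XMIT_RUNNING must also wake the waiters, otherwise a task
sleeping on iforce->wait can miss the state change and hang. A condensed
sketch of the two sides (the waiter line is illustrative, not lifted from
the driver):

	/* completion side: clear the flag, then wake every waiter */
	clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
	wake_up_all(&iforce->wait);

	/* waiter side: sleep until the transmit machinery goes idle */
	wait_event_interruptible(iforce->wait,
			!test_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags));
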
1723 +diff --git a/drivers/input/misc/rk805-pwrkey.c b/drivers/input/misc/rk805-pwrkey.c
1724 +index 3fb64dbda1a21..76873aa005b41 100644
1725 +--- a/drivers/input/misc/rk805-pwrkey.c
1726 ++++ b/drivers/input/misc/rk805-pwrkey.c
1727 +@@ -98,6 +98,7 @@ static struct platform_driver rk805_pwrkey_driver = {
1728 + };
1729 + module_platform_driver(rk805_pwrkey_driver);
1730 +
1731 ++MODULE_ALIAS("platform:rk805-pwrkey");
1732 + MODULE_AUTHOR("Joseph Chen <chenjh@××××××××××.com>");
1733 + MODULE_DESCRIPTION("RK805 PMIC Power Key driver");
1734 + MODULE_LICENSE("GPL");
1735 +diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
1736 +index 0834d5f866fd8..39d2b03e26317 100644
1737 +--- a/drivers/media/rc/mceusb.c
1738 ++++ b/drivers/media/rc/mceusb.c
1739 +@@ -1416,42 +1416,37 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
1740 + {
1741 + int ret;
1742 + struct device *dev = ir->dev;
1743 +- char *data;
1744 +-
1745 +- data = kzalloc(USB_CTRL_MSG_SZ, GFP_KERNEL);
1746 +- if (!data) {
1747 +- dev_err(dev, "%s: memory allocation failed!", __func__);
1748 +- return;
1749 +- }
1750 ++ char data[USB_CTRL_MSG_SZ];
1751 +
1752 + /*
1753 + * This is a strange one. Windows issues a set address to the device
1754 +	 * on the receive control pipe and expects a certain value pair back
1755 + */
1756 +- ret = usb_control_msg(ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0),
1757 +- USB_REQ_SET_ADDRESS, USB_TYPE_VENDOR, 0, 0,
1758 +- data, USB_CTRL_MSG_SZ, 3000);
1759 ++ ret = usb_control_msg_recv(ir->usbdev, 0, USB_REQ_SET_ADDRESS,
1760 ++ USB_DIR_IN | USB_TYPE_VENDOR,
1761 ++ 0, 0, data, USB_CTRL_MSG_SZ, 3000,
1762 ++ GFP_KERNEL);
1763 + dev_dbg(dev, "set address - ret = %d", ret);
1764 + dev_dbg(dev, "set address - data[0] = %d, data[1] = %d",
1765 + data[0], data[1]);
1766 +
1767 + /* set feature: bit rate 38400 bps */
1768 +- ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
1769 +- USB_REQ_SET_FEATURE, USB_TYPE_VENDOR,
1770 +- 0xc04e, 0x0000, NULL, 0, 3000);
1771 ++ ret = usb_control_msg_send(ir->usbdev, 0,
1772 ++ USB_REQ_SET_FEATURE, USB_TYPE_VENDOR,
1773 ++ 0xc04e, 0x0000, NULL, 0, 3000, GFP_KERNEL);
1774 +
1775 + dev_dbg(dev, "set feature - ret = %d", ret);
1776 +
1777 + /* bRequest 4: set char length to 8 bits */
1778 +- ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
1779 +- 4, USB_TYPE_VENDOR,
1780 +- 0x0808, 0x0000, NULL, 0, 3000);
1781 ++ ret = usb_control_msg_send(ir->usbdev, 0,
1782 ++ 4, USB_TYPE_VENDOR,
1783 ++ 0x0808, 0x0000, NULL, 0, 3000, GFP_KERNEL);
1784 + dev_dbg(dev, "set char length - retB = %d", ret);
1785 +
1786 + /* bRequest 2: set handshaking to use DTR/DSR */
1787 +- ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
1788 +- 2, USB_TYPE_VENDOR,
1789 +- 0x0000, 0x0100, NULL, 0, 3000);
1790 ++ ret = usb_control_msg_send(ir->usbdev, 0,
1791 ++ 2, USB_TYPE_VENDOR,
1792 ++ 0x0000, 0x0100, NULL, 0, 3000, GFP_KERNEL);
1793 + dev_dbg(dev, "set handshake - retC = %d", ret);
1794 +
1795 + /* device resume */
1796 +@@ -1459,8 +1454,6 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
1797 +
1798 + /* get hw/sw revision? */
1799 + mce_command_out(ir, GET_REVISION, sizeof(GET_REVISION));
1800 +-
1801 +- kfree(data);
1802 + }
1803 +
1804 + static void mceusb_gen2_init(struct mceusb_dev *ir)
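
The conversion above relies on the usb_control_msg_recv()/_send() wrappers
(available since v5.10): unlike raw usb_control_msg(), they bounce the data
through an internal kmalloc() buffer, which is why the on-stack data[] array
is now safe, and they return 0 only when the full length was transferred, so
short reads surface as errors. A sketch of the calling convention (variable
names are hypothetical):

	u8 buf[2];	/* stack storage is fine with the wrapper */
	int ret;

	ret = usb_control_msg_recv(udev, 0, USB_REQ_SET_ADDRESS,
				   USB_DIR_IN | USB_TYPE_VENDOR, 0, 0,
				   buf, sizeof(buf), 3000, GFP_KERNEL);
	if (ret)
		return ret;	/* short transfers are errors too */
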
1805 +diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
1806 +index 93ebd174d8487..6e312ac856686 100644
1807 +--- a/drivers/misc/fastrpc.c
1808 ++++ b/drivers/misc/fastrpc.c
1809 +@@ -1943,7 +1943,12 @@ static int fastrpc_cb_probe(struct platform_device *pdev)
1810 + of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions);
1811 +
1812 + spin_lock_irqsave(&cctx->lock, flags);
1813 +- sess = &cctx->session[cctx->sesscount];
1814 ++ if (cctx->sesscount >= FASTRPC_MAX_SESSIONS) {
1815 ++ dev_err(&pdev->dev, "too many sessions\n");
1816 ++ spin_unlock_irqrestore(&cctx->lock, flags);
1817 ++ return -ENOSPC;
1818 ++ }
1819 ++ sess = &cctx->session[cctx->sesscount++];
1820 + sess->used = false;
1821 + sess->valid = true;
1822 + sess->dev = dev;
1823 +@@ -1956,13 +1961,12 @@ static int fastrpc_cb_probe(struct platform_device *pdev)
1824 + struct fastrpc_session_ctx *dup_sess;
1825 +
1826 + for (i = 1; i < sessions; i++) {
1827 +- if (cctx->sesscount++ >= FASTRPC_MAX_SESSIONS)
1828 ++ if (cctx->sesscount >= FASTRPC_MAX_SESSIONS)
1829 + break;
1830 +- dup_sess = &cctx->session[cctx->sesscount];
1831 ++ dup_sess = &cctx->session[cctx->sesscount++];
1832 + memcpy(dup_sess, sess, sizeof(*dup_sess));
1833 + }
1834 + }
1835 +- cctx->sesscount++;
1836 + spin_unlock_irqrestore(&cctx->lock, flags);
1837 + rc = dma_set_mask(dev, DMA_BIT_MASK(32));
1838 + if (rc) {
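
The fastrpc change above is a standard capacity-check-then-consume pattern:
the count is validated against the array bound while the lock is held, and
the slot is claimed and the count advanced in one expression so the check
and the increment cannot drift apart (the old code indexed in one place and
incremented in another, allowing an out-of-bounds write once
FASTRPC_MAX_SESSIONS sessions existed). Condensed from the hunk:

	spin_lock_irqsave(&cctx->lock, flags);
	if (cctx->sesscount >= FASTRPC_MAX_SESSIONS) {
		spin_unlock_irqrestore(&cctx->lock, flags);
		return -ENOSPC;
	}
	sess = &cctx->session[cctx->sesscount++];	/* claim slot + count */
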
1839 +diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
1840 +index c5f1df6ce4c0a..5e4e2d2182d91 100644
1841 +--- a/drivers/mmc/core/sd.c
1842 ++++ b/drivers/mmc/core/sd.c
1843 +@@ -949,15 +949,16 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
1844 +
1845 + /* Erase init depends on CSD and SSR */
1846 + mmc_init_erase(card);
1847 +-
1848 +- /*
1849 +- * Fetch switch information from card.
1850 +- */
1851 +- err = mmc_read_switch(card);
1852 +- if (err)
1853 +- return err;
1854 + }
1855 +
1856 ++ /*
1857 ++	 * Fetch switch information from card. Note that sd3_bus_mode can change
1858 ++	 * if the voltage switch outcome changes, so do this always.
1859 ++ */
1860 ++ err = mmc_read_switch(card);
1861 ++ if (err)
1862 ++ return err;
1863 ++
1864 + /*
1865 + * For SPI, enable CRC as appropriate.
1866 + * This CRC enable is located AFTER the reading of the
1867 +@@ -1480,26 +1481,15 @@ retry:
1868 + if (!v18_fixup_failed && !mmc_host_is_spi(host) && mmc_host_uhs(host) &&
1869 + mmc_sd_card_using_v18(card) &&
1870 + host->ios.signal_voltage != MMC_SIGNAL_VOLTAGE_180) {
1871 +- /*
1872 +- * Re-read switch information in case it has changed since
1873 +- * oldcard was initialized.
1874 +- */
1875 +- if (oldcard) {
1876 +- err = mmc_read_switch(card);
1877 +- if (err)
1878 +- goto free_card;
1879 +- }
1880 +- if (mmc_sd_card_using_v18(card)) {
1881 +- if (mmc_host_set_uhs_voltage(host) ||
1882 +- mmc_sd_init_uhs_card(card)) {
1883 +- v18_fixup_failed = true;
1884 +- mmc_power_cycle(host, ocr);
1885 +- if (!oldcard)
1886 +- mmc_remove_card(card);
1887 +- goto retry;
1888 +- }
1889 +- goto done;
1890 ++ if (mmc_host_set_uhs_voltage(host) ||
1891 ++ mmc_sd_init_uhs_card(card)) {
1892 ++ v18_fixup_failed = true;
1893 ++ mmc_power_cycle(host, ocr);
1894 ++ if (!oldcard)
1895 ++ mmc_remove_card(card);
1896 ++ goto retry;
1897 + }
1898 ++ goto cont;
1899 + }
1900 +
1901 + /* Initialization sequence for UHS-I cards */
1902 +@@ -1534,7 +1524,7 @@ retry:
1903 + mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
1904 + }
1905 + }
1906 +-
1907 ++cont:
1908 + if (!oldcard) {
1909 + /* Read/parse the extension registers. */
1910 + err = sd_read_ext_regs(card);
1911 +@@ -1566,7 +1556,7 @@ retry:
1912 + err = -EINVAL;
1913 + goto free_card;
1914 + }
1915 +-done:
1916 ++
1917 + host->card = card;
1918 + return 0;
1919 +
1920 +diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c
1921 +index 3887ed33c5fe2..fa622639d6401 100644
1922 +--- a/drivers/net/dsa/xrs700x/xrs700x.c
1923 ++++ b/drivers/net/dsa/xrs700x/xrs700x.c
1924 +@@ -109,6 +109,7 @@ static void xrs700x_read_port_counters(struct xrs700x *priv, int port)
1925 + {
1926 + struct xrs700x_port *p = &priv->ports[port];
1927 + struct rtnl_link_stats64 stats;
1928 ++ unsigned long flags;
1929 + int i;
1930 +
1931 + memset(&stats, 0, sizeof(stats));
1932 +@@ -138,9 +139,9 @@ static void xrs700x_read_port_counters(struct xrs700x *priv, int port)
1933 + */
1934 + stats.rx_packets += stats.multicast;
1935 +
1936 +- u64_stats_update_begin(&p->syncp);
1937 ++ flags = u64_stats_update_begin_irqsave(&p->syncp);
1938 + p->stats64 = stats;
1939 +- u64_stats_update_end(&p->syncp);
1940 ++ u64_stats_update_end_irqrestore(&p->syncp, flags);
1941 +
1942 + mutex_unlock(&p->mib_mutex);
1943 + }
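
This hunk and the long run of similar ones below move u64_stats readers and
writers to the IRQ-aware variants. On 32-bit kernels the syncp is a
seqcount, so if an interrupt lands between update_begin and update_end and
its handler tries to read the same statistics, the reader spins forever;
disabling IRQs around the writer closes that window (on 64-bit both variants
compile to almost nothing). The shape of both sides, condensed from the hunk
above:

	/* writer: publish a consistent snapshot with IRQs disabled */
	unsigned long flags;

	flags = u64_stats_update_begin_irqsave(&p->syncp);
	p->stats64 = stats;
	u64_stats_update_end_irqrestore(&p->syncp, flags);

	/* reader: retry until the copy did not race with an update */
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&p->syncp);
		stats = p->stats64;
	} while (u64_stats_fetch_retry_irq(&p->syncp, start));
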
1944 +diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
1945 +index 9e6de2f968fa3..6dae768671e3d 100644
1946 +--- a/drivers/net/ethernet/cortina/gemini.c
1947 ++++ b/drivers/net/ethernet/cortina/gemini.c
1948 +@@ -1919,7 +1919,7 @@ static void gmac_get_stats64(struct net_device *netdev,
1949 +
1950 + /* Racing with RX NAPI */
1951 + do {
1952 +- start = u64_stats_fetch_begin(&port->rx_stats_syncp);
1953 ++ start = u64_stats_fetch_begin_irq(&port->rx_stats_syncp);
1954 +
1955 + stats->rx_packets = port->stats.rx_packets;
1956 + stats->rx_bytes = port->stats.rx_bytes;
1957 +@@ -1931,11 +1931,11 @@ static void gmac_get_stats64(struct net_device *netdev,
1958 + stats->rx_crc_errors = port->stats.rx_crc_errors;
1959 + stats->rx_frame_errors = port->stats.rx_frame_errors;
1960 +
1961 +- } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start));
1962 ++ } while (u64_stats_fetch_retry_irq(&port->rx_stats_syncp, start));
1963 +
1964 + /* Racing with MIB and TX completion interrupts */
1965 + do {
1966 +- start = u64_stats_fetch_begin(&port->ir_stats_syncp);
1967 ++ start = u64_stats_fetch_begin_irq(&port->ir_stats_syncp);
1968 +
1969 + stats->tx_errors = port->stats.tx_errors;
1970 + stats->tx_packets = port->stats.tx_packets;
1971 +@@ -1945,15 +1945,15 @@ static void gmac_get_stats64(struct net_device *netdev,
1972 + stats->rx_missed_errors = port->stats.rx_missed_errors;
1973 + stats->rx_fifo_errors = port->stats.rx_fifo_errors;
1974 +
1975 +- } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start));
1976 ++ } while (u64_stats_fetch_retry_irq(&port->ir_stats_syncp, start));
1977 +
1978 + /* Racing with hard_start_xmit */
1979 + do {
1980 +- start = u64_stats_fetch_begin(&port->tx_stats_syncp);
1981 ++ start = u64_stats_fetch_begin_irq(&port->tx_stats_syncp);
1982 +
1983 + stats->tx_dropped = port->stats.tx_dropped;
1984 +
1985 +- } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start));
1986 ++ } while (u64_stats_fetch_retry_irq(&port->tx_stats_syncp, start));
1987 +
1988 + stats->rx_dropped += stats->rx_missed_errors;
1989 + }
1990 +@@ -2031,18 +2031,18 @@ static void gmac_get_ethtool_stats(struct net_device *netdev,
1991 + /* Racing with MIB interrupt */
1992 + do {
1993 + p = values;
1994 +- start = u64_stats_fetch_begin(&port->ir_stats_syncp);
1995 ++ start = u64_stats_fetch_begin_irq(&port->ir_stats_syncp);
1996 +
1997 + for (i = 0; i < RX_STATS_NUM; i++)
1998 + *p++ = port->hw_stats[i];
1999 +
2000 +- } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start));
2001 ++ } while (u64_stats_fetch_retry_irq(&port->ir_stats_syncp, start));
2002 + values = p;
2003 +
2004 + /* Racing with RX NAPI */
2005 + do {
2006 + p = values;
2007 +- start = u64_stats_fetch_begin(&port->rx_stats_syncp);
2008 ++ start = u64_stats_fetch_begin_irq(&port->rx_stats_syncp);
2009 +
2010 + for (i = 0; i < RX_STATUS_NUM; i++)
2011 + *p++ = port->rx_stats[i];
2012 +@@ -2050,13 +2050,13 @@ static void gmac_get_ethtool_stats(struct net_device *netdev,
2013 + *p++ = port->rx_csum_stats[i];
2014 + *p++ = port->rx_napi_exits;
2015 +
2016 +- } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start));
2017 ++ } while (u64_stats_fetch_retry_irq(&port->rx_stats_syncp, start));
2018 + values = p;
2019 +
2020 + /* Racing with TX start_xmit */
2021 + do {
2022 + p = values;
2023 +- start = u64_stats_fetch_begin(&port->tx_stats_syncp);
2024 ++ start = u64_stats_fetch_begin_irq(&port->tx_stats_syncp);
2025 +
2026 + for (i = 0; i < TX_MAX_FRAGS; i++) {
2027 + *values++ = port->tx_frag_stats[i];
2028 +@@ -2065,7 +2065,7 @@ static void gmac_get_ethtool_stats(struct net_device *netdev,
2029 + *values++ = port->tx_frags_linearized;
2030 + *values++ = port->tx_hw_csummed;
2031 +
2032 +- } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start));
2033 ++ } while (u64_stats_fetch_retry_irq(&port->tx_stats_syncp, start));
2034 + }
2035 +
2036 + static int gmac_get_ksettings(struct net_device *netdev,
2037 +diff --git a/drivers/net/ethernet/fungible/funeth/funeth_txrx.h b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
2038 +index 8708e2895946d..6b125ed04bbad 100644
2039 +--- a/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
2040 ++++ b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
2041 +@@ -205,9 +205,9 @@ struct funeth_rxq {
2042 +
2043 + #define FUN_QSTAT_READ(q, seq, stats_copy) \
2044 + do { \
2045 +- seq = u64_stats_fetch_begin(&(q)->syncp); \
2046 ++ seq = u64_stats_fetch_begin_irq(&(q)->syncp); \
2047 + stats_copy = (q)->stats; \
2048 +- } while (u64_stats_fetch_retry(&(q)->syncp, (seq)))
2049 ++ } while (u64_stats_fetch_retry_irq(&(q)->syncp, (seq)))
2050 +
2051 + #define FUN_INT_NAME_LEN (IFNAMSIZ + 16)
2052 +
2053 +diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
2054 +index 50b384910c839..7b9a2d9d96243 100644
2055 +--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
2056 ++++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
2057 +@@ -177,14 +177,14 @@ gve_get_ethtool_stats(struct net_device *netdev,
2058 + struct gve_rx_ring *rx = &priv->rx[ring];
2059 +
2060 + start =
2061 +- u64_stats_fetch_begin(&priv->rx[ring].statss);
2062 ++ u64_stats_fetch_begin_irq(&priv->rx[ring].statss);
2063 + tmp_rx_pkts = rx->rpackets;
2064 + tmp_rx_bytes = rx->rbytes;
2065 + tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
2066 + tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
2067 + tmp_rx_desc_err_dropped_pkt =
2068 + rx->rx_desc_err_dropped_pkt;
2069 +- } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
2070 ++ } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss,
2071 + start));
2072 + rx_pkts += tmp_rx_pkts;
2073 + rx_bytes += tmp_rx_bytes;
2074 +@@ -198,10 +198,10 @@ gve_get_ethtool_stats(struct net_device *netdev,
2075 + if (priv->tx) {
2076 + do {
2077 + start =
2078 +- u64_stats_fetch_begin(&priv->tx[ring].statss);
2079 ++ u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
2080 + tmp_tx_pkts = priv->tx[ring].pkt_done;
2081 + tmp_tx_bytes = priv->tx[ring].bytes_done;
2082 +- } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
2083 ++ } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss,
2084 + start));
2085 + tx_pkts += tmp_tx_pkts;
2086 + tx_bytes += tmp_tx_bytes;
2087 +@@ -259,13 +259,13 @@ gve_get_ethtool_stats(struct net_device *netdev,
2088 + data[i++] = rx->fill_cnt - rx->cnt;
2089 + do {
2090 + start =
2091 +- u64_stats_fetch_begin(&priv->rx[ring].statss);
2092 ++ u64_stats_fetch_begin_irq(&priv->rx[ring].statss);
2093 + tmp_rx_bytes = rx->rbytes;
2094 + tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
2095 + tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
2096 + tmp_rx_desc_err_dropped_pkt =
2097 + rx->rx_desc_err_dropped_pkt;
2098 +- } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
2099 ++ } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss,
2100 + start));
2101 + data[i++] = tmp_rx_bytes;
2102 + data[i++] = rx->rx_cont_packet_cnt;
2103 +@@ -331,9 +331,9 @@ gve_get_ethtool_stats(struct net_device *netdev,
2104 + }
2105 + do {
2106 + start =
2107 +- u64_stats_fetch_begin(&priv->tx[ring].statss);
2108 ++ u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
2109 + tmp_tx_bytes = tx->bytes_done;
2110 +- } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
2111 ++ } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss,
2112 + start));
2113 + data[i++] = tmp_tx_bytes;
2114 + data[i++] = tx->wake_queue;
2115 +diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
2116 +index 6cafee55efc32..044db3ebb071c 100644
2117 +--- a/drivers/net/ethernet/google/gve/gve_main.c
2118 ++++ b/drivers/net/ethernet/google/gve/gve_main.c
2119 +@@ -51,10 +51,10 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
2120 + for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
2121 + do {
2122 + start =
2123 +- u64_stats_fetch_begin(&priv->rx[ring].statss);
2124 ++ u64_stats_fetch_begin_irq(&priv->rx[ring].statss);
2125 + packets = priv->rx[ring].rpackets;
2126 + bytes = priv->rx[ring].rbytes;
2127 +- } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
2128 ++ } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss,
2129 + start));
2130 + s->rx_packets += packets;
2131 + s->rx_bytes += bytes;
2132 +@@ -64,10 +64,10 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
2133 + for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
2134 + do {
2135 + start =
2136 +- u64_stats_fetch_begin(&priv->tx[ring].statss);
2137 ++ u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
2138 + packets = priv->tx[ring].pkt_done;
2139 + bytes = priv->tx[ring].bytes_done;
2140 +- } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
2141 ++ } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss,
2142 + start));
2143 + s->tx_packets += packets;
2144 + s->tx_bytes += bytes;
2145 +@@ -1274,9 +1274,9 @@ void gve_handle_report_stats(struct gve_priv *priv)
2146 + }
2147 +
2148 + do {
2149 +- start = u64_stats_fetch_begin(&priv->tx[idx].statss);
2150 ++ start = u64_stats_fetch_begin_irq(&priv->tx[idx].statss);
2151 + tx_bytes = priv->tx[idx].bytes_done;
2152 +- } while (u64_stats_fetch_retry(&priv->tx[idx].statss, start));
2153 ++ } while (u64_stats_fetch_retry_irq(&priv->tx[idx].statss, start));
2154 + stats[stats_idx++] = (struct stats) {
2155 + .stat_name = cpu_to_be32(TX_WAKE_CNT),
2156 + .value = cpu_to_be64(priv->tx[idx].wake_queue),
2157 +diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
2158 +index a866bea651103..e5828a658caf4 100644
2159 +--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
2160 ++++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
2161 +@@ -74,14 +74,14 @@ void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
2162 + unsigned int start;
2163 +
2164 + do {
2165 +- start = u64_stats_fetch_begin(&rxq_stats->syncp);
2166 ++ start = u64_stats_fetch_begin_irq(&rxq_stats->syncp);
2167 + stats->pkts = rxq_stats->pkts;
2168 + stats->bytes = rxq_stats->bytes;
2169 + stats->errors = rxq_stats->csum_errors +
2170 + rxq_stats->other_errors;
2171 + stats->csum_errors = rxq_stats->csum_errors;
2172 + stats->other_errors = rxq_stats->other_errors;
2173 +- } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
2174 ++ } while (u64_stats_fetch_retry_irq(&rxq_stats->syncp, start));
2175 + }
2176 +
2177 + /**
2178 +diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
2179 +index 5051cdff2384b..3b6c7b5857376 100644
2180 +--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
2181 ++++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
2182 +@@ -99,14 +99,14 @@ void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
2183 + unsigned int start;
2184 +
2185 + do {
2186 +- start = u64_stats_fetch_begin(&txq_stats->syncp);
2187 ++ start = u64_stats_fetch_begin_irq(&txq_stats->syncp);
2188 + stats->pkts = txq_stats->pkts;
2189 + stats->bytes = txq_stats->bytes;
2190 + stats->tx_busy = txq_stats->tx_busy;
2191 + stats->tx_wake = txq_stats->tx_wake;
2192 + stats->tx_dropped = txq_stats->tx_dropped;
2193 + stats->big_frags_pkts = txq_stats->big_frags_pkts;
2194 +- } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
2195 ++ } while (u64_stats_fetch_retry_irq(&txq_stats->syncp, start));
2196 + }
2197 +
2198 + /**
2199 +diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
2200 +index 5fdf9b7179f55..5a1027b072155 100644
2201 +--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
2202 ++++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
2203 +@@ -75,6 +75,7 @@ struct mlxbf_gige {
2204 + struct net_device *netdev;
2205 + struct platform_device *pdev;
2206 + void __iomem *mdio_io;
2207 ++ void __iomem *clk_io;
2208 + struct mii_bus *mdiobus;
2209 + spinlock_t lock; /* for packet processing indices */
2210 + u16 rx_q_entries;
2211 +@@ -137,7 +138,8 @@ enum mlxbf_gige_res {
2212 + MLXBF_GIGE_RES_MDIO9,
2213 + MLXBF_GIGE_RES_GPIO0,
2214 + MLXBF_GIGE_RES_LLU,
2215 +- MLXBF_GIGE_RES_PLU
2216 ++ MLXBF_GIGE_RES_PLU,
2217 ++ MLXBF_GIGE_RES_CLK
2218 + };
2219 +
2220 + /* Version of register data returned by mlxbf_gige_get_regs() */
2221 +diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
2222 +index 2e6c1b7af0964..85155cd9405c5 100644
2223 +--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
2224 ++++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
2225 +@@ -22,10 +22,23 @@
2226 + #include <linux/property.h>
2227 +
2228 + #include "mlxbf_gige.h"
2229 ++#include "mlxbf_gige_regs.h"
2230 +
2231 + #define MLXBF_GIGE_MDIO_GW_OFFSET 0x0
2232 + #define MLXBF_GIGE_MDIO_CFG_OFFSET 0x4
2233 +
2234 ++#define MLXBF_GIGE_MDIO_FREQ_REFERENCE 156250000ULL
2235 ++#define MLXBF_GIGE_MDIO_COREPLL_CONST 16384ULL
2236 ++#define MLXBF_GIGE_MDC_CLK_NS 400
2237 ++#define MLXBF_GIGE_MDIO_PLL_I1CLK_REG1 0x4
2238 ++#define MLXBF_GIGE_MDIO_PLL_I1CLK_REG2 0x8
2239 ++#define MLXBF_GIGE_MDIO_CORE_F_SHIFT 0
2240 ++#define MLXBF_GIGE_MDIO_CORE_F_MASK GENMASK(25, 0)
2241 ++#define MLXBF_GIGE_MDIO_CORE_R_SHIFT 26
2242 ++#define MLXBF_GIGE_MDIO_CORE_R_MASK GENMASK(31, 26)
2243 ++#define MLXBF_GIGE_MDIO_CORE_OD_SHIFT 0
2244 ++#define MLXBF_GIGE_MDIO_CORE_OD_MASK GENMASK(3, 0)
2245 ++
2246 + /* Support clause 22 */
2247 + #define MLXBF_GIGE_MDIO_CL22_ST1 0x1
2248 + #define MLXBF_GIGE_MDIO_CL22_WRITE 0x1
2249 +@@ -50,27 +63,76 @@
2250 + #define MLXBF_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK GENMASK(23, 16)
2251 + #define MLXBF_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK GENMASK(31, 24)
2252 +
2253 ++#define MLXBF_GIGE_MDIO_CFG_VAL (FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_MODE_MASK, 1) | \
2254 ++ FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO3_3_MASK, 1) | \
2255 ++ FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK, 1) | \
2256 ++ FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK, 6) | \
2257 ++ FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK, 13))
2258 ++
2259 ++#define MLXBF_GIGE_BF2_COREPLL_ADDR 0x02800c30
2260 ++#define MLXBF_GIGE_BF2_COREPLL_SIZE 0x0000000c
2261 ++
2262 ++static struct resource corepll_params[] = {
2263 ++ [MLXBF_GIGE_VERSION_BF2] = {
2264 ++ .start = MLXBF_GIGE_BF2_COREPLL_ADDR,
2265 ++ .end = MLXBF_GIGE_BF2_COREPLL_ADDR + MLXBF_GIGE_BF2_COREPLL_SIZE - 1,
2266 ++ .name = "COREPLL_RES"
2267 ++ },
2268 ++};
2269 ++
2270 ++/* Returns core clock i1clk in Hz */
2271 ++static u64 calculate_i1clk(struct mlxbf_gige *priv)
2272 ++{
2273 ++ u8 core_od, core_r;
2274 ++ u64 freq_output;
2275 ++ u32 reg1, reg2;
2276 ++ u32 core_f;
2277 ++
2278 ++ reg1 = readl(priv->clk_io + MLXBF_GIGE_MDIO_PLL_I1CLK_REG1);
2279 ++ reg2 = readl(priv->clk_io + MLXBF_GIGE_MDIO_PLL_I1CLK_REG2);
2280 ++
2281 ++ core_f = (reg1 & MLXBF_GIGE_MDIO_CORE_F_MASK) >>
2282 ++ MLXBF_GIGE_MDIO_CORE_F_SHIFT;
2283 ++ core_r = (reg1 & MLXBF_GIGE_MDIO_CORE_R_MASK) >>
2284 ++ MLXBF_GIGE_MDIO_CORE_R_SHIFT;
2285 ++ core_od = (reg2 & MLXBF_GIGE_MDIO_CORE_OD_MASK) >>
2286 ++ MLXBF_GIGE_MDIO_CORE_OD_SHIFT;
2287 ++
2288 ++	/* Compute PLL output frequency as follows:
2289 ++ *
2290 ++ * CORE_F / 16384
2291 ++ * freq_output = freq_reference * ----------------------------
2292 ++ * (CORE_R + 1) * (CORE_OD + 1)
2293 ++ */
2294 ++ freq_output = div_u64((MLXBF_GIGE_MDIO_FREQ_REFERENCE * core_f),
2295 ++ MLXBF_GIGE_MDIO_COREPLL_CONST);
2296 ++ freq_output = div_u64(freq_output, (core_r + 1) * (core_od + 1));
2297 ++
2298 ++ return freq_output;
2299 ++}
2300 ++
2301 + /* Formula for encoding the MDIO period. The encoded value is
2302 + * passed to the MDIO config register.
2303 + *
2304 +- * mdc_clk = 2*(val + 1)*i1clk
2305 ++ * mdc_clk = 2*(val + 1)*(core clock in sec)
2306 + *
2307 +- * 400 ns = 2*(val + 1)*(((1/430)*1000) ns)
2308 ++ * i1clk is in Hz:
2309 ++ * 400 ns = 2*(val + 1)*(1/i1clk)
2310 + *
2311 +- * val = (((400 * 430 / 1000) / 2) - 1)
2312 ++ * val = (((400/10^9) / (1/i1clk) / 2) - 1)
2313 ++ * val = (400/2 * i1clk)/10^9 - 1
2314 + */
2315 +-#define MLXBF_GIGE_I1CLK_MHZ 430
2316 +-#define MLXBF_GIGE_MDC_CLK_NS 400
2317 ++static u8 mdio_period_map(struct mlxbf_gige *priv)
2318 ++{
2319 ++ u8 mdio_period;
2320 ++ u64 i1clk;
2321 +
2322 +-#define MLXBF_GIGE_MDIO_PERIOD (((MLXBF_GIGE_MDC_CLK_NS * MLXBF_GIGE_I1CLK_MHZ / 1000) / 2) - 1)
2323 ++ i1clk = calculate_i1clk(priv);
2324 +
2325 +-#define MLXBF_GIGE_MDIO_CFG_VAL (FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_MODE_MASK, 1) | \
2326 +- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO3_3_MASK, 1) | \
2327 +- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK, 1) | \
2328 +- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDC_PERIOD_MASK, \
2329 +- MLXBF_GIGE_MDIO_PERIOD) | \
2330 +- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK, 6) | \
2331 +- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK, 13))
2332 ++ mdio_period = div_u64((MLXBF_GIGE_MDC_CLK_NS >> 1) * i1clk, 1000000000) - 1;
2333 ++
2334 ++ return mdio_period;
2335 ++}
2336 +
2337 + static u32 mlxbf_gige_mdio_create_cmd(u16 data, int phy_add,
2338 + int phy_reg, u32 opcode)
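
Plugging numbers into the encoding above (a worked example, assuming the PLL
comes out at the 430 MHz the removed hardcoded constant implied):
mdio_period = (400 ns / 2) * 430000000 Hz / 10^9 - 1 = 86 - 1 = 85, the same
value the old MLXBF_GIGE_MDIO_PERIOD macro produced; the difference is that
the new code derives i1clk from the core PLL registers instead of assuming
it.
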
2339 +@@ -124,9 +186,9 @@ static int mlxbf_gige_mdio_write(struct mii_bus *bus, int phy_add,
2340 + int phy_reg, u16 val)
2341 + {
2342 + struct mlxbf_gige *priv = bus->priv;
2343 ++ u32 temp;
2344 + u32 cmd;
2345 + int ret;
2346 +- u32 temp;
2347 +
2348 + if (phy_reg & MII_ADDR_C45)
2349 + return -EOPNOTSUPP;
2350 +@@ -144,18 +206,44 @@ static int mlxbf_gige_mdio_write(struct mii_bus *bus, int phy_add,
2351 + return ret;
2352 + }
2353 +
2354 ++static void mlxbf_gige_mdio_cfg(struct mlxbf_gige *priv)
2355 ++{
2356 ++ u8 mdio_period;
2357 ++ u32 val;
2358 ++
2359 ++ mdio_period = mdio_period_map(priv);
2360 ++
2361 ++ val = MLXBF_GIGE_MDIO_CFG_VAL;
2362 ++ val |= FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDC_PERIOD_MASK, mdio_period);
2363 ++ writel(val, priv->mdio_io + MLXBF_GIGE_MDIO_CFG_OFFSET);
2364 ++}
2365 ++
2366 + int mlxbf_gige_mdio_probe(struct platform_device *pdev, struct mlxbf_gige *priv)
2367 + {
2368 + struct device *dev = &pdev->dev;
2369 ++ struct resource *res;
2370 + int ret;
2371 +
2372 + priv->mdio_io = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_MDIO9);
2373 + if (IS_ERR(priv->mdio_io))
2374 + return PTR_ERR(priv->mdio_io);
2375 +
2376 +- /* Configure mdio parameters */
2377 +- writel(MLXBF_GIGE_MDIO_CFG_VAL,
2378 +- priv->mdio_io + MLXBF_GIGE_MDIO_CFG_OFFSET);
2379 ++	/* The clk resource is shared with other drivers, so
2380 ++	 * devm_platform_ioremap_resource() cannot be used.
2381 ++ */
2382 ++ res = platform_get_resource(pdev, IORESOURCE_MEM, MLXBF_GIGE_RES_CLK);
2383 ++ if (!res) {
2384 ++ /* For backward compatibility with older ACPI tables, also keep
2385 ++ * CLK resource internal to the driver.
2386 ++ */
2387 ++ res = &corepll_params[MLXBF_GIGE_VERSION_BF2];
2388 ++ }
2389 ++
2390 ++ priv->clk_io = devm_ioremap(dev, res->start, resource_size(res));
2391 ++ if (IS_ERR(priv->clk_io))
2392 ++ return PTR_ERR(priv->clk_io);
2393 ++
2394 ++ mlxbf_gige_mdio_cfg(priv);
2395 +
2396 + priv->mdiobus = devm_mdiobus_alloc(dev);
2397 + if (!priv->mdiobus) {
2398 +diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
2399 +index 5fb33c9294bf9..7be3a793984d5 100644
2400 +--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
2401 ++++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
2402 +@@ -8,6 +8,8 @@
2403 + #ifndef __MLXBF_GIGE_REGS_H__
2404 + #define __MLXBF_GIGE_REGS_H__
2405 +
2406 ++#define MLXBF_GIGE_VERSION 0x0000
2407 ++#define MLXBF_GIGE_VERSION_BF2 0x0
2408 + #define MLXBF_GIGE_STATUS 0x0010
2409 + #define MLXBF_GIGE_STATUS_READY BIT(0)
2410 + #define MLXBF_GIGE_INT_STATUS 0x0028
2411 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
2412 +index fe663b0ab7086..68d87e61bdc05 100644
2413 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
2414 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
2415 +@@ -423,7 +423,8 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
2416 +
2417 + parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
2418 + ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
2419 +- 0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0);
2420 ++ 0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0,
2421 ++ 0);
2422 +
2423 + rt = ip_route_output_key(tun->net, &fl4);
2424 + if (IS_ERR(rt))
2425 +diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
2426 +index 6dea7f8c14814..51f8a08163777 100644
2427 +--- a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
2428 ++++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
2429 +@@ -425,7 +425,8 @@ static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx)
2430 + lan966x_ifh_get_src_port(skb->data, &src_port);
2431 + lan966x_ifh_get_timestamp(skb->data, &timestamp);
2432 +
2433 +- WARN_ON(src_port >= lan966x->num_phys_ports);
2434 ++ if (WARN_ON(src_port >= lan966x->num_phys_ports))
2435 ++ goto free_skb;
2436 +
2437 + skb->dev = lan966x->ports[src_port]->dev;
2438 + skb_pull(skb, IFH_LEN * sizeof(u32));
2439 +@@ -449,6 +450,8 @@ static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx)
2440 +
2441 + return skb;
2442 +
2443 ++free_skb:
2444 ++ kfree_skb(skb);
2445 + unmap_page:
2446 + dma_unmap_page(lan966x->dev, (dma_addr_t)db->dataptr,
2447 + FDMA_DCB_STATUS_BLOCKL(db->status),
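
The reshuffle above uses the fact that WARN_ON() evaluates to the condition
it tests, so it can gate a recovery path instead of only logging a splat: an
out-of-range source port now drops the frame rather than indexing ports[]
out of bounds.

	if (WARN_ON(src_port >= lan966x->num_phys_ports))
		goto free_skb;	/* log the warning, then drop the frame */
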
2448 +diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
2449 +index 304f84aadc36b..21844beba72df 100644
2450 +--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
2451 ++++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
2452 +@@ -113,6 +113,8 @@ static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap)
2453 + /* This assumes STATUS_WORD_POS == 1, Status
2454 + * just after last data
2455 + */
2456 ++ if (!byte_swap)
2457 ++ val = ntohl((__force __be32)val);
2458 + byte_cnt -= (4 - XTR_VALID_BYTES(val));
2459 + eof_flag = true;
2460 + break;
2461 +diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
2462 +index 3206ba83b1aaa..de2ef5bf8c694 100644
2463 +--- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
2464 ++++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
2465 +@@ -127,10 +127,11 @@ static int nfp_policer_validate(const struct flow_action *action,
2466 + return -EOPNOTSUPP;
2467 + }
2468 +
2469 +- if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
2470 ++ if (act->police.notexceed.act_id != FLOW_ACTION_CONTINUE &&
2471 ++ act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
2472 + act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
2473 + NL_SET_ERR_MSG_MOD(extack,
2474 +- "Offload not supported when conform action is not pipe or ok");
2475 ++ "Offload not supported when conform action is not continue, pipe or ok");
2476 + return -EOPNOTSUPP;
2477 + }
2478 +
2479 +diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
2480 +index 4e56a99087fab..32d46f07ea851 100644
2481 +--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
2482 ++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
2483 +@@ -1629,21 +1629,21 @@ static void nfp_net_stat64(struct net_device *netdev,
2484 + unsigned int start;
2485 +
2486 + do {
2487 +- start = u64_stats_fetch_begin(&r_vec->rx_sync);
2488 ++ start = u64_stats_fetch_begin_irq(&r_vec->rx_sync);
2489 + data[0] = r_vec->rx_pkts;
2490 + data[1] = r_vec->rx_bytes;
2491 + data[2] = r_vec->rx_drops;
2492 +- } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
2493 ++ } while (u64_stats_fetch_retry_irq(&r_vec->rx_sync, start));
2494 + stats->rx_packets += data[0];
2495 + stats->rx_bytes += data[1];
2496 + stats->rx_dropped += data[2];
2497 +
2498 + do {
2499 +- start = u64_stats_fetch_begin(&r_vec->tx_sync);
2500 ++ start = u64_stats_fetch_begin_irq(&r_vec->tx_sync);
2501 + data[0] = r_vec->tx_pkts;
2502 + data[1] = r_vec->tx_bytes;
2503 + data[2] = r_vec->tx_errors;
2504 +- } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
2505 ++ } while (u64_stats_fetch_retry_irq(&r_vec->tx_sync, start));
2506 + stats->tx_packets += data[0];
2507 + stats->tx_bytes += data[1];
2508 + stats->tx_errors += data[2];
2509 +diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
2510 +index e6ee45afd80c7..2d7d30ec54301 100644
2511 +--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
2512 ++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
2513 +@@ -494,7 +494,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
2514 + unsigned int start;
2515 +
2516 + do {
2517 +- start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync);
2518 ++ start = u64_stats_fetch_begin_irq(&nn->r_vecs[i].rx_sync);
2519 + data[0] = nn->r_vecs[i].rx_pkts;
2520 + tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
2521 + tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
2522 +@@ -502,10 +502,10 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
2523 + tmp[3] = nn->r_vecs[i].hw_csum_rx_error;
2524 + tmp[4] = nn->r_vecs[i].rx_replace_buf_alloc_fail;
2525 + tmp[5] = nn->r_vecs[i].hw_tls_rx;
2526 +- } while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start));
2527 ++ } while (u64_stats_fetch_retry_irq(&nn->r_vecs[i].rx_sync, start));
2528 +
2529 + do {
2530 +- start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
2531 ++ start = u64_stats_fetch_begin_irq(&nn->r_vecs[i].tx_sync);
2532 + data[1] = nn->r_vecs[i].tx_pkts;
2533 + data[2] = nn->r_vecs[i].tx_busy;
2534 + tmp[6] = nn->r_vecs[i].hw_csum_tx;
2535 +@@ -515,7 +515,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
2536 + tmp[10] = nn->r_vecs[i].hw_tls_tx;
2537 + tmp[11] = nn->r_vecs[i].tls_tx_fallback;
2538 + tmp[12] = nn->r_vecs[i].tls_tx_no_fallback;
2539 +- } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));
2540 ++ } while (u64_stats_fetch_retry_irq(&nn->r_vecs[i].tx_sync, start));
2541 +
2542 + data += NN_RVEC_PER_Q_STATS;
2543 +
2544 +diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
2545 +index bc70c6abd6a5b..58cf7cc54f408 100644
2546 +--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
2547 ++++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
2548 +@@ -1273,7 +1273,7 @@ static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
2549 + bool removing;
2550 + int err = 0;
2551 +
2552 +- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2553 ++ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
2554 + if (!entry)
2555 + return -ENOMEM;
2556 +
2557 +diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
2558 +index 3bf20211cceb4..3829c2805b16c 100644
2559 +--- a/drivers/net/ethernet/smsc/smsc911x.c
2560 ++++ b/drivers/net/ethernet/smsc/smsc911x.c
2561 +@@ -1037,6 +1037,8 @@ static int smsc911x_mii_probe(struct net_device *dev)
2562 + return ret;
2563 + }
2564 +
2565 ++ /* Indicate that the MAC is responsible for managing PHY PM */
2566 ++ phydev->mac_managed_pm = true;
2567 + phy_attached_info(phydev);
2568 +
2569 + phy_set_max_speed(phydev, SPEED_100);
2570 +@@ -2587,6 +2589,8 @@ static int smsc911x_suspend(struct device *dev)
2571 + if (netif_running(ndev)) {
2572 + netif_stop_queue(ndev);
2573 + netif_device_detach(ndev);
2574 ++ if (!device_may_wakeup(dev))
2575 ++ phy_stop(ndev->phydev);
2576 + }
2577 +
2578 + /* enable wake on LAN, energy detection and the external PME
2579 +@@ -2628,6 +2632,8 @@ static int smsc911x_resume(struct device *dev)
2580 + if (netif_running(ndev)) {
2581 + netif_device_attach(ndev);
2582 + netif_start_queue(ndev);
2583 ++ if (!device_may_wakeup(dev))
2584 ++ phy_start(ndev->phydev);
2585 + }
2586 +
2587 + return 0;
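
Setting phydev->mac_managed_pm tells the PHY core that its generic MDIO bus
suspend/resume callbacks should stand down; the MAC driver then owns the PHY
across system sleep, which is why the two hunks above pair phy_stop() on
suspend with phy_start() on resume whenever wake-on-LAN is not armed
(device_may_wakeup() false).
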
2588 +diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
2589 +index 6afdf1622944e..5cf218c674a5a 100644
2590 +--- a/drivers/net/ieee802154/adf7242.c
2591 ++++ b/drivers/net/ieee802154/adf7242.c
2592 +@@ -1310,10 +1310,11 @@ static void adf7242_remove(struct spi_device *spi)
2593 +
2594 + debugfs_remove_recursive(lp->debugfs_root);
2595 +
2596 ++ ieee802154_unregister_hw(lp->hw);
2597 ++
2598 + cancel_delayed_work_sync(&lp->work);
2599 + destroy_workqueue(lp->wqueue);
2600 +
2601 +- ieee802154_unregister_hw(lp->hw);
2602 + mutex_destroy(&lp->bmux);
2603 + ieee802154_free_hw(lp->hw);
2604 + }
2605 +diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
2606 +index e470e3398abc2..9a1a5b2036240 100644
2607 +--- a/drivers/net/netdevsim/netdev.c
2608 ++++ b/drivers/net/netdevsim/netdev.c
2609 +@@ -67,10 +67,10 @@ nsim_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
2610 + unsigned int start;
2611 +
2612 + do {
2613 +- start = u64_stats_fetch_begin(&ns->syncp);
2614 ++ start = u64_stats_fetch_begin_irq(&ns->syncp);
2615 + stats->tx_bytes = ns->tx_bytes;
2616 + stats->tx_packets = ns->tx_packets;
2617 +- } while (u64_stats_fetch_retry(&ns->syncp, start));
2618 ++ } while (u64_stats_fetch_retry_irq(&ns->syncp, start));
2619 + }
2620 +
2621 + static int
2622 +diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
2623 +index 22139901f01c7..34483a4bd688a 100644
2624 +--- a/drivers/net/phy/micrel.c
2625 ++++ b/drivers/net/phy/micrel.c
2626 +@@ -2838,12 +2838,18 @@ static int lan8814_config_init(struct phy_device *phydev)
2627 + return 0;
2628 + }
2629 +
2630 ++/* It is intentional that there is no 'lan8814_take_coma_mode' call on
2631 ++ * suspend: the GPIO line can be shared, so if one of the PHYs went back
2632 ++ * into coma mode, all the other PHYs would follow, which is
2633 ++ * wrong.
2634 ++ */
2635 + static int lan8814_release_coma_mode(struct phy_device *phydev)
2636 + {
2637 + struct gpio_desc *gpiod;
2638 +
2639 + gpiod = devm_gpiod_get_optional(&phydev->mdio.dev, "coma-mode",
2640 +- GPIOD_OUT_HIGH_OPEN_DRAIN);
2641 ++ GPIOD_OUT_HIGH_OPEN_DRAIN |
2642 ++ GPIOD_FLAGS_BIT_NONEXCLUSIVE);
2643 + if (IS_ERR(gpiod))
2644 + return PTR_ERR(gpiod);
2645 +
2646 +diff --git a/drivers/peci/controller/peci-aspeed.c b/drivers/peci/controller/peci-aspeed.c
2647 +index 1925ddc13f002..731c5d8f75c66 100644
2648 +--- a/drivers/peci/controller/peci-aspeed.c
2649 ++++ b/drivers/peci/controller/peci-aspeed.c
2650 +@@ -523,7 +523,7 @@ static int aspeed_peci_probe(struct platform_device *pdev)
2651 + return PTR_ERR(priv->base);
2652 +
2653 + priv->irq = platform_get_irq(pdev, 0);
2654 +- if (!priv->irq)
2655 ++ if (priv->irq < 0)
2656 + return priv->irq;
2657 +
2658 + ret = devm_request_irq(&pdev->dev, priv->irq, aspeed_peci_irq_handler,
2659 +diff --git a/drivers/platform/mellanox/mlxreg-lc.c b/drivers/platform/mellanox/mlxreg-lc.c
2660 +index 55834ccb4ac7c..e578c7bc060bb 100644
2661 +--- a/drivers/platform/mellanox/mlxreg-lc.c
2662 ++++ b/drivers/platform/mellanox/mlxreg-lc.c
2663 +@@ -460,8 +460,6 @@ static int mlxreg_lc_power_on_off(struct mlxreg_lc *mlxreg_lc, u8 action)
2664 + u32 regval;
2665 + int err;
2666 +
2667 +- mutex_lock(&mlxreg_lc->lock);
2668 +-
2669 + err = regmap_read(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_pwr, &regval);
2670 + if (err)
2671 + goto regmap_read_fail;
2672 +@@ -474,7 +472,6 @@ static int mlxreg_lc_power_on_off(struct mlxreg_lc *mlxreg_lc, u8 action)
2673 + err = regmap_write(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_pwr, regval);
2674 +
2675 + regmap_read_fail:
2676 +- mutex_unlock(&mlxreg_lc->lock);
2677 + return err;
2678 + }
2679 +
2680 +@@ -491,8 +488,6 @@ static int mlxreg_lc_enable_disable(struct mlxreg_lc *mlxreg_lc, bool action)
2681 +	 * line card which has already been enabled. Disabling does not affect the disabled line
2682 + * card.
2683 + */
2684 +- mutex_lock(&mlxreg_lc->lock);
2685 +-
2686 + err = regmap_read(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_ena, &regval);
2687 + if (err)
2688 + goto regmap_read_fail;
2689 +@@ -505,7 +500,6 @@ static int mlxreg_lc_enable_disable(struct mlxreg_lc *mlxreg_lc, bool action)
2690 + err = regmap_write(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_ena, regval);
2691 +
2692 + regmap_read_fail:
2693 +- mutex_unlock(&mlxreg_lc->lock);
2694 + return err;
2695 + }
2696 +
2697 +@@ -537,6 +531,15 @@ mlxreg_lc_sn4800_c16_config_init(struct mlxreg_lc *mlxreg_lc, void *regmap,
2698 +
2699 + static void
2700 + mlxreg_lc_state_update(struct mlxreg_lc *mlxreg_lc, enum mlxreg_lc_state state, u8 action)
2701 ++{
2702 ++ if (action)
2703 ++ mlxreg_lc->state |= state;
2704 ++ else
2705 ++ mlxreg_lc->state &= ~state;
2706 ++}
2707 ++
2708 ++static void
2709 ++mlxreg_lc_state_update_locked(struct mlxreg_lc *mlxreg_lc, enum mlxreg_lc_state state, u8 action)
2710 + {
2711 + mutex_lock(&mlxreg_lc->lock);
2712 +
2713 +@@ -560,8 +563,11 @@ static int mlxreg_lc_event_handler(void *handle, enum mlxreg_hotplug_kind kind,
2714 + dev_info(mlxreg_lc->dev, "linecard#%d state %d event kind %d action %d\n",
2715 + mlxreg_lc->data->slot, mlxreg_lc->state, kind, action);
2716 +
2717 +- if (!(mlxreg_lc->state & MLXREG_LC_INITIALIZED))
2718 ++ mutex_lock(&mlxreg_lc->lock);
2719 ++ if (!(mlxreg_lc->state & MLXREG_LC_INITIALIZED)) {
2720 ++ mutex_unlock(&mlxreg_lc->lock);
2721 + return 0;
2722 ++ }
2723 +
2724 + switch (kind) {
2725 + case MLXREG_HOTPLUG_LC_SYNCED:
2726 +@@ -574,7 +580,7 @@ static int mlxreg_lc_event_handler(void *handle, enum mlxreg_hotplug_kind kind,
2727 + if (!(mlxreg_lc->state & MLXREG_LC_POWERED) && action) {
2728 + err = mlxreg_lc_power_on_off(mlxreg_lc, 1);
2729 + if (err)
2730 +- return err;
2731 ++ goto mlxreg_lc_power_on_off_fail;
2732 + }
2733 + /* In case line card is configured - enable it. */
2734 + if (mlxreg_lc->state & MLXREG_LC_CONFIGURED && action)
2735 +@@ -588,12 +594,13 @@ static int mlxreg_lc_event_handler(void *handle, enum mlxreg_hotplug_kind kind,
2736 + /* In case line card is configured - enable it. */
2737 + if (mlxreg_lc->state & MLXREG_LC_CONFIGURED)
2738 + err = mlxreg_lc_enable_disable(mlxreg_lc, 1);
2739 ++ mutex_unlock(&mlxreg_lc->lock);
2740 + return err;
2741 + }
2742 + err = mlxreg_lc_create_static_devices(mlxreg_lc, mlxreg_lc->main_devs,
2743 + mlxreg_lc->main_devs_num);
2744 + if (err)
2745 +- return err;
2746 ++ goto mlxreg_lc_create_static_devices_fail;
2747 +
2748 + /* In case line card is already in ready state - enable it. */
2749 + if (mlxreg_lc->state & MLXREG_LC_CONFIGURED)
2750 +@@ -620,6 +627,10 @@ static int mlxreg_lc_event_handler(void *handle, enum mlxreg_hotplug_kind kind,
2751 + break;
2752 + }
2753 +
2754 ++mlxreg_lc_power_on_off_fail:
2755 ++mlxreg_lc_create_static_devices_fail:
2756 ++ mutex_unlock(&mlxreg_lc->lock);
2757 ++
2758 + return err;
2759 + }
2760 +
2761 +@@ -665,7 +676,7 @@ static int mlxreg_lc_completion_notify(void *handle, struct i2c_adapter *parent,
2762 + if (err)
2763 + goto mlxreg_lc_create_static_devices_failed;
2764 +
2765 +- mlxreg_lc_state_update(mlxreg_lc, MLXREG_LC_POWERED, 1);
2766 ++ mlxreg_lc_state_update_locked(mlxreg_lc, MLXREG_LC_POWERED, 1);
2767 + }
2768 +
2769 + /* Verify if line card is synchronized. */
2770 +@@ -676,7 +687,7 @@ static int mlxreg_lc_completion_notify(void *handle, struct i2c_adapter *parent,
2771 + /* Power on line card if necessary. */
2772 + if (regval & mlxreg_lc->data->mask) {
2773 + mlxreg_lc->state |= MLXREG_LC_SYNCED;
2774 +- mlxreg_lc_state_update(mlxreg_lc, MLXREG_LC_SYNCED, 1);
2775 ++ mlxreg_lc_state_update_locked(mlxreg_lc, MLXREG_LC_SYNCED, 1);
2776 + if (mlxreg_lc->state & ~MLXREG_LC_POWERED) {
2777 + err = mlxreg_lc_power_on_off(mlxreg_lc, 1);
2778 + if (err)
2779 +@@ -684,7 +695,7 @@ static int mlxreg_lc_completion_notify(void *handle, struct i2c_adapter *parent,
2780 + }
2781 + }
2782 +
2783 +- mlxreg_lc_state_update(mlxreg_lc, MLXREG_LC_INITIALIZED, 1);
2784 ++ mlxreg_lc_state_update_locked(mlxreg_lc, MLXREG_LC_INITIALIZED, 1);
2785 +
2786 + return 0;
2787 +
2788 +@@ -863,7 +874,6 @@ static int mlxreg_lc_probe(struct platform_device *pdev)
2789 + if (err) {
2790 + dev_err(&pdev->dev, "Failed to sync regmap for client %s at bus %d at addr 0x%02x\n",
2791 + data->hpdev.brdinfo->type, data->hpdev.nr, data->hpdev.brdinfo->addr);
2792 +- err = PTR_ERR(regmap);
2793 + goto regcache_sync_fail;
2794 + }
2795 +
2796 +@@ -905,6 +915,8 @@ static int mlxreg_lc_remove(struct platform_device *pdev)
2797 + struct mlxreg_core_data *data = dev_get_platdata(&pdev->dev);
2798 + struct mlxreg_lc *mlxreg_lc = platform_get_drvdata(pdev);
2799 +
2800 ++ mlxreg_lc_state_update_locked(mlxreg_lc, MLXREG_LC_INITIALIZED, 0);
2801 ++
2802 + /*
2803 + * Probing and removing are invoked by hotplug events raised upon line card insertion and
2804 +	 * removal. If the probing procedure fails, all data is cleared. However, the hotplug event still
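
The mlxreg-lc changes above push the locking out to the boundary: the bare
helpers (mlxreg_lc_power_on_off(), mlxreg_lc_enable_disable(),
mlxreg_lc_state_update()) now expect the caller to hold mlxreg_lc->lock, and
only the event handler and the *_locked wrapper take it, so the test of
mlxreg_lc->state and the action taken on it can no longer race. The wrapper
shape, condensed from the hunk:

	static void
	mlxreg_lc_state_update_locked(struct mlxreg_lc *mlxreg_lc,
				      enum mlxreg_lc_state state, u8 action)
	{
		mutex_lock(&mlxreg_lc->lock);
		mlxreg_lc_state_update(mlxreg_lc, state, action);
		mutex_unlock(&mlxreg_lc->lock);
	}
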
2805 +diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
2806 +index 154317e9910d2..5c757c7f64dee 100644
2807 +--- a/drivers/platform/x86/pmc_atom.c
2808 ++++ b/drivers/platform/x86/pmc_atom.c
2809 +@@ -232,7 +232,7 @@ static void pmc_power_off(void)
2810 + pm1_cnt_port = acpi_base_addr + PM1_CNT;
2811 +
2812 + pm1_cnt_value = inl(pm1_cnt_port);
2813 +- pm1_cnt_value &= SLEEP_TYPE_MASK;
2814 ++ pm1_cnt_value &= ~SLEEP_TYPE_MASK;
2815 + pm1_cnt_value |= SLEEP_TYPE_S5;
2816 + pm1_cnt_value |= SLEEP_ENABLE;
2817 +
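
The pmc_atom one-liner above is the classic read-modify-write slip: as the hunk
reads, the old sleep-type field must be cleared with the negated mask before
the new value is OR-ed in, otherwise stale bits survive in the register. A
standalone illustration with made-up mask values, not the PMC's real ones:

#include <stdint.h>
#include <stdio.h>

#define SLEEP_TYPE_MASK	0x1c00u	/* example field mask */
#define SLEEP_TYPE_S5	0x1c00u	/* example field value */
#define SLEEP_ENABLE	0x2000u

int main(void)
{
	uint32_t reg = 0x0415u;		/* pretend value read from the port */

	reg &= ~SLEEP_TYPE_MASK;	/* clear the old field first */
	reg |= SLEEP_TYPE_S5;		/* then set the new value */
	reg |= SLEEP_ENABLE;

	printf("0x%04x\n", reg);	/* prints 0x3c15 */
	return 0;
}
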
2818 +diff --git a/drivers/platform/x86/x86-android-tablets.c b/drivers/platform/x86/x86-android-tablets.c
2819 +index 4803759774358..4acd6fa8d43b8 100644
2820 +--- a/drivers/platform/x86/x86-android-tablets.c
2821 ++++ b/drivers/platform/x86/x86-android-tablets.c
2822 +@@ -663,9 +663,23 @@ static const struct x86_i2c_client_info chuwi_hi8_i2c_clients[] __initconst = {
2823 + },
2824 + };
2825 +
2826 ++static int __init chuwi_hi8_init(void)
2827 ++{
2828 ++ /*
2829 ++ * Avoid the acpi_unregister_gsi() call in x86_acpi_irq_helper_get()
2830 ++ * breaking the touchscreen + logging various errors when the Windows
2831 ++ * BIOS is used.
2832 ++ */
2833 ++ if (acpi_dev_present("MSSL0001", NULL, 1))
2834 ++ return -ENODEV;
2835 ++
2836 ++ return 0;
2837 ++}
2838 ++
2839 + static const struct x86_dev_info chuwi_hi8_info __initconst = {
2840 + .i2c_client_info = chuwi_hi8_i2c_clients,
2841 + .i2c_client_count = ARRAY_SIZE(chuwi_hi8_i2c_clients),
2842 ++ .init = chuwi_hi8_init,
2843 + };
2844 +
2845 + #define CZC_EC_EXTRA_PORT 0x68
2846 +diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
2847 +index b5ec7726592c8..71d2931cb885c 100644
2848 +--- a/drivers/soundwire/qcom.c
2849 ++++ b/drivers/soundwire/qcom.c
2850 +@@ -167,7 +167,7 @@ struct qcom_swrm_ctrl {
2851 + u8 wcmd_id;
2852 + struct qcom_swrm_port_config pconfig[QCOM_SDW_MAX_PORTS];
2853 + struct sdw_stream_runtime *sruntime[SWRM_MAX_DAIS];
2854 +- enum sdw_slave_status status[SDW_MAX_DEVICES];
2855 ++ enum sdw_slave_status status[SDW_MAX_DEVICES + 1];
2856 + int (*reg_read)(struct qcom_swrm_ctrl *ctrl, int reg, u32 *val);
2857 + int (*reg_write)(struct qcom_swrm_ctrl *ctrl, int reg, int val);
2858 + u32 slave_status;
2859 +@@ -411,7 +411,7 @@ static int qcom_swrm_get_alert_slave_dev_num(struct qcom_swrm_ctrl *ctrl)
2860 +
2861 + ctrl->reg_read(ctrl, SWRM_MCP_SLV_STATUS, &val);
2862 +
2863 +- for (dev_num = 0; dev_num < SDW_MAX_DEVICES; dev_num++) {
2864 ++ for (dev_num = 0; dev_num <= SDW_MAX_DEVICES; dev_num++) {
2865 + status = (val >> (dev_num * SWRM_MCP_SLV_STATUS_SZ));
2866 +
2867 + if ((status & SWRM_MCP_SLV_STATUS_MASK) == SDW_SLAVE_ALERT) {
2868 +@@ -431,7 +431,7 @@ static void qcom_swrm_get_device_status(struct qcom_swrm_ctrl *ctrl)
2869 + ctrl->reg_read(ctrl, SWRM_MCP_SLV_STATUS, &val);
2870 + ctrl->slave_status = val;
2871 +
2872 +- for (i = 0; i < SDW_MAX_DEVICES; i++) {
2873 ++ for (i = 0; i <= SDW_MAX_DEVICES; i++) {
2874 + u32 s;
2875 +
2876 + s = (val >> (i * 2));
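
The two soundwire loop changes and the array resize belong together: device
numbers run from 0 through SDW_MAX_DEVICES inclusive, so the status array needs
SDW_MAX_DEVICES + 1 slots and the loops an inclusive upper bound. The same
off-by-one shape in a toy program (MAX_DEVICES is a stand-in, not the real
constant):

#include <stdio.h>

#define MAX_DEVICES 11			/* stand-in for SDW_MAX_DEVICES */

static int status[MAX_DEVICES + 1];	/* one slot per number 0..MAX_DEVICES */

int main(void)
{
	int dev_num, count = 0;

	/* inclusive bound: the last device number must not be skipped */
	for (dev_num = 0; dev_num <= MAX_DEVICES; dev_num++) {
		status[dev_num] = 0;
		count++;
	}

	printf("slots touched: %d\n", count);	/* MAX_DEVICES + 1 = 12 */
	return 0;
}
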
2877 +diff --git a/drivers/staging/r8188eu/os_dep/os_intfs.c b/drivers/staging/r8188eu/os_dep/os_intfs.c
2878 +index cac9553666e6d..aa100b5141e1e 100644
2879 +--- a/drivers/staging/r8188eu/os_dep/os_intfs.c
2880 ++++ b/drivers/staging/r8188eu/os_dep/os_intfs.c
2881 +@@ -18,6 +18,7 @@ MODULE_LICENSE("GPL");
2882 + MODULE_DESCRIPTION("Realtek Wireless Lan Driver");
2883 + MODULE_AUTHOR("Realtek Semiconductor Corp.");
2884 + MODULE_VERSION(DRIVERVERSION);
2885 ++MODULE_FIRMWARE("rtlwifi/rtl8188eufw.bin");
2886 +
2887 + #define CONFIG_BR_EXT_BRNAME "br0"
2888 + #define RTW_NOTCH_FILTER 0 /* 0:Disable, 1:Enable, */
2889 +diff --git a/drivers/staging/r8188eu/os_dep/usb_intf.c b/drivers/staging/r8188eu/os_dep/usb_intf.c
2890 +index 68869c5daeff8..e5dc977d2fa21 100644
2891 +--- a/drivers/staging/r8188eu/os_dep/usb_intf.c
2892 ++++ b/drivers/staging/r8188eu/os_dep/usb_intf.c
2893 +@@ -28,6 +28,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
2894 + /*=== Realtek demoboard ===*/
2895 + {USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8179)}, /* 8188EUS */
2896 + {USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */
2897 ++ {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill USB-N150 Nano */
2898 + /*=== Customer ID ===*/
2899 + /****** 8188EUS ********/
2900 + {USB_DEVICE(0x07B8, 0x8179)}, /* Abocom - Abocom */
2901 +diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
2902 +index 2326aae6709e2..bb7db96ed8219 100644
2903 +--- a/drivers/staging/rtl8712/rtl8712_cmd.c
2904 ++++ b/drivers/staging/rtl8712/rtl8712_cmd.c
2905 +@@ -117,34 +117,6 @@ static void r871x_internal_cmd_hdl(struct _adapter *padapter, u8 *pbuf)
2906 + kfree(pdrvcmd->pbuf);
2907 + }
2908 +
2909 +-static u8 read_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
2910 +-{
2911 +- void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
2912 +- struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
2913 +-
2914 +- /* invoke cmd->callback function */
2915 +- pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
2916 +- if (!pcmd_callback)
2917 +- r8712_free_cmd_obj(pcmd);
2918 +- else
2919 +- pcmd_callback(padapter, pcmd);
2920 +- return H2C_SUCCESS;
2921 +-}
2922 +-
2923 +-static u8 write_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
2924 +-{
2925 +- void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
2926 +- struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
2927 +-
2928 +- /* invoke cmd->callback function */
2929 +- pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
2930 +- if (!pcmd_callback)
2931 +- r8712_free_cmd_obj(pcmd);
2932 +- else
2933 +- pcmd_callback(padapter, pcmd);
2934 +- return H2C_SUCCESS;
2935 +-}
2936 +-
2937 + static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
2938 + {
2939 + struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
2940 +@@ -213,14 +185,6 @@ static struct cmd_obj *cmd_hdl_filter(struct _adapter *padapter,
2941 + pcmd_r = NULL;
2942 +
2943 + switch (pcmd->cmdcode) {
2944 +- case GEN_CMD_CODE(_Read_MACREG):
2945 +- read_macreg_hdl(padapter, (u8 *)pcmd);
2946 +- pcmd_r = pcmd;
2947 +- break;
2948 +- case GEN_CMD_CODE(_Write_MACREG):
2949 +- write_macreg_hdl(padapter, (u8 *)pcmd);
2950 +- pcmd_r = pcmd;
2951 +- break;
2952 + case GEN_CMD_CODE(_Read_BBREG):
2953 + read_bbreg_hdl(padapter, (u8 *)pcmd);
2954 + break;
2955 +diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
2956 +index e92c658dba1c6..3f7827d72c480 100644
2957 +--- a/drivers/thunderbolt/ctl.c
2958 ++++ b/drivers/thunderbolt/ctl.c
2959 +@@ -407,7 +407,7 @@ static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
2960 +
2961 + static int tb_async_error(const struct ctl_pkg *pkg)
2962 + {
2963 +- const struct cfg_error_pkg *error = (const struct cfg_error_pkg *)pkg;
2964 ++ const struct cfg_error_pkg *error = pkg->buffer;
2965 +
2966 + if (pkg->frame.eof != TB_CFG_PKG_ERROR)
2967 + return false;
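
The thunderbolt fix swaps a cast of the wrapper struct for a read of its buffer
pointer: the error package is stored behind pkg->buffer, not at the start of
struct ctl_pkg itself. Reduced to a toy with invented field names:

struct payload {
	int code;
};

struct pkg {
	long frame_meta;	/* wrapper bookkeeping comes first */
	void *buffer;		/* the actual packet lives here */
};

static int payload_code(const struct pkg *p)
{
	/* Wrong: (const struct payload *)p would read frame_meta as the payload. */
	const struct payload *pl = p->buffer;	/* right: follow the pointer */

	return pl->code;
}
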
2968 +diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
2969 +index 561e1d77240e2..64f0aec7e70ae 100644
2970 +--- a/drivers/thunderbolt/switch.c
2971 ++++ b/drivers/thunderbolt/switch.c
2972 +@@ -3781,14 +3781,18 @@ int tb_switch_pcie_l1_enable(struct tb_switch *sw)
2973 + */
2974 + int tb_switch_xhci_connect(struct tb_switch *sw)
2975 + {
2976 +- bool usb_port1, usb_port3, xhci_port1, xhci_port3;
2977 + struct tb_port *port1, *port3;
2978 + int ret;
2979 +
2980 ++ if (sw->generation != 3)
2981 ++ return 0;
2982 ++
2983 + port1 = &sw->ports[1];
2984 + port3 = &sw->ports[3];
2985 +
2986 + if (tb_switch_is_alpine_ridge(sw)) {
2987 ++ bool usb_port1, usb_port3, xhci_port1, xhci_port3;
2988 ++
2989 + usb_port1 = tb_lc_is_usb_plugged(port1);
2990 + usb_port3 = tb_lc_is_usb_plugged(port3);
2991 + xhci_port1 = tb_lc_is_xhci_connected(port1);
2992 +diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
2993 +index caa5c14ed57f0..01c112e2e2142 100644
2994 +--- a/drivers/tty/n_gsm.c
2995 ++++ b/drivers/tty/n_gsm.c
2996 +@@ -248,7 +248,7 @@ struct gsm_mux {
2997 + bool constipated; /* Asked by remote to shut up */
2998 + bool has_devices; /* Devices were registered */
2999 +
3000 +- spinlock_t tx_lock;
3001 ++ struct mutex tx_mutex;
3002 + unsigned int tx_bytes; /* TX data outstanding */
3003 + #define TX_THRESH_HI 8192
3004 + #define TX_THRESH_LO 2048
3005 +@@ -256,7 +256,7 @@ struct gsm_mux {
3006 + struct list_head tx_data_list; /* Pending data packets */
3007 +
3008 + /* Control messages */
3009 +- struct timer_list kick_timer; /* Kick TX queuing on timeout */
3010 ++ struct delayed_work kick_timeout; /* Kick TX queuing on timeout */
3011 + struct timer_list t2_timer; /* Retransmit timer for commands */
3012 + int cretries; /* Command retry counter */
3013 + struct gsm_control *pending_cmd;/* Our current pending command */
3014 +@@ -680,7 +680,6 @@ static int gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
3015 + struct gsm_msg *msg;
3016 + u8 *dp;
3017 + int ocr;
3018 +- unsigned long flags;
3019 +
3020 + msg = gsm_data_alloc(gsm, addr, 0, control);
3021 + if (!msg)
3022 +@@ -702,10 +701,10 @@ static int gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
3023 +
3024 + gsm_print_packet("Q->", addr, cr, control, NULL, 0);
3025 +
3026 +- spin_lock_irqsave(&gsm->tx_lock, flags);
3027 ++ mutex_lock(&gsm->tx_mutex);
3028 + list_add_tail(&msg->list, &gsm->tx_ctrl_list);
3029 + gsm->tx_bytes += msg->len;
3030 +- spin_unlock_irqrestore(&gsm->tx_lock, flags);
3031 ++ mutex_unlock(&gsm->tx_mutex);
3032 + gsmld_write_trigger(gsm);
3033 +
3034 + return 0;
3035 +@@ -730,7 +729,7 @@ static void gsm_dlci_clear_queues(struct gsm_mux *gsm, struct gsm_dlci *dlci)
3036 + spin_unlock_irqrestore(&dlci->lock, flags);
3037 +
3038 + /* Clear data packets in MUX write queue */
3039 +- spin_lock_irqsave(&gsm->tx_lock, flags);
3040 ++ mutex_lock(&gsm->tx_mutex);
3041 + list_for_each_entry_safe(msg, nmsg, &gsm->tx_data_list, list) {
3042 + if (msg->addr != addr)
3043 + continue;
3044 +@@ -738,7 +737,7 @@ static void gsm_dlci_clear_queues(struct gsm_mux *gsm, struct gsm_dlci *dlci)
3045 + list_del(&msg->list);
3046 + kfree(msg);
3047 + }
3048 +- spin_unlock_irqrestore(&gsm->tx_lock, flags);
3049 ++ mutex_unlock(&gsm->tx_mutex);
3050 + }
3051 +
3052 + /**
3053 +@@ -1009,7 +1008,7 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
3054 + gsm->tx_bytes += msg->len;
3055 +
3056 + gsmld_write_trigger(gsm);
3057 +- mod_timer(&gsm->kick_timer, jiffies + 10 * gsm->t1 * HZ / 100);
3058 ++ schedule_delayed_work(&gsm->kick_timeout, 10 * gsm->t1 * HZ / 100);
3059 + }
3060 +
3061 + /**
3062 +@@ -1024,10 +1023,9 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
3063 +
3064 + static void gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
3065 + {
3066 +- unsigned long flags;
3067 +- spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
3068 ++ mutex_lock(&dlci->gsm->tx_mutex);
3069 + __gsm_data_queue(dlci, msg);
3070 +- spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
3071 ++ mutex_unlock(&dlci->gsm->tx_mutex);
3072 + }
3073 +
3074 + /**
3075 +@@ -1039,7 +1037,7 @@ static void gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
3076 + * is data. Keep to the MRU of the mux. This path handles the usual tty
3077 + * interface which is a byte stream with optional modem data.
3078 + *
3079 +- * Caller must hold the tx_lock of the mux.
3080 ++ * Caller must hold the tx_mutex of the mux.
3081 + */
3082 +
3083 + static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci)
3084 +@@ -1099,7 +1097,7 @@ static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci)
3085 + * is data. Keep to the MRU of the mux. This path handles framed data
3086 + * queued as skbuffs to the DLCI.
3087 + *
3088 +- * Caller must hold the tx_lock of the mux.
3089 ++ * Caller must hold the tx_mutex of the mux.
3090 + */
3091 +
3092 + static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
3093 +@@ -1115,7 +1113,7 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
3094 + if (dlci->adaption == 4)
3095 + overhead = 1;
3096 +
3097 +- /* dlci->skb is locked by tx_lock */
3098 ++ /* dlci->skb is locked by tx_mutex */
3099 + if (dlci->skb == NULL) {
3100 + dlci->skb = skb_dequeue_tail(&dlci->skb_list);
3101 + if (dlci->skb == NULL)
3102 +@@ -1169,7 +1167,7 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
3103 + * Push an empty frame into the transmit queue to update the modem status
3104 + * bits and to transmit an optional break.
3105 + *
3106 +- * Caller must hold the tx_lock of the mux.
3107 ++ * Caller must hold the tx_mutex of the mux.
3108 + */
3109 +
3110 + static int gsm_dlci_modem_output(struct gsm_mux *gsm, struct gsm_dlci *dlci,
3111 +@@ -1283,13 +1281,12 @@ static int gsm_dlci_data_sweep(struct gsm_mux *gsm)
3112 +
3113 + static void gsm_dlci_data_kick(struct gsm_dlci *dlci)
3114 + {
3115 +- unsigned long flags;
3116 + int sweep;
3117 +
3118 + if (dlci->constipated)
3119 + return;
3120 +
3121 +- spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
3122 ++ mutex_lock(&dlci->gsm->tx_mutex);
3123 + /* If we have nothing running then we need to fire up */
3124 + sweep = (dlci->gsm->tx_bytes < TX_THRESH_LO);
3125 + if (dlci->gsm->tx_bytes == 0) {
3126 +@@ -1300,7 +1297,7 @@ static void gsm_dlci_data_kick(struct gsm_dlci *dlci)
3127 + }
3128 + if (sweep)
3129 + gsm_dlci_data_sweep(dlci->gsm);
3130 +- spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
3131 ++ mutex_unlock(&dlci->gsm->tx_mutex);
3132 + }
3133 +
3134 + /*
3135 +@@ -1984,24 +1981,23 @@ static void gsm_dlci_command(struct gsm_dlci *dlci, const u8 *data, int len)
3136 + }
3137 +
3138 + /**
3139 +- * gsm_kick_timer - transmit if possible
3140 +- * @t: timer contained in our gsm object
3141 ++ * gsm_kick_timeout - transmit if possible
3142 ++ * @work: work contained in our gsm object
3143 + *
3144 + * Transmit data from DLCIs if the queue is empty. We can't rely on
3145 + * a tty wakeup except when we filled the pipe so we need to fire off
3146 + * new data ourselves in other cases.
3147 + */
3148 +-static void gsm_kick_timer(struct timer_list *t)
3149 ++static void gsm_kick_timeout(struct work_struct *work)
3150 + {
3151 +- struct gsm_mux *gsm = from_timer(gsm, t, kick_timer);
3152 +- unsigned long flags;
3153 ++ struct gsm_mux *gsm = container_of(work, struct gsm_mux, kick_timeout.work);
3154 + int sent = 0;
3155 +
3156 +- spin_lock_irqsave(&gsm->tx_lock, flags);
3157 ++ mutex_lock(&gsm->tx_mutex);
3158 + /* If we have nothing running then we need to fire up */
3159 + if (gsm->tx_bytes < TX_THRESH_LO)
3160 + sent = gsm_dlci_data_sweep(gsm);
3161 +- spin_unlock_irqrestore(&gsm->tx_lock, flags);
3162 ++ mutex_unlock(&gsm->tx_mutex);
3163 +
3164 + if (sent && debug & 4)
3165 + pr_info("%s TX queue stalled\n", __func__);
3166 +@@ -2458,7 +2454,7 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc)
3167 + }
3168 +
3169 + /* Finish outstanding timers, making sure they are done */
3170 +- del_timer_sync(&gsm->kick_timer);
3171 ++ cancel_delayed_work_sync(&gsm->kick_timeout);
3172 + del_timer_sync(&gsm->t2_timer);
3173 +
3174 + /* Finish writing to ldisc */
3175 +@@ -2501,13 +2497,6 @@ static int gsm_activate_mux(struct gsm_mux *gsm)
3176 + if (dlci == NULL)
3177 + return -ENOMEM;
3178 +
3179 +- timer_setup(&gsm->kick_timer, gsm_kick_timer, 0);
3180 +- timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
3181 +- INIT_WORK(&gsm->tx_work, gsmld_write_task);
3182 +- init_waitqueue_head(&gsm->event);
3183 +- spin_lock_init(&gsm->control_lock);
3184 +- spin_lock_init(&gsm->tx_lock);
3185 +-
3186 + if (gsm->encoding == 0)
3187 + gsm->receive = gsm0_receive;
3188 + else
3189 +@@ -2538,6 +2527,7 @@ static void gsm_free_mux(struct gsm_mux *gsm)
3190 + break;
3191 + }
3192 + }
3193 ++ mutex_destroy(&gsm->tx_mutex);
3194 + mutex_destroy(&gsm->mutex);
3195 + kfree(gsm->txframe);
3196 + kfree(gsm->buf);
3197 +@@ -2609,9 +2599,15 @@ static struct gsm_mux *gsm_alloc_mux(void)
3198 + }
3199 + spin_lock_init(&gsm->lock);
3200 + mutex_init(&gsm->mutex);
3201 ++ mutex_init(&gsm->tx_mutex);
3202 + kref_init(&gsm->ref);
3203 + INIT_LIST_HEAD(&gsm->tx_ctrl_list);
3204 + INIT_LIST_HEAD(&gsm->tx_data_list);
3205 ++ INIT_DELAYED_WORK(&gsm->kick_timeout, gsm_kick_timeout);
3206 ++ timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
3207 ++ INIT_WORK(&gsm->tx_work, gsmld_write_task);
3208 ++ init_waitqueue_head(&gsm->event);
3209 ++ spin_lock_init(&gsm->control_lock);
3210 +
3211 + gsm->t1 = T1;
3212 + gsm->t2 = T2;
3213 +@@ -2636,6 +2632,7 @@ static struct gsm_mux *gsm_alloc_mux(void)
3214 + }
3215 + spin_unlock(&gsm_mux_lock);
3216 + if (i == MAX_MUX) {
3217 ++ mutex_destroy(&gsm->tx_mutex);
3218 + mutex_destroy(&gsm->mutex);
3219 + kfree(gsm->txframe);
3220 + kfree(gsm->buf);
3221 +@@ -2791,17 +2788,16 @@ static void gsmld_write_trigger(struct gsm_mux *gsm)
3222 + static void gsmld_write_task(struct work_struct *work)
3223 + {
3224 + struct gsm_mux *gsm = container_of(work, struct gsm_mux, tx_work);
3225 +- unsigned long flags;
3226 + int i, ret;
3227 +
3228 + /* All outstanding control channel and control messages and one data
3229 + * frame are sent.
3230 + */
3231 + ret = -ENODEV;
3232 +- spin_lock_irqsave(&gsm->tx_lock, flags);
3233 ++ mutex_lock(&gsm->tx_mutex);
3234 + if (gsm->tty)
3235 + ret = gsm_data_kick(gsm);
3236 +- spin_unlock_irqrestore(&gsm->tx_lock, flags);
3237 ++ mutex_unlock(&gsm->tx_mutex);
3238 +
3239 + if (ret >= 0)
3240 + for (i = 0; i < NUM_DLCI; i++)
3241 +@@ -2858,7 +2854,8 @@ static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
3242 + flags = *fp++;
3243 + switch (flags) {
3244 + case TTY_NORMAL:
3245 +- gsm->receive(gsm, *cp);
3246 ++ if (gsm->receive)
3247 ++ gsm->receive(gsm, *cp);
3248 + break;
3249 + case TTY_OVERRUN:
3250 + case TTY_BREAK:
3251 +@@ -2946,10 +2943,6 @@ static int gsmld_open(struct tty_struct *tty)
3252 +
3253 + gsmld_attach_gsm(tty, gsm);
3254 +
3255 +- timer_setup(&gsm->kick_timer, gsm_kick_timer, 0);
3256 +- timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
3257 +- INIT_WORK(&gsm->tx_work, gsmld_write_task);
3258 +-
3259 + return 0;
3260 + }
3261 +
3262 +@@ -3012,7 +3005,6 @@ static ssize_t gsmld_write(struct tty_struct *tty, struct file *file,
3263 + const unsigned char *buf, size_t nr)
3264 + {
3265 + struct gsm_mux *gsm = tty->disc_data;
3266 +- unsigned long flags;
3267 + int space;
3268 + int ret;
3269 +
3270 +@@ -3020,13 +3012,13 @@ static ssize_t gsmld_write(struct tty_struct *tty, struct file *file,
3271 + return -ENODEV;
3272 +
3273 + ret = -ENOBUFS;
3274 +- spin_lock_irqsave(&gsm->tx_lock, flags);
3275 ++ mutex_lock(&gsm->tx_mutex);
3276 + space = tty_write_room(tty);
3277 + if (space >= nr)
3278 + ret = tty->ops->write(tty, buf, nr);
3279 + else
3280 + set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
3281 +- spin_unlock_irqrestore(&gsm->tx_lock, flags);
3282 ++ mutex_unlock(&gsm->tx_mutex);
3283 +
3284 + return ret;
3285 + }
3286 +@@ -3323,14 +3315,13 @@ static struct tty_ldisc_ops tty_ldisc_packet = {
3287 + static void gsm_modem_upd_via_data(struct gsm_dlci *dlci, u8 brk)
3288 + {
3289 + struct gsm_mux *gsm = dlci->gsm;
3290 +- unsigned long flags;
3291 +
3292 + if (dlci->state != DLCI_OPEN || dlci->adaption != 2)
3293 + return;
3294 +
3295 +- spin_lock_irqsave(&gsm->tx_lock, flags);
3296 ++ mutex_lock(&gsm->tx_mutex);
3297 + gsm_dlci_modem_output(gsm, dlci, brk);
3298 +- spin_unlock_irqrestore(&gsm->tx_lock, flags);
3299 ++ mutex_unlock(&gsm->tx_mutex);
3300 + }
3301 +
3302 + /**
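
Read as a whole, the n_gsm hunks trade the IRQ-safe tx spinlock for a mutex,
and that choice forces the kick timer out of softirq context: a timer_list
callback may not sleep, so it becomes a delayed_work running in process
context, where mutex_lock() is legal; the one-time initialisation also moves
into gsm_alloc_mux() so it happens exactly once per mux. A condensed sketch of
that conversion pattern on a hypothetical struct, not the ldisc code itself:

#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>

struct mux {
	struct mutex tx_mutex;			/* was: spinlock_t tx_lock */
	struct delayed_work kick_timeout;	/* was: struct timer_list kick_timer */
};

static void kick_timeout_fn(struct work_struct *work)
{
	struct mux *m = container_of(work, struct mux, kick_timeout.work);

	mutex_lock(&m->tx_mutex);	/* sleeping lock is fine in a work item */
	/* ... sweep and transmit pending data ... */
	mutex_unlock(&m->tx_mutex);
}

static void mux_init(struct mux *m)
{
	mutex_init(&m->tx_mutex);
	INIT_DELAYED_WORK(&m->kick_timeout, kick_timeout_fn);
}

static void mux_kick_later(struct mux *m)
{
	/* was: mod_timer(&m->kick_timer, jiffies + HZ / 10); */
	schedule_delayed_work(&m->kick_timeout, HZ / 10);
}

static void mux_teardown(struct mux *m)
{
	/* was: del_timer_sync(&m->kick_timer); */
	cancel_delayed_work_sync(&m->kick_timeout);
}
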
3303 +diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
3304 +index dd1c7e4bd1c95..400a1686a6b26 100644
3305 +--- a/drivers/tty/serial/atmel_serial.c
3306 ++++ b/drivers/tty/serial/atmel_serial.c
3307 +@@ -296,9 +296,6 @@ static int atmel_config_rs485(struct uart_port *port,
3308 +
3309 + mode = atmel_uart_readl(port, ATMEL_US_MR);
3310 +
3311 +- /* Resetting serial mode to RS232 (0x0) */
3312 +- mode &= ~ATMEL_US_USMODE;
3313 +-
3314 + if (rs485conf->flags & SER_RS485_ENABLED) {
3315 + dev_dbg(port->dev, "Setting UART to RS485\n");
3316 + if (rs485conf->flags & SER_RS485_RX_DURING_TX)
3317 +@@ -308,6 +305,7 @@ static int atmel_config_rs485(struct uart_port *port,
3318 +
3319 + atmel_uart_writel(port, ATMEL_US_TTGR,
3320 + rs485conf->delay_rts_after_send);
3321 ++ mode &= ~ATMEL_US_USMODE;
3322 + mode |= ATMEL_US_USMODE_RS485;
3323 + } else {
3324 + dev_dbg(port->dev, "Setting UART to RS232\n");
3325 +diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
3326 +index 561d6d0b7c945..2945c1b890880 100644
3327 +--- a/drivers/tty/serial/fsl_lpuart.c
3328 ++++ b/drivers/tty/serial/fsl_lpuart.c
3329 +@@ -1381,9 +1381,9 @@ static int lpuart_config_rs485(struct uart_port *port,
3330 + * Note: UART is assumed to be active high.
3331 + */
3332 + if (rs485->flags & SER_RS485_RTS_ON_SEND)
3333 +- modem &= ~UARTMODEM_TXRTSPOL;
3334 +- else if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
3335 + modem |= UARTMODEM_TXRTSPOL;
3336 ++ else if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
3337 ++ modem &= ~UARTMODEM_TXRTSPOL;
3338 + }
3339 +
3340 + writeb(modem, sport->port.membase + UARTMODEM);
3341 +@@ -2182,6 +2182,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
3342 + uart_update_timeout(port, termios->c_cflag, baud);
3343 +
3344 + /* wait for the transmit engine to complete */
3345 ++ lpuart32_write(&sport->port, 0, UARTMODIR);
3346 + lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC);
3347 +
3348 + /* disable transmit and receive */
3349 +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
3350 +index 6eaf8eb846619..b8f5bc19416d9 100644
3351 +--- a/drivers/tty/vt/vt.c
3352 ++++ b/drivers/tty/vt/vt.c
3353 +@@ -4662,9 +4662,11 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op)
3354 + console_lock();
3355 + if (vc->vc_mode != KD_TEXT)
3356 + rc = -EINVAL;
3357 +- else if (vc->vc_sw->con_font_set)
3358 ++ else if (vc->vc_sw->con_font_set) {
3359 ++ if (vc_is_sel(vc))
3360 ++ clear_selection();
3361 + rc = vc->vc_sw->con_font_set(vc, &font, op->flags);
3362 +- else
3363 ++ } else
3364 + rc = -ENOSYS;
3365 + console_unlock();
3366 + kfree(font.data);
3367 +@@ -4691,9 +4693,11 @@ static int con_font_default(struct vc_data *vc, struct console_font_op *op)
3368 + console_unlock();
3369 + return -EINVAL;
3370 + }
3371 +- if (vc->vc_sw->con_font_default)
3372 ++ if (vc->vc_sw->con_font_default) {
3373 ++ if (vc_is_sel(vc))
3374 ++ clear_selection();
3375 + rc = vc->vc_sw->con_font_default(vc, &font, s);
3376 +- else
3377 ++ } else
3378 + rc = -ENOSYS;
3379 + console_unlock();
3380 + if (!rc) {
3381 +diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
3382 +index d21b69997e750..5adcb349718c3 100644
3383 +--- a/drivers/usb/cdns3/cdns3-gadget.c
3384 ++++ b/drivers/usb/cdns3/cdns3-gadget.c
3385 +@@ -1530,7 +1530,8 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
3386 + TRB_LEN(le32_to_cpu(trb->length));
3387 +
3388 + if (priv_req->num_of_trb > 1 &&
3389 +- le32_to_cpu(trb->control) & TRB_SMM)
3390 ++ le32_to_cpu(trb->control) & TRB_SMM &&
3391 ++ le32_to_cpu(trb->control) & TRB_CHAIN)
3392 + transfer_end = true;
3393 +
3394 + cdns3_ep_inc_deq(priv_ep);
3395 +@@ -1690,6 +1691,7 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
3396 + ep_cfg &= ~EP_CFG_ENABLE;
3397 + writel(ep_cfg, &priv_dev->regs->ep_cfg);
3398 + priv_ep->flags &= ~EP_QUIRK_ISO_OUT_EN;
3399 ++ priv_ep->flags |= EP_UPDATE_EP_TRBADDR;
3400 + }
3401 + cdns3_transfer_completed(priv_dev, priv_ep);
3402 + } else if (!(priv_ep->flags & EP_STALLED) &&
3403 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
3404 +index 9b9aea24d58c4..f3c6aad277895 100644
3405 +--- a/drivers/usb/class/cdc-acm.c
3406 ++++ b/drivers/usb/class/cdc-acm.c
3407 +@@ -1810,6 +1810,9 @@ static const struct usb_device_id acm_ids[] = {
3408 + { USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
3409 + .driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
3410 + },
3411 ++ { USB_DEVICE(0x0c26, 0x0020), /* Icom ICF3400 Serie */
3412 ++ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
3413 ++ },
3414 + { USB_DEVICE(0x0ca6, 0xa050), /* Castles VEGA3000 */
3415 + .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
3416 + },
3417 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
3418 +index 68e9121c18788..dfef85a18eb55 100644
3419 +--- a/drivers/usb/core/hub.c
3420 ++++ b/drivers/usb/core/hub.c
3421 +@@ -6048,6 +6048,11 @@ re_enumerate:
3422 + * the reset is over (using their post_reset method).
3423 + *
3424 + * Return: The same as for usb_reset_and_verify_device().
3425 ++ * However, if a reset is already in progress (for instance, if a
3426 ++ * driver doesn't have pre_ or post_reset() callbacks, and while
3427 ++ * being unbound or re-bound during the ongoing reset its disconnect()
3428 ++ * or probe() routine tries to perform a second, nested reset), the
3429 ++ * routine returns -EINPROGRESS.
3430 + *
3431 + * Note:
3432 + * The caller must own the device lock. For example, it's safe to use
3433 +@@ -6081,6 +6086,10 @@ int usb_reset_device(struct usb_device *udev)
3434 + return -EISDIR;
3435 + }
3436 +
3437 ++ if (udev->reset_in_progress)
3438 ++ return -EINPROGRESS;
3439 ++ udev->reset_in_progress = 1;
3440 ++
3441 + port_dev = hub->ports[udev->portnum - 1];
3442 +
3443 + /*
3444 +@@ -6145,6 +6154,7 @@ int usb_reset_device(struct usb_device *udev)
3445 +
3446 + usb_autosuspend_device(udev);
3447 + memalloc_noio_restore(noio_flag);
3448 ++ udev->reset_in_progress = 0;
3449 + return ret;
3450 + }
3451 + EXPORT_SYMBOL_GPL(usb_reset_device);
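
The hub.c change documents and implements a re-entrancy guard:
usb_reset_device() now returns -EINPROGRESS instead of recursing when a
driver's probe() or disconnect() triggers a nested reset. Because the caller
must already hold the device lock, a plain flag is race-free here. Essentials
of the pattern, with hypothetical names:

#include <linux/mutex.h>
#include <linux/errno.h>

struct dev_sketch {
	struct mutex lock;		/* callers hold this across dev_reset() */
	unsigned int reset_in_progress:1;
};

static int do_hw_reset(struct dev_sketch *d)	/* hypothetical */
{
	return 0;
}

static int dev_reset(struct dev_sketch *d)	/* called with d->lock held */
{
	int ret;

	if (d->reset_in_progress)
		return -EINPROGRESS;	/* nested call: bail out early */
	d->reset_in_progress = 1;

	/* may unbind/rebind a driver whose callbacks re-enter dev_reset() */
	ret = do_hw_reset(d);

	d->reset_in_progress = 0;
	return ret;
}
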
3452 +diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
3453 +index c8ba87df7abef..fd0ccf6f3ec5a 100644
3454 +--- a/drivers/usb/dwc2/platform.c
3455 ++++ b/drivers/usb/dwc2/platform.c
3456 +@@ -154,9 +154,9 @@ static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg)
3457 + } else if (hsotg->plat && hsotg->plat->phy_init) {
3458 + ret = hsotg->plat->phy_init(pdev, hsotg->plat->phy_type);
3459 + } else {
3460 +- ret = phy_power_on(hsotg->phy);
3461 ++ ret = phy_init(hsotg->phy);
3462 + if (ret == 0)
3463 +- ret = phy_init(hsotg->phy);
3464 ++ ret = phy_power_on(hsotg->phy);
3465 + }
3466 +
3467 + return ret;
3468 +@@ -188,9 +188,9 @@ static int __dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg)
3469 + } else if (hsotg->plat && hsotg->plat->phy_exit) {
3470 + ret = hsotg->plat->phy_exit(pdev, hsotg->plat->phy_type);
3471 + } else {
3472 +- ret = phy_exit(hsotg->phy);
3473 ++ ret = phy_power_off(hsotg->phy);
3474 + if (ret == 0)
3475 +- ret = phy_power_off(hsotg->phy);
3476 ++ ret = phy_exit(hsotg->phy);
3477 + }
3478 + if (ret)
3479 + return ret;
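
This dwc2 hunk (and the dwc3 core.c one just below) enforce the generic PHY
framework's ordering: phy_init() before phy_power_on() when bringing the PHY
up, and phy_power_off() before phy_exit() as the mirror image on the way down.
Sketched as a helper pair:

#include <linux/phy/phy.h>

static int sketch_phy_enable(struct phy *phy)
{
	int ret;

	ret = phy_init(phy);		/* configure first ... */
	if (ret)
		return ret;

	ret = phy_power_on(phy);	/* ... then power on */
	if (ret)
		phy_exit(phy);		/* unwind the init on failure */
	return ret;
}

static void sketch_phy_disable(struct phy *phy)
{
	phy_power_off(phy);		/* strict reverse order on teardown */
	phy_exit(phy);
}
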
3480 +diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
3481 +index ba2fa91be1d64..1db9f51f98aef 100644
3482 +--- a/drivers/usb/dwc3/core.c
3483 ++++ b/drivers/usb/dwc3/core.c
3484 +@@ -833,15 +833,16 @@ static void dwc3_core_exit(struct dwc3 *dwc)
3485 + {
3486 + dwc3_event_buffers_cleanup(dwc);
3487 +
3488 ++ usb_phy_set_suspend(dwc->usb2_phy, 1);
3489 ++ usb_phy_set_suspend(dwc->usb3_phy, 1);
3490 ++ phy_power_off(dwc->usb2_generic_phy);
3491 ++ phy_power_off(dwc->usb3_generic_phy);
3492 ++
3493 + usb_phy_shutdown(dwc->usb2_phy);
3494 + usb_phy_shutdown(dwc->usb3_phy);
3495 + phy_exit(dwc->usb2_generic_phy);
3496 + phy_exit(dwc->usb3_generic_phy);
3497 +
3498 +- usb_phy_set_suspend(dwc->usb2_phy, 1);
3499 +- usb_phy_set_suspend(dwc->usb3_phy, 1);
3500 +- phy_power_off(dwc->usb2_generic_phy);
3501 +- phy_power_off(dwc->usb3_generic_phy);
3502 + dwc3_clk_disable(dwc);
3503 + reset_control_assert(dwc->reset);
3504 + }
3505 +@@ -1844,16 +1845,16 @@ err5:
3506 + dwc3_debugfs_exit(dwc);
3507 + dwc3_event_buffers_cleanup(dwc);
3508 +
3509 +- usb_phy_shutdown(dwc->usb2_phy);
3510 +- usb_phy_shutdown(dwc->usb3_phy);
3511 +- phy_exit(dwc->usb2_generic_phy);
3512 +- phy_exit(dwc->usb3_generic_phy);
3513 +-
3514 + usb_phy_set_suspend(dwc->usb2_phy, 1);
3515 + usb_phy_set_suspend(dwc->usb3_phy, 1);
3516 + phy_power_off(dwc->usb2_generic_phy);
3517 + phy_power_off(dwc->usb3_generic_phy);
3518 +
3519 ++ usb_phy_shutdown(dwc->usb2_phy);
3520 ++ usb_phy_shutdown(dwc->usb3_phy);
3521 ++ phy_exit(dwc->usb2_generic_phy);
3522 ++ phy_exit(dwc->usb3_generic_phy);
3523 ++
3524 + dwc3_ulpi_exit(dwc);
3525 +
3526 + err4:
3527 +diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
3528 +index 6b018048fe2e1..4ee4ca09873af 100644
3529 +--- a/drivers/usb/dwc3/dwc3-pci.c
3530 ++++ b/drivers/usb/dwc3/dwc3-pci.c
3531 +@@ -44,6 +44,7 @@
3532 + #define PCI_DEVICE_ID_INTEL_ADLP 0x51ee
3533 + #define PCI_DEVICE_ID_INTEL_ADLM 0x54ee
3534 + #define PCI_DEVICE_ID_INTEL_ADLS 0x7ae1
3535 ++#define PCI_DEVICE_ID_INTEL_RPL 0x460e
3536 + #define PCI_DEVICE_ID_INTEL_RPLS 0x7a61
3537 + #define PCI_DEVICE_ID_INTEL_MTLP 0x7ec1
3538 + #define PCI_DEVICE_ID_INTEL_MTL 0x7e7e
3539 +@@ -456,6 +457,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
3540 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLS),
3541 + (kernel_ulong_t) &dwc3_pci_intel_swnode, },
3542 +
3543 ++ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPL),
3544 ++ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
3545 ++
3546 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPLS),
3547 + (kernel_ulong_t) &dwc3_pci_intel_swnode, },
3548 +
3549 +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
3550 +index 52d5a7c81362a..886fab0008a75 100644
3551 +--- a/drivers/usb/dwc3/gadget.c
3552 ++++ b/drivers/usb/dwc3/gadget.c
3553 +@@ -2538,9 +2538,6 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
3554 +
3555 + is_on = !!is_on;
3556 +
3557 +- if (dwc->pullups_connected == is_on)
3558 +- return 0;
3559 +-
3560 + dwc->softconnect = is_on;
3561 +
3562 + /*
3563 +@@ -2565,6 +2562,11 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
3564 + return 0;
3565 + }
3566 +
3567 ++ if (dwc->pullups_connected == is_on) {
3568 ++ pm_runtime_put(dwc->dev);
3569 ++ return 0;
3570 ++ }
3571 ++
3572 + if (!is_on) {
3573 + ret = dwc3_gadget_soft_disconnect(dwc);
3574 + } else {
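
The pullup reorder is about runtime-PM reference balance: once the
"already in the requested state" check moves below the pm_runtime handling, the
early return has to drop the reference it now holds. A schematic of that
discipline (not the dwc3 control flow itself, which defers its put):

#include <linux/pm_runtime.h>

static int sketch_set_pullup(struct device *dev, bool on, bool *cur)
{
	int ret;

	ret = pm_runtime_get_sync(dev);		/* takes a usage reference */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* the ref is held even on error */
		return ret;
	}

	if (*cur == on) {
		pm_runtime_put(dev);		/* no-op path must balance the get */
		return 0;
	}

	/* ... apply the change to the hardware ... */
	*cur = on;

	pm_runtime_put(dev);
	return 0;
}
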
3575 +diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
3576 +index f56c30cf151e4..06b3d988fbf32 100644
3577 +--- a/drivers/usb/dwc3/host.c
3578 ++++ b/drivers/usb/dwc3/host.c
3579 +@@ -11,8 +11,13 @@
3580 + #include <linux/of.h>
3581 + #include <linux/platform_device.h>
3582 +
3583 ++#include "../host/xhci-plat.h"
3584 + #include "core.h"
3585 +
3586 ++static const struct xhci_plat_priv dwc3_xhci_plat_priv = {
3587 ++ .quirks = XHCI_SKIP_PHY_INIT,
3588 ++};
3589 ++
3590 + static void dwc3_host_fill_xhci_irq_res(struct dwc3 *dwc,
3591 + int irq, char *name)
3592 + {
3593 +@@ -92,6 +97,11 @@ int dwc3_host_init(struct dwc3 *dwc)
3594 + goto err;
3595 + }
3596 +
3597 ++ ret = platform_device_add_data(xhci, &dwc3_xhci_plat_priv,
3598 ++ sizeof(dwc3_xhci_plat_priv));
3599 ++ if (ret)
3600 ++ goto err;
3601 ++
3602 + memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props));
3603 +
3604 + if (dwc->usb3_lpm_capable)
3605 +diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
3606 +index 1905a8d8e0c9f..08726e4c68a56 100644
3607 +--- a/drivers/usb/gadget/function/f_uac2.c
3608 ++++ b/drivers/usb/gadget/function/f_uac2.c
3609 +@@ -291,6 +291,12 @@ static struct usb_endpoint_descriptor ss_ep_int_desc = {
3610 + .bInterval = 4,
3611 + };
3612 +
3613 ++static struct usb_ss_ep_comp_descriptor ss_ep_int_desc_comp = {
3614 ++ .bLength = sizeof(ss_ep_int_desc_comp),
3615 ++ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
3616 ++ .wBytesPerInterval = cpu_to_le16(6),
3617 ++};
3618 ++
3619 + /* Audio Streaming OUT Interface - Alt0 */
3620 + static struct usb_interface_descriptor std_as_out_if0_desc = {
3621 + .bLength = sizeof std_as_out_if0_desc,
3622 +@@ -604,7 +610,8 @@ static struct usb_descriptor_header *ss_audio_desc[] = {
3623 + (struct usb_descriptor_header *)&in_feature_unit_desc,
3624 + (struct usb_descriptor_header *)&io_out_ot_desc,
3625 +
3626 +- (struct usb_descriptor_header *)&ss_ep_int_desc,
3627 ++ (struct usb_descriptor_header *)&ss_ep_int_desc,
3628 ++ (struct usb_descriptor_header *)&ss_ep_int_desc_comp,
3629 +
3630 + (struct usb_descriptor_header *)&std_as_out_if0_desc,
3631 + (struct usb_descriptor_header *)&std_as_out_if1_desc,
3632 +@@ -800,6 +807,7 @@ static void setup_headers(struct f_uac2_opts *opts,
3633 + struct usb_ss_ep_comp_descriptor *epout_desc_comp = NULL;
3634 + struct usb_ss_ep_comp_descriptor *epin_desc_comp = NULL;
3635 + struct usb_ss_ep_comp_descriptor *epin_fback_desc_comp = NULL;
3636 ++ struct usb_ss_ep_comp_descriptor *ep_int_desc_comp = NULL;
3637 + struct usb_endpoint_descriptor *epout_desc;
3638 + struct usb_endpoint_descriptor *epin_desc;
3639 + struct usb_endpoint_descriptor *epin_fback_desc;
3640 +@@ -827,6 +835,7 @@ static void setup_headers(struct f_uac2_opts *opts,
3641 + epin_fback_desc = &ss_epin_fback_desc;
3642 + epin_fback_desc_comp = &ss_epin_fback_desc_comp;
3643 + ep_int_desc = &ss_ep_int_desc;
3644 ++ ep_int_desc_comp = &ss_ep_int_desc_comp;
3645 + }
3646 +
3647 + i = 0;
3648 +@@ -855,8 +864,11 @@ static void setup_headers(struct f_uac2_opts *opts,
3649 + if (EPOUT_EN(opts))
3650 + headers[i++] = USBDHDR(&io_out_ot_desc);
3651 +
3652 +- if (FUOUT_EN(opts) || FUIN_EN(opts))
3653 ++ if (FUOUT_EN(opts) || FUIN_EN(opts)) {
3654 + headers[i++] = USBDHDR(ep_int_desc);
3655 ++ if (ep_int_desc_comp)
3656 ++ headers[i++] = USBDHDR(ep_int_desc_comp);
3657 ++ }
3658 +
3659 + if (EPOUT_EN(opts)) {
3660 + headers[i++] = USBDHDR(&std_as_out_if0_desc);
3661 +diff --git a/drivers/usb/gadget/function/storage_common.c b/drivers/usb/gadget/function/storage_common.c
3662 +index b859a158a4140..e122050eebaf1 100644
3663 +--- a/drivers/usb/gadget/function/storage_common.c
3664 ++++ b/drivers/usb/gadget/function/storage_common.c
3665 +@@ -294,8 +294,10 @@ EXPORT_SYMBOL_GPL(fsg_lun_fsync_sub);
3666 + void store_cdrom_address(u8 *dest, int msf, u32 addr)
3667 + {
3668 + if (msf) {
3669 +- /* Convert to Minutes-Seconds-Frames */
3670 +- addr >>= 2; /* Convert to 2048-byte frames */
3671 ++ /*
3672 ++ * Convert to Minutes-Seconds-Frames.
3673 ++ * Sector size is already set to 2048 bytes.
3674 ++ */
3675 + addr += 2*75; /* Lead-in occupies 2 seconds */
3676 + dest[3] = addr % 75; /* Frames */
3677 + addr /= 75;
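
With the stray shift gone, store_cdrom_address() is the standard CD addressing
math: 75 frames per second, 60 seconds per minute, plus a 2-second lead-in. A
worked example in hosted C (the output array order is schematic; the driver
writes minutes/seconds/frames into dest[1..3]):

#include <stdio.h>

/* Convert a 2048-byte-frame address to Minutes-Seconds-Frames. */
static void to_msf(unsigned int addr, unsigned char out[3])
{
	addr += 2 * 75;		/* lead-in occupies 2 seconds */
	out[2] = addr % 75;	/* frames */
	addr /= 75;
	out[1] = addr % 60;	/* seconds */
	out[0] = addr / 60;	/* minutes */
}

int main(void)
{
	unsigned char msf[3];

	to_msf(4500, msf);	/* 4500 frames = 60 s of data + 2 s lead-in */
	printf("%02u:%02u:%02u\n", msf[0], msf[1], msf[2]);	/* 01:02:00 */
	return 0;
}
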
3678 +diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
3679 +index cafcf260394cd..c63c0c2cf649d 100644
3680 +--- a/drivers/usb/gadget/udc/core.c
3681 ++++ b/drivers/usb/gadget/udc/core.c
3682 +@@ -736,7 +736,10 @@ int usb_gadget_disconnect(struct usb_gadget *gadget)
3683 + ret = gadget->ops->pullup(gadget, 0);
3684 + if (!ret) {
3685 + gadget->connected = 0;
3686 +- gadget->udc->driver->disconnect(gadget);
3687 ++ mutex_lock(&udc_lock);
3688 ++ if (gadget->udc->driver)
3689 ++ gadget->udc->driver->disconnect(gadget);
3690 ++ mutex_unlock(&udc_lock);
3691 + }
3692 +
3693 + out:
3694 +@@ -1489,7 +1492,6 @@ static int gadget_bind_driver(struct device *dev)
3695 +
3696 + usb_gadget_udc_set_speed(udc, driver->max_speed);
3697 +
3698 +- mutex_lock(&udc_lock);
3699 + ret = driver->bind(udc->gadget, driver);
3700 + if (ret)
3701 + goto err_bind;
3702 +@@ -1499,7 +1501,6 @@ static int gadget_bind_driver(struct device *dev)
3703 + goto err_start;
3704 + usb_gadget_enable_async_callbacks(udc);
3705 + usb_udc_connect_control(udc);
3706 +- mutex_unlock(&udc_lock);
3707 +
3708 + kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
3709 + return 0;
3710 +@@ -1512,6 +1513,7 @@ static int gadget_bind_driver(struct device *dev)
3711 + dev_err(&udc->dev, "failed to start %s: %d\n",
3712 + driver->function, ret);
3713 +
3714 ++ mutex_lock(&udc_lock);
3715 + udc->driver = NULL;
3716 + driver->is_bound = false;
3717 + mutex_unlock(&udc_lock);
3718 +@@ -1529,7 +1531,6 @@ static void gadget_unbind_driver(struct device *dev)
3719 +
3720 + kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
3721 +
3722 +- mutex_lock(&udc_lock);
3723 + usb_gadget_disconnect(gadget);
3724 + usb_gadget_disable_async_callbacks(udc);
3725 + if (gadget->irq)
3726 +@@ -1537,6 +1538,7 @@ static void gadget_unbind_driver(struct device *dev)
3727 + udc->driver->unbind(gadget);
3728 + usb_gadget_udc_stop(udc);
3729 +
3730 ++ mutex_lock(&udc_lock);
3731 + driver->is_bound = false;
3732 + udc->driver = NULL;
3733 + mutex_unlock(&udc_lock);
3734 +@@ -1612,7 +1614,7 @@ static ssize_t soft_connect_store(struct device *dev,
3735 + struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
3736 + ssize_t ret;
3737 +
3738 +- mutex_lock(&udc_lock);
3739 ++ device_lock(&udc->gadget->dev);
3740 + if (!udc->driver) {
3741 + dev_err(dev, "soft-connect without a gadget driver\n");
3742 + ret = -EOPNOTSUPP;
3743 +@@ -1633,7 +1635,7 @@ static ssize_t soft_connect_store(struct device *dev,
3744 +
3745 + ret = n;
3746 + out:
3747 +- mutex_unlock(&udc_lock);
3748 ++ device_unlock(&udc->gadget->dev);
3749 + return ret;
3750 + }
3751 + static DEVICE_ATTR_WO(soft_connect);
3752 +@@ -1652,11 +1654,15 @@ static ssize_t function_show(struct device *dev, struct device_attribute *attr,
3753 + char *buf)
3754 + {
3755 + struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
3756 +- struct usb_gadget_driver *drv = udc->driver;
3757 ++ struct usb_gadget_driver *drv;
3758 ++ int rc = 0;
3759 +
3760 +- if (!drv || !drv->function)
3761 +- return 0;
3762 +- return scnprintf(buf, PAGE_SIZE, "%s\n", drv->function);
3763 ++ mutex_lock(&udc_lock);
3764 ++ drv = udc->driver;
3765 ++ if (drv && drv->function)
3766 ++ rc = scnprintf(buf, PAGE_SIZE, "%s\n", drv->function);
3767 ++ mutex_unlock(&udc_lock);
3768 ++ return rc;
3769 + }
3770 + static DEVICE_ATTR_RO(function);
3771 +
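
The udc/core.c hunks narrow udc_lock to what it actually protects, the
udc->driver pointer: the bind/unbind callbacks now run unlocked, while every
read of the pointer (disconnect, the function sysfs attribute) takes the mutex
just for the dereference. The function_show() shape, generalized with invented
type names:

#include <linux/mutex.h>
#include <linux/kernel.h>

struct drv_sketch {
	const char *function;
};

struct udc_sketch {
	struct drv_sketch *driver;	/* set/cleared under the lock below */
};

static DEFINE_MUTEX(sketch_lock);

static ssize_t sketch_function_show(struct udc_sketch *udc, char *buf,
				    size_t len)
{
	ssize_t rc = 0;

	mutex_lock(&sketch_lock);
	if (udc->driver && udc->driver->function)
		rc = scnprintf(buf, len, "%s\n", udc->driver->function);
	mutex_unlock(&sketch_lock);	/* the pointer may change after this */

	return rc;
}
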
3772 +diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
3773 +index 0fdc014c94011..4619d5e89d5be 100644
3774 +--- a/drivers/usb/host/xhci-hub.c
3775 ++++ b/drivers/usb/host/xhci-hub.c
3776 +@@ -652,7 +652,7 @@ struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd)
3777 + * It will release and re-acquire the lock while calling the ACPI
3778 + * method.
3779 + */
3780 +-void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd,
3781 ++static void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd,
3782 + u16 index, bool on, unsigned long *flags)
3783 + __must_hold(&xhci->lock)
3784 + {
3785 +@@ -1648,6 +1648,17 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
3786 +
3787 + status = bus_state->resuming_ports;
3788 +
3789 ++ /*
3790 ++ * SS devices are only visible to roothub after link training completes.
3791 ++ * Keep polling roothubs for a grace period after xHC start
3792 ++ */
3793 ++ if (xhci->run_graceperiod) {
3794 ++ if (time_before(jiffies, xhci->run_graceperiod))
3795 ++ status = 1;
3796 ++ else
3797 ++ xhci->run_graceperiod = 0;
3798 ++ }
3799 ++
3800 + mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC;
3801 +
3802 + /* For each port, did anything change? If so, set that bit in buf. */
3803 +diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
3804 +index 06a6b19acaae6..579899eb24c15 100644
3805 +--- a/drivers/usb/host/xhci-mtk-sch.c
3806 ++++ b/drivers/usb/host/xhci-mtk-sch.c
3807 +@@ -425,7 +425,6 @@ static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset)
3808 +
3809 + static int check_sch_tt(struct mu3h_sch_ep_info *sch_ep, u32 offset)
3810 + {
3811 +- u32 extra_cs_count;
3812 + u32 start_ss, last_ss;
3813 + u32 start_cs, last_cs;
3814 +
3815 +@@ -461,18 +460,12 @@ static int check_sch_tt(struct mu3h_sch_ep_info *sch_ep, u32 offset)
3816 + if (last_cs > 7)
3817 + return -ESCH_CS_OVERFLOW;
3818 +
3819 +- if (sch_ep->ep_type == ISOC_IN_EP)
3820 +- extra_cs_count = (last_cs == 7) ? 1 : 2;
3821 +- else /* ep_type : INTR IN / INTR OUT */
3822 +- extra_cs_count = 1;
3823 +-
3824 +- cs_count += extra_cs_count;
3825 + if (cs_count > 7)
3826 + cs_count = 7; /* HW limit */
3827 +
3828 + sch_ep->cs_count = cs_count;
3829 +- /* one for ss, the other for idle */
3830 +- sch_ep->num_budget_microframes = cs_count + 2;
3831 ++ /* ss, idle are ignored */
3832 ++ sch_ep->num_budget_microframes = cs_count;
3833 +
3834 + /*
3835 + * if interval=1, maxp >752, num_budget_microframes is larger
3836 +@@ -771,8 +764,8 @@ int xhci_mtk_drop_ep(struct usb_hcd *hcd, struct usb_device *udev,
3837 + if (ret)
3838 + return ret;
3839 +
3840 +- if (ep->hcpriv)
3841 +- drop_ep_quirk(hcd, udev, ep);
3842 ++ /* no need to check @ep->hcpriv, xhci_endpoint_disable sets it to NULL */
3843 ++ drop_ep_quirk(hcd, udev, ep);
3844 +
3845 + return 0;
3846 + }
3847 +diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
3848 +index 044855818cb11..a8641b6536eea 100644
3849 +--- a/drivers/usb/host/xhci-plat.c
3850 ++++ b/drivers/usb/host/xhci-plat.c
3851 +@@ -398,12 +398,17 @@ static int xhci_plat_remove(struct platform_device *dev)
3852 + pm_runtime_get_sync(&dev->dev);
3853 + xhci->xhc_state |= XHCI_STATE_REMOVING;
3854 +
3855 +- usb_remove_hcd(shared_hcd);
3856 +- xhci->shared_hcd = NULL;
3857 ++ if (shared_hcd) {
3858 ++ usb_remove_hcd(shared_hcd);
3859 ++ xhci->shared_hcd = NULL;
3860 ++ }
3861 ++
3862 + usb_phy_shutdown(hcd->usb_phy);
3863 +
3864 + usb_remove_hcd(hcd);
3865 +- usb_put_hcd(shared_hcd);
3866 ++
3867 ++ if (shared_hcd)
3868 ++ usb_put_hcd(shared_hcd);
3869 +
3870 + clk_disable_unprepare(clk);
3871 + clk_disable_unprepare(reg_clk);
3872 +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
3873 +index 65858f6074377..38649284ff889 100644
3874 +--- a/drivers/usb/host/xhci.c
3875 ++++ b/drivers/usb/host/xhci.c
3876 +@@ -151,9 +151,11 @@ int xhci_start(struct xhci_hcd *xhci)
3877 + xhci_err(xhci, "Host took too long to start, "
3878 + "waited %u microseconds.\n",
3879 + XHCI_MAX_HALT_USEC);
3880 +- if (!ret)
3881 ++ if (!ret) {
3882 + /* clear state flags. Including dying, halted or removing */
3883 + xhci->xhc_state = 0;
3884 ++ xhci->run_graceperiod = jiffies + msecs_to_jiffies(500);
3885 ++ }
3886 +
3887 + return ret;
3888 + }
3889 +@@ -791,8 +793,6 @@ static void xhci_stop(struct usb_hcd *hcd)
3890 + void xhci_shutdown(struct usb_hcd *hcd)
3891 + {
3892 + struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3893 +- unsigned long flags;
3894 +- int i;
3895 +
3896 + if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
3897 + usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));
3898 +@@ -808,21 +808,12 @@ void xhci_shutdown(struct usb_hcd *hcd)
3899 + del_timer_sync(&xhci->shared_hcd->rh_timer);
3900 + }
3901 +
3902 +- spin_lock_irqsave(&xhci->lock, flags);
3903 ++ spin_lock_irq(&xhci->lock);
3904 + xhci_halt(xhci);
3905 +-
3906 +- /* Power off USB2 ports*/
3907 +- for (i = 0; i < xhci->usb2_rhub.num_ports; i++)
3908 +- xhci_set_port_power(xhci, xhci->main_hcd, i, false, &flags);
3909 +-
3910 +- /* Power off USB3 ports*/
3911 +- for (i = 0; i < xhci->usb3_rhub.num_ports; i++)
3912 +- xhci_set_port_power(xhci, xhci->shared_hcd, i, false, &flags);
3913 +-
3914 + /* Workaround for spurious wakeups at shutdown with HSW */
3915 + if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
3916 + xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
3917 +- spin_unlock_irqrestore(&xhci->lock, flags);
3918 ++ spin_unlock_irq(&xhci->lock);
3919 +
3920 + xhci_cleanup_msix(xhci);
3921 +
3922 +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
3923 +index 1960b47acfb28..7caa0db5e826d 100644
3924 +--- a/drivers/usb/host/xhci.h
3925 ++++ b/drivers/usb/host/xhci.h
3926 +@@ -1826,7 +1826,7 @@ struct xhci_hcd {
3927 +
3928 + /* Host controller watchdog timer structures */
3929 + unsigned int xhc_state;
3930 +-
3931 ++ unsigned long run_graceperiod;
3932 + u32 command;
3933 + struct s3_save s3;
3934 + /* Host controller is dying - not responding to commands. "I'm not dead yet!"
3935 +@@ -2196,8 +2196,6 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
3936 + int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
3937 + int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1);
3938 + struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd);
3939 +-void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd, u16 index,
3940 +- bool on, unsigned long *flags);
3941 +
3942 + void xhci_hc_died(struct xhci_hcd *xhci);
3943 +
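
The xhci pieces above act as a pair: xhci_start() stamps run_graceperiod at
now + 500 ms, and xhci_hub_status_data() keeps reporting status until that
deadline passes, giving SuperSpeed link training time to finish before root-hub
polling stops. The jiffies idiom in isolation:

#include <linux/jiffies.h>

static unsigned long run_graceperiod;	/* 0 = no grace period armed */

static void grace_start(void)
{
	run_graceperiod = jiffies + msecs_to_jiffies(500);
}

static int grace_active(void)
{
	if (!run_graceperiod)
		return 0;
	if (time_before(jiffies, run_graceperiod))
		return 1;		/* still inside the window */
	run_graceperiod = 0;		/* deadline passed; disarm */
	return 0;
}
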
3944 +diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
3945 +index 4d61df6a9b5c8..70693cae83efb 100644
3946 +--- a/drivers/usb/musb/Kconfig
3947 ++++ b/drivers/usb/musb/Kconfig
3948 +@@ -86,7 +86,7 @@ config USB_MUSB_TUSB6010
3949 + tristate "TUSB6010"
3950 + depends on HAS_IOMEM
3951 + depends on ARCH_OMAP2PLUS || COMPILE_TEST
3952 +- depends on NOP_USB_XCEIV = USB_MUSB_HDRC # both built-in or both modules
3953 ++ depends on NOP_USB_XCEIV!=m || USB_MUSB_HDRC=m
3954 +
3955 + config USB_MUSB_OMAP2PLUS
3956 + tristate "OMAP2430 and onwards"
3957 +diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
3958 +index 2798fca712612..af01a462cc43c 100644
3959 +--- a/drivers/usb/serial/ch341.c
3960 ++++ b/drivers/usb/serial/ch341.c
3961 +@@ -97,7 +97,10 @@ struct ch341_private {
3962 + u8 mcr;
3963 + u8 msr;
3964 + u8 lcr;
3965 ++
3966 + unsigned long quirks;
3967 ++ u8 version;
3968 ++
3969 + unsigned long break_end;
3970 + };
3971 +
3972 +@@ -250,8 +253,12 @@ static int ch341_set_baudrate_lcr(struct usb_device *dev,
3973 + /*
3974 + * CH341A buffers data until a full endpoint-size packet (32 bytes)
3975 + * has been received unless bit 7 is set.
3976 ++ *
3977 ++ * At least one device with version 0x27 appears to have this bit
3978 ++ * inverted.
3979 + */
3980 +- val |= BIT(7);
3981 ++ if (priv->version > 0x27)
3982 ++ val |= BIT(7);
3983 +
3984 + r = ch341_control_out(dev, CH341_REQ_WRITE_REG,
3985 + CH341_REG_DIVISOR << 8 | CH341_REG_PRESCALER,
3986 +@@ -265,6 +272,9 @@ static int ch341_set_baudrate_lcr(struct usb_device *dev,
3987 + * (stop bits, parity and word length). Version 0x30 and above use
3988 + * CH341_REG_LCR only and CH341_REG_LCR2 is always set to zero.
3989 + */
3990 ++ if (priv->version < 0x30)
3991 ++ return 0;
3992 ++
3993 + r = ch341_control_out(dev, CH341_REQ_WRITE_REG,
3994 + CH341_REG_LCR2 << 8 | CH341_REG_LCR, lcr);
3995 + if (r)
3996 +@@ -308,7 +318,9 @@ static int ch341_configure(struct usb_device *dev, struct ch341_private *priv)
3997 + r = ch341_control_in(dev, CH341_REQ_READ_VERSION, 0, 0, buffer, size);
3998 + if (r)
3999 + return r;
4000 +- dev_dbg(&dev->dev, "Chip version: 0x%02x\n", buffer[0]);
4001 ++
4002 ++ priv->version = buffer[0];
4003 ++ dev_dbg(&dev->dev, "Chip version: 0x%02x\n", priv->version);
4004 +
4005 + r = ch341_control_out(dev, CH341_REQ_SERIAL_INIT, 0, 0);
4006 + if (r < 0)
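
The ch341 change caches the chip version read during configuration and keys
two behaviours off it: the divisor register's bit 7 is only set on versions
above 0x27 (one 0x27 device reportedly has it inverted), and the combined
LCR2/LCR write is skipped below 0x30. Version gating in miniature, thresholds
copied from the hunks and the rest schematic:

struct chip_sketch {
	unsigned char version;		/* read once at probe time */
};

static unsigned char divisor_val(const struct chip_sketch *c,
				 unsigned char val)
{
	/* immediate-transmit bit; unsafe to set on old or odd parts */
	if (c->version > 0x27)
		val |= 0x80;		/* BIT(7) */
	return val;
}

static int wants_lcr_write(const struct chip_sketch *c)
{
	/* pre-0x30 parts use separate line-control registers (per the hunk) */
	return c->version >= 0x30;
}
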
4007 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
4008 +index c374620a486f0..a34957c4b64c0 100644
4009 +--- a/drivers/usb/serial/cp210x.c
4010 ++++ b/drivers/usb/serial/cp210x.c
4011 +@@ -130,6 +130,7 @@ static const struct usb_device_id id_table[] = {
4012 + { USB_DEVICE(0x10C4, 0x83AA) }, /* Mark-10 Digital Force Gauge */
4013 + { USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */
4014 + { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
4015 ++ { USB_DEVICE(0x10C4, 0x8414) }, /* Decagon USB Cable Adapter */
4016 + { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
4017 + { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
4018 + { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
4019 +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
4020 +index d5a3986dfee75..52d59be920342 100644
4021 +--- a/drivers/usb/serial/ftdi_sio.c
4022 ++++ b/drivers/usb/serial/ftdi_sio.c
4023 +@@ -1045,6 +1045,8 @@ static const struct usb_device_id id_table_combined[] = {
4024 + /* IDS GmbH devices */
4025 + { USB_DEVICE(IDS_VID, IDS_SI31A_PID) },
4026 + { USB_DEVICE(IDS_VID, IDS_CM31A_PID) },
4027 ++ /* Omron devices */
4028 ++ { USB_DEVICE(OMRON_VID, OMRON_CS1W_CIF31_PID) },
4029 + /* U-Blox devices */
4030 + { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) },
4031 + { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) },
4032 +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
4033 +index 4e92c165c86bf..31c8ccabbbb78 100644
4034 +--- a/drivers/usb/serial/ftdi_sio_ids.h
4035 ++++ b/drivers/usb/serial/ftdi_sio_ids.h
4036 +@@ -661,6 +661,12 @@
4037 + #define INFINEON_TRIBOARD_TC1798_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
4038 + #define INFINEON_TRIBOARD_TC2X7_PID 0x0043 /* DAS JTAG TriBoard TC2X7 V1.0 */
4039 +
4040 ++/*
4041 ++ * Omron Corporation (https://www.omron.com)
4042 ++ */
4043 ++ #define OMRON_VID 0x0590
4044 ++ #define OMRON_CS1W_CIF31_PID 0x00b2
4045 ++
4046 + /*
4047 + * Acton Research Corp.
4048 + */
4049 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
4050 +index de59fa919540a..a5e8374a8d710 100644
4051 +--- a/drivers/usb/serial/option.c
4052 ++++ b/drivers/usb/serial/option.c
4053 +@@ -253,6 +253,7 @@ static void option_instat_callback(struct urb *urb);
4054 + #define QUECTEL_PRODUCT_BG96 0x0296
4055 + #define QUECTEL_PRODUCT_EP06 0x0306
4056 + #define QUECTEL_PRODUCT_EM05G 0x030a
4057 ++#define QUECTEL_PRODUCT_EM060K 0x030b
4058 + #define QUECTEL_PRODUCT_EM12 0x0512
4059 + #define QUECTEL_PRODUCT_RM500Q 0x0800
4060 + #define QUECTEL_PRODUCT_EC200S_CN 0x6002
4061 +@@ -438,6 +439,8 @@ static void option_instat_callback(struct urb *urb);
4062 + #define CINTERION_PRODUCT_MV31_2_RMNET 0x00b9
4063 + #define CINTERION_PRODUCT_MV32_WA 0x00f1
4064 + #define CINTERION_PRODUCT_MV32_WB 0x00f2
4065 ++#define CINTERION_PRODUCT_MV32_WA_RMNET 0x00f3
4066 ++#define CINTERION_PRODUCT_MV32_WB_RMNET 0x00f4
4067 +
4068 + /* Olivetti products */
4069 + #define OLIVETTI_VENDOR_ID 0x0b3c
4070 +@@ -573,6 +576,10 @@ static void option_instat_callback(struct urb *urb);
4071 + #define WETELECOM_PRODUCT_6802 0x6802
4072 + #define WETELECOM_PRODUCT_WMD300 0x6803
4073 +
4074 ++/* OPPO products */
4075 ++#define OPPO_VENDOR_ID 0x22d9
4076 ++#define OPPO_PRODUCT_R11 0x276c
4077 ++
4078 +
4079 + /* Device flags */
4080 +
4081 +@@ -1138,6 +1145,9 @@ static const struct usb_device_id option_ids[] = {
4082 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
4083 + { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G, 0xff),
4084 + .driver_info = RSVD(6) | ZLP },
4085 ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0x00, 0x40) },
4086 ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x30) },
4087 ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x40) },
4088 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
4089 + .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
4090 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
4091 +@@ -1993,8 +2003,12 @@ static const struct usb_device_id option_ids[] = {
4092 + .driver_info = RSVD(0)},
4093 + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA, 0xff),
4094 + .driver_info = RSVD(3)},
4095 ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA_RMNET, 0xff),
4096 ++ .driver_info = RSVD(0) },
4097 + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB, 0xff),
4098 + .driver_info = RSVD(3)},
4099 ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB_RMNET, 0xff),
4100 ++ .driver_info = RSVD(0) },
4101 + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
4102 + .driver_info = RSVD(4) },
4103 + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
4104 +@@ -2155,6 +2169,7 @@ static const struct usb_device_id option_ids[] = {
4105 + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
4106 + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
4107 + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
4108 ++ { USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
4109 + { } /* Terminating entry */
4110 + };
4111 + MODULE_DEVICE_TABLE(usb, option_ids);
4112 +diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
4113 +index 1a05e3dcfec8a..4993227ab2930 100644
4114 +--- a/drivers/usb/storage/unusual_devs.h
4115 ++++ b/drivers/usb/storage/unusual_devs.h
4116 +@@ -2294,6 +2294,13 @@ UNUSUAL_DEV( 0x1e74, 0x4621, 0x0000, 0x0000,
4117 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
4118 + US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ),
4119 +
4120 ++/* Reported by Witold Lipieta <witold.lipieta@×××××××××.com> */
4121 ++UNUSUAL_DEV( 0x1fc9, 0x0117, 0x0100, 0x0100,
4122 ++ "NXP Semiconductors",
4123 ++ "PN7462AU",
4124 ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
4125 ++ US_FL_IGNORE_RESIDUE ),
4126 ++
4127 + /* Supplied with some Castlewood ORB removable drives */
4128 + UNUSUAL_DEV( 0x2027, 0xa001, 0x0000, 0x9999,
4129 + "Double-H Technology",
4130 +diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
4131 +index c1d8c23baa399..de66a2949e33b 100644
4132 +--- a/drivers/usb/typec/altmodes/displayport.c
4133 ++++ b/drivers/usb/typec/altmodes/displayport.c
4134 +@@ -99,8 +99,8 @@ static int dp_altmode_configure(struct dp_altmode *dp, u8 con)
4135 + case DP_STATUS_CON_UFP_D:
4136 + case DP_STATUS_CON_BOTH: /* NOTE: First acting as DP source */
4137 + conf |= DP_CONF_UFP_U_AS_UFP_D;
4138 +- pin_assign = DP_CAP_DFP_D_PIN_ASSIGN(dp->alt->vdo) &
4139 +- DP_CAP_UFP_D_PIN_ASSIGN(dp->port->vdo);
4140 ++ pin_assign = DP_CAP_PIN_ASSIGN_UFP_D(dp->alt->vdo) &
4141 ++ DP_CAP_PIN_ASSIGN_DFP_D(dp->port->vdo);
4142 + break;
4143 + default:
4144 + break;
4145 +diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
4146 +index 47b733f78fb0d..a8e273fe204ab 100644
4147 +--- a/drivers/usb/typec/mux/intel_pmc_mux.c
4148 ++++ b/drivers/usb/typec/mux/intel_pmc_mux.c
4149 +@@ -571,9 +571,11 @@ err_unregister_switch:
4150 +
4151 + static int is_memory(struct acpi_resource *res, void *data)
4152 + {
4153 +- struct resource r;
4154 ++ struct resource_win win = {};
4155 ++ struct resource *r = &win.res;
4156 +
4157 +- return !acpi_dev_resource_memory(res, &r);
4158 ++ return !(acpi_dev_resource_memory(res, r) ||
4159 ++ acpi_dev_resource_address_space(res, &win));
4160 + }
4161 +
4162 + /* IOM ACPI IDs and IOM_PORT_STATUS_OFFSET */
4163 +@@ -583,6 +585,9 @@ static const struct acpi_device_id iom_acpi_ids[] = {
4164 +
4165 + /* AlderLake */
4166 + { "INTC1079", 0x160, },
4167 ++
4168 ++ /* Meteor Lake */
4169 ++ { "INTC107A", 0x160, },
4170 + {}
4171 + };
4172 +
4173 +diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
4174 +index 3bc2f4ebd1feb..984a13a9efc22 100644
4175 +--- a/drivers/usb/typec/tcpm/tcpm.c
4176 ++++ b/drivers/usb/typec/tcpm/tcpm.c
4177 +@@ -6191,6 +6191,13 @@ static int tcpm_psy_set_prop(struct power_supply *psy,
4178 + struct tcpm_port *port = power_supply_get_drvdata(psy);
4179 + int ret;
4180 +
4181 ++ /*
4182 ++ * All the properties below are related to USB PD. The check needs to be
4183 ++ * property specific when a non-pd related property is added.
4184 ++ */
4185 ++ if (!port->pd_supported)
4186 ++ return -EOPNOTSUPP;
4187 ++
4188 + switch (psp) {
4189 + case POWER_SUPPLY_PROP_ONLINE:
4190 + ret = tcpm_psy_set_online(port, val);
4191 +diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
4192 +index 1aea46493b852..7f2624f427241 100644
4193 +--- a/drivers/usb/typec/ucsi/ucsi.c
4194 ++++ b/drivers/usb/typec/ucsi/ucsi.c
4195 +@@ -1200,32 +1200,6 @@ out_unlock:
4196 + return ret;
4197 + }
4198 +
4199 +-static void ucsi_unregister_connectors(struct ucsi *ucsi)
4200 +-{
4201 +- struct ucsi_connector *con;
4202 +- int i;
4203 +-
4204 +- if (!ucsi->connector)
4205 +- return;
4206 +-
4207 +- for (i = 0; i < ucsi->cap.num_connectors; i++) {
4208 +- con = &ucsi->connector[i];
4209 +-
4210 +- if (!con->wq)
4211 +- break;
4212 +-
4213 +- cancel_work_sync(&con->work);
4214 +- ucsi_unregister_partner(con);
4215 +- ucsi_unregister_altmodes(con, UCSI_RECIPIENT_CON);
4216 +- ucsi_unregister_port_psy(con);
4217 +- destroy_workqueue(con->wq);
4218 +- typec_unregister_port(con->port);
4219 +- }
4220 +-
4221 +- kfree(ucsi->connector);
4222 +- ucsi->connector = NULL;
4223 +-}
4224 +-
4225 + /**
4226 + * ucsi_init - Initialize UCSI interface
4227 + * @ucsi: UCSI to be initialized
4228 +@@ -1234,6 +1208,7 @@ static void ucsi_unregister_connectors(struct ucsi *ucsi)
4229 + */
4230 + static int ucsi_init(struct ucsi *ucsi)
4231 + {
4232 ++ struct ucsi_connector *con;
4233 + u64 command;
4234 + int ret;
4235 + int i;
4236 +@@ -1264,7 +1239,7 @@ static int ucsi_init(struct ucsi *ucsi)
4237 + }
4238 +
4239 + /* Allocate the connectors. Released in ucsi_unregister() */
4240 +- ucsi->connector = kcalloc(ucsi->cap.num_connectors,
4241 ++ ucsi->connector = kcalloc(ucsi->cap.num_connectors + 1,
4242 + sizeof(*ucsi->connector), GFP_KERNEL);
4243 + if (!ucsi->connector) {
4244 + ret = -ENOMEM;
4245 +@@ -1288,7 +1263,15 @@ static int ucsi_init(struct ucsi *ucsi)
4246 + return 0;
4247 +
4248 + err_unregister:
4249 +- ucsi_unregister_connectors(ucsi);
4250 ++ for (con = ucsi->connector; con->port; con++) {
4251 ++ ucsi_unregister_partner(con);
4252 ++ ucsi_unregister_altmodes(con, UCSI_RECIPIENT_CON);
4253 ++ ucsi_unregister_port_psy(con);
4254 ++ if (con->wq)
4255 ++ destroy_workqueue(con->wq);
4256 ++ typec_unregister_port(con->port);
4257 ++ con->port = NULL;
4258 ++ }
4259 +
4260 + err_reset:
4261 + memset(&ucsi->cap, 0, sizeof(ucsi->cap));
4262 +@@ -1402,6 +1385,7 @@ EXPORT_SYMBOL_GPL(ucsi_register);
4263 + void ucsi_unregister(struct ucsi *ucsi)
4264 + {
4265 + u64 cmd = UCSI_SET_NOTIFICATION_ENABLE;
4266 ++ int i;
4267 +
4268 + /* Make sure that we are not in the middle of driver initialization */
4269 + cancel_delayed_work_sync(&ucsi->work);
4270 +@@ -1409,7 +1393,18 @@ void ucsi_unregister(struct ucsi *ucsi)
4271 + /* Disable notifications */
4272 + ucsi->ops->async_write(ucsi, UCSI_CONTROL, &cmd, sizeof(cmd));
4273 +
4274 +- ucsi_unregister_connectors(ucsi);
4275 ++ for (i = 0; i < ucsi->cap.num_connectors; i++) {
4276 ++ cancel_work_sync(&ucsi->connector[i].work);
4277 ++ ucsi_unregister_partner(&ucsi->connector[i]);
4278 ++ ucsi_unregister_altmodes(&ucsi->connector[i],
4279 ++ UCSI_RECIPIENT_CON);
4280 ++ ucsi_unregister_port_psy(&ucsi->connector[i]);
4281 ++ if (ucsi->connector[i].wq)
4282 ++ destroy_workqueue(ucsi->connector[i].wq);
4283 ++ typec_unregister_port(ucsi->connector[i].port);
4284 ++ }
4285 ++
4286 ++ kfree(ucsi->connector);
4287 + }
4288 + EXPORT_SYMBOL_GPL(ucsi_unregister);
4289 +
4290 +diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
4291 +index 738029de3c672..e1ec725c2819d 100644
4292 +--- a/drivers/xen/grant-table.c
4293 ++++ b/drivers/xen/grant-table.c
4294 +@@ -1047,6 +1047,9 @@ int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
4295 + size_t size;
4296 + int i, ret;
4297 +
4298 ++ if (args->nr_pages < 0 || args->nr_pages > (INT_MAX >> PAGE_SHIFT))
4299 ++ return -ENOMEM;
4300 ++
4301 + size = args->nr_pages << PAGE_SHIFT;
4302 + if (args->coherent)
4303 + args->vaddr = dma_alloc_coherent(args->dev, size,
4304 +diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
4305 +index 6cba2c6de2f96..2ad58c4652084 100644
4306 +--- a/fs/cachefiles/internal.h
4307 ++++ b/fs/cachefiles/internal.h
4308 +@@ -111,6 +111,7 @@ struct cachefiles_cache {
4309 + char *tag; /* cache binding tag */
4310 + refcount_t unbind_pincount;/* refcount to do daemon unbind */
4311 + struct xarray reqs; /* xarray of pending on-demand requests */
4312 ++ unsigned long req_id_next;
4313 + struct xarray ondemand_ids; /* xarray for ondemand_id allocation */
4314 + u32 ondemand_id_next;
4315 + };
4316 +diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
4317 +index 1fee702d55293..0254ed39f68ce 100644
4318 +--- a/fs/cachefiles/ondemand.c
4319 ++++ b/fs/cachefiles/ondemand.c
4320 +@@ -158,9 +158,13 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
4321 +
4322 + /* fail OPEN request if daemon reports an error */
4323 + if (size < 0) {
4324 +- if (!IS_ERR_VALUE(size))
4325 +- size = -EINVAL;
4326 +- req->error = size;
4327 ++ if (!IS_ERR_VALUE(size)) {
4328 ++ req->error = -EINVAL;
4329 ++ ret = -EINVAL;
4330 ++ } else {
4331 ++ req->error = size;
4332 ++ ret = 0;
4333 ++ }
4334 + goto out;
4335 + }
4336 +
4337 +@@ -238,14 +242,19 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
4338 + unsigned long id = 0;
4339 + size_t n;
4340 + int ret = 0;
4341 +- XA_STATE(xas, &cache->reqs, 0);
4342 ++ XA_STATE(xas, &cache->reqs, cache->req_id_next);
4343 +
4344 + /*
4345 +- * Search for a request that has not ever been processed, to prevent
4346 +- * requests from being processed repeatedly.
4347 ++ * Cyclically search for a request that has never been processed,
4348 ++ * to prevent requests from being processed repeatedly, and make
4349 ++ * request distribution fair.
4350 + */
4351 + xa_lock(&cache->reqs);
4352 + req = xas_find_marked(&xas, UINT_MAX, CACHEFILES_REQ_NEW);
4353 ++ if (!req && cache->req_id_next > 0) {
4354 ++ xas_set(&xas, 0);
4355 ++ req = xas_find_marked(&xas, cache->req_id_next - 1, CACHEFILES_REQ_NEW);
4356 ++ }
4357 + if (!req) {
4358 + xa_unlock(&cache->reqs);
4359 + return 0;
4360 +@@ -260,6 +269,7 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
4361 + }
4362 +
4363 + xas_clear_mark(&xas, CACHEFILES_REQ_NEW);
4364 ++ cache->req_id_next = xas.xa_index + 1;
4365 + xa_unlock(&cache->reqs);
4366 +
4367 + id = xas.xa_index;
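
The req_id_next cursor added above makes the xarray scan cyclic: search from the cursor towards UINT_MAX first, then wrap around and cover [0, req_id_next). A minimal userspace sketch of the same wrap-around search, using a plain flag array in place of the kernel xarray (find_pending and its parameters are illustrative, not kernel API):

#include <stddef.h>
#include <stdbool.h>

/* Scan from the saved cursor to the end of the range first, then wrap
 * to cover [0, cursor), so low-index entries cannot starve the rest. */
static int find_pending(const bool *pending, size_t n, size_t *cursor)
{
        size_t i;

        for (i = *cursor; i < n; i++)           /* cursor .. end */
                if (pending[i])
                        goto found;
        for (i = 0; i < *cursor; i++)           /* wrap: 0 .. cursor-1 */
                if (pending[i])
                        goto found;
        return -1;                              /* nothing pending */
found:
        *cursor = i + 1;                        /* resume after this slot */
        return (int)i;
}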
4368 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
4369 +index c7614ade875b5..ba58d7fd54f9e 100644
4370 +--- a/fs/cifs/smb2pdu.c
4371 ++++ b/fs/cifs/smb2pdu.c
4372 +@@ -964,16 +964,17 @@ SMB2_negotiate(const unsigned int xid,
4373 + } else if (rc != 0)
4374 + goto neg_exit;
4375 +
4376 ++ rc = -EIO;
4377 + if (strcmp(server->vals->version_string,
4378 + SMB3ANY_VERSION_STRING) == 0) {
4379 + if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
4380 + cifs_server_dbg(VFS,
4381 + "SMB2 dialect returned but not requested\n");
4382 +- return -EIO;
4383 ++ goto neg_exit;
4384 + } else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
4385 + cifs_server_dbg(VFS,
4386 + "SMB2.1 dialect returned but not requested\n");
4387 +- return -EIO;
4388 ++ goto neg_exit;
4389 + } else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
4390 + /* ops set to 3.0 by default, so update */
4391 + server->ops = &smb311_operations;
4392 +@@ -984,7 +985,7 @@ SMB2_negotiate(const unsigned int xid,
4393 + if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
4394 + cifs_server_dbg(VFS,
4395 + "SMB2 dialect returned but not requested\n");
4396 +- return -EIO;
4397 ++ goto neg_exit;
4398 + } else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
4399 + /* ops set to 3.0 by default, so update */
4400 + server->ops = &smb21_operations;
4401 +@@ -998,7 +999,7 @@ SMB2_negotiate(const unsigned int xid,
4402 + /* if requested single dialect ensure returned dialect matched */
4403 + cifs_server_dbg(VFS, "Invalid 0x%x dialect returned: not requested\n",
4404 + le16_to_cpu(rsp->DialectRevision));
4405 +- return -EIO;
4406 ++ goto neg_exit;
4407 + }
4408 +
4409 + cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode);
4410 +@@ -1016,9 +1017,10 @@ SMB2_negotiate(const unsigned int xid,
4411 + else {
4412 + cifs_server_dbg(VFS, "Invalid dialect returned by server 0x%x\n",
4413 + le16_to_cpu(rsp->DialectRevision));
4414 +- rc = -EIO;
4415 + goto neg_exit;
4416 + }
4417 ++
4418 ++ rc = 0;
4419 + server->dialect = le16_to_cpu(rsp->DialectRevision);
4420 +
4421 + /*
4422 +diff --git a/include/linux/bpf.h b/include/linux/bpf.h
4423 +index 7424cf234ae03..ed352c00330cd 100644
4424 +--- a/include/linux/bpf.h
4425 ++++ b/include/linux/bpf.h
4426 +@@ -398,6 +398,9 @@ enum bpf_type_flag {
4427 + /* DYNPTR points to a ringbuf record. */
4428 + DYNPTR_TYPE_RINGBUF = BIT(9 + BPF_BASE_TYPE_BITS),
4429 +
4430 ++ /* Size is known at compile time. */
4431 ++ MEM_FIXED_SIZE = BIT(10 + BPF_BASE_TYPE_BITS),
4432 ++
4433 + __BPF_TYPE_FLAG_MAX,
4434 + __BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1,
4435 + };
4436 +@@ -461,6 +464,8 @@ enum bpf_arg_type {
4437 + * all bytes or clear them in error case.
4438 + */
4439 + ARG_PTR_TO_UNINIT_MEM = MEM_UNINIT | ARG_PTR_TO_MEM,
4440 ++ /* Pointer to valid memory of size known at compile time. */
4441 ++ ARG_PTR_TO_FIXED_SIZE_MEM = MEM_FIXED_SIZE | ARG_PTR_TO_MEM,
4442 +
4443 + /* This must be the last entry. Its purpose is to ensure the enum is
4444 + * wide enough to hold the higher bits reserved for bpf_type_flag.
4445 +@@ -526,6 +531,14 @@ struct bpf_func_proto {
4446 + u32 *arg5_btf_id;
4447 + };
4448 + u32 *arg_btf_id[5];
4449 ++ struct {
4450 ++ size_t arg1_size;
4451 ++ size_t arg2_size;
4452 ++ size_t arg3_size;
4453 ++ size_t arg4_size;
4454 ++ size_t arg5_size;
4455 ++ };
4456 ++ size_t arg_size[5];
4457 + };
4458 + int *ret_btf_id; /* return value btf_id */
4459 + bool (*allowed)(const struct bpf_prog *prog);
4460 +diff --git a/include/linux/platform_data/x86/pmc_atom.h b/include/linux/platform_data/x86/pmc_atom.h
4461 +index 6807839c718bd..ea01dd80153b3 100644
4462 +--- a/include/linux/platform_data/x86/pmc_atom.h
4463 ++++ b/include/linux/platform_data/x86/pmc_atom.h
4464 +@@ -7,6 +7,8 @@
4465 + #ifndef PMC_ATOM_H
4466 + #define PMC_ATOM_H
4467 +
4468 ++#include <linux/bits.h>
4469 ++
4470 + /* ValleyView Power Control Unit PCI Device ID */
4471 + #define PCI_DEVICE_ID_VLV_PMC 0x0F1C
4472 + /* CherryTrail Power Control Unit PCI Device ID */
4473 +@@ -139,9 +141,9 @@
4474 + #define ACPI_MMIO_REG_LEN 0x100
4475 +
4476 + #define PM1_CNT 0x4
4477 +-#define SLEEP_TYPE_MASK 0xFFFFECFF
4478 ++#define SLEEP_TYPE_MASK GENMASK(12, 10)
4479 + #define SLEEP_TYPE_S5 0x1C00
4480 +-#define SLEEP_ENABLE 0x2000
4481 ++#define SLEEP_ENABLE BIT(13)
4482 +
4483 + extern int pmc_atom_read(int offset, u32 *value);
4484 +
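
BIT() and GENMASK() expand to plain shifts and masks, and the numbers suggest this hunk is a fix rather than a pure cleanup: the set bits of the inverted old literal (~0xFFFFECFF == 0x1300) do not line up with the GENMASK(12, 10) == 0x1C00 field now used for SLEEP_TYPE_MASK. A self-contained approximation of the two macros (simplified; the in-tree GENMASK() also carries compile-time sanity checks):

#include <stdio.h>

#define BITS_PER_LONG   ((int)(8 * sizeof(unsigned long)))
#define BIT(n)          (1UL << (n))
#define GENMASK(h, l) \
        ((~0UL << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

int main(void)
{
        printf("SLEEP_ENABLE    = %#lx\n", BIT(13));          /* 0x2000 */
        printf("SLEEP_TYPE_MASK = %#lx\n", GENMASK(12, 10));  /* 0x1c00 */
        printf("old mask bits   = %#lx\n", ~0xFFFFECFFUL & 0xFFFFUL); /* 0x1300 */
        return 0;
}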
4485 +diff --git a/include/linux/usb.h b/include/linux/usb.h
4486 +index 60bee864d8977..1a664ab2ebc66 100644
4487 +--- a/include/linux/usb.h
4488 ++++ b/include/linux/usb.h
4489 +@@ -575,6 +575,7 @@ struct usb3_lpm_parameters {
4490 + * @devaddr: device address, XHCI: assigned by HW, others: same as devnum
4491 + * @can_submit: URBs may be submitted
4492 + * @persist_enabled: USB_PERSIST enabled for this device
4493 ++ * @reset_in_progress: the device is being reset
4494 + * @have_langid: whether string_langid is valid
4495 + * @authorized: policy has said we can use it;
4496 + * (user space) policy determines if we authorize this device to be
4497 +@@ -661,6 +662,7 @@ struct usb_device {
4498 +
4499 + unsigned can_submit:1;
4500 + unsigned persist_enabled:1;
4501 ++ unsigned reset_in_progress:1;
4502 + unsigned have_langid:1;
4503 + unsigned authorized:1;
4504 + unsigned authenticated:1;
4505 +diff --git a/include/linux/usb/typec_dp.h b/include/linux/usb/typec_dp.h
4506 +index cfb916cccd316..8d09c2f0a9b80 100644
4507 +--- a/include/linux/usb/typec_dp.h
4508 ++++ b/include/linux/usb/typec_dp.h
4509 +@@ -73,6 +73,11 @@ enum {
4510 + #define DP_CAP_USB BIT(7)
4511 + #define DP_CAP_DFP_D_PIN_ASSIGN(_cap_) (((_cap_) & GENMASK(15, 8)) >> 8)
4512 + #define DP_CAP_UFP_D_PIN_ASSIGN(_cap_) (((_cap_) & GENMASK(23, 16)) >> 16)
4513 ++/* Get pin assignment taking plug & receptacle into consideration */
4514 ++#define DP_CAP_PIN_ASSIGN_UFP_D(_cap_) ((_cap_ & DP_CAP_RECEPTACLE) ? \
4515 ++ DP_CAP_UFP_D_PIN_ASSIGN(_cap_) : DP_CAP_DFP_D_PIN_ASSIGN(_cap_))
4516 ++#define DP_CAP_PIN_ASSIGN_DFP_D(_cap_) ((_cap_ & DP_CAP_RECEPTACLE) ? \
4517 ++ DP_CAP_DFP_D_PIN_ASSIGN(_cap_) : DP_CAP_UFP_D_PIN_ASSIGN(_cap_))
4518 +
4519 + /* DisplayPort Status Update VDO bits */
4520 + #define DP_STATUS_CONNECTION(_status_) ((_status_) & 3)
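
The two new helpers encode plug/receptacle mirroring: a receptacle reports each role's pin assignments in its own byte, while a plug (the cable end) describes the far side, so the opposite byte has to be read. A standalone sketch of the selection logic, with the pin-assignment bit positions taken from this header and an illustrative helper name (DP_CAP_RECEPTACLE is assumed to be bit 6, as in the full header):

#include <stdint.h>

#define DP_CAP_RECEPTACLE       (1u << 6)
#define DFP_D_PINS(cap)         (((cap) >> 8) & 0xff)   /* bits 15:8 */
#define UFP_D_PINS(cap)         (((cap) >> 16) & 0xff)  /* bits 23:16 */

static uint8_t pin_assign_ufp_d(uint32_t cap)
{
        /* Receptacle: take the UFP_D byte directly; plug: mirrored. */
        return (cap & DP_CAP_RECEPTACLE) ? UFP_D_PINS(cap)
                                         : DFP_D_PINS(cap);
}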
4521 +diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
4522 +index 20f60d9da7418..cf1f22c01ed3d 100644
4523 +--- a/include/net/ip_tunnels.h
4524 ++++ b/include/net/ip_tunnels.h
4525 +@@ -246,7 +246,8 @@ static inline void ip_tunnel_init_flow(struct flowi4 *fl4,
4526 + __be32 daddr, __be32 saddr,
4527 + __be32 key, __u8 tos,
4528 + struct net *net, int oif,
4529 +- __u32 mark, __u32 tun_inner_hash)
4530 ++ __u32 mark, __u32 tun_inner_hash,
4531 ++ __u8 flow_flags)
4532 + {
4533 + memset(fl4, 0, sizeof(*fl4));
4534 +
4535 +@@ -263,6 +264,7 @@ static inline void ip_tunnel_init_flow(struct flowi4 *fl4,
4536 + fl4->fl4_gre_key = key;
4537 + fl4->flowi4_mark = mark;
4538 + fl4->flowi4_multipath_hash = tun_inner_hash;
4539 ++ fl4->flowi4_flags = flow_flags;
4540 + }
4541 +
4542 + int ip_tunnel_init(struct net_device *dev);
4543 +diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
4544 +index 7a394f7c205c4..34dfa45ef4f3b 100644
4545 +--- a/kernel/bpf/cgroup.c
4546 ++++ b/kernel/bpf/cgroup.c
4547 +@@ -762,8 +762,10 @@ static void purge_effective_progs(struct cgroup *cgrp, struct bpf_prog *prog,
4548 + pos++;
4549 + }
4550 + }
4551 ++
4552 ++ /* no link or prog match, skip the cgroup of this layer */
4553 ++ continue;
4554 + found:
4555 +- BUG_ON(!cg);
4556 + progs = rcu_dereference_protected(
4557 + desc->bpf.effective[atype],
4558 + lockdep_is_held(&cgroup_mutex));
4559 +diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
4560 +index fb6bd57228a84..cf44ff50b1f23 100644
4561 +--- a/kernel/bpf/core.c
4562 ++++ b/kernel/bpf/core.c
4563 +@@ -1005,7 +1005,7 @@ pure_initcall(bpf_jit_charge_init);
4564 +
4565 + int bpf_jit_charge_modmem(u32 size)
4566 + {
4567 +- if (atomic_long_add_return(size, &bpf_jit_current) > bpf_jit_limit) {
4568 ++ if (atomic_long_add_return(size, &bpf_jit_current) > READ_ONCE(bpf_jit_limit)) {
4569 + if (!bpf_capable()) {
4570 + atomic_long_sub(size, &bpf_jit_current);
4571 + return -EPERM;
4572 +diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
4573 +index 82e83cfb4114a..dd0fc2a86ce17 100644
4574 +--- a/kernel/bpf/syscall.c
4575 ++++ b/kernel/bpf/syscall.c
4576 +@@ -5153,7 +5153,7 @@ syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
4577 + {
4578 + switch (func_id) {
4579 + case BPF_FUNC_sys_bpf:
4580 +- return &bpf_sys_bpf_proto;
4581 ++ return !perfmon_capable() ? NULL : &bpf_sys_bpf_proto;
4582 + case BPF_FUNC_btf_find_by_name_kind:
4583 + return &bpf_btf_find_by_name_kind_proto;
4584 + case BPF_FUNC_sys_close:
4585 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
4586 +index 0e45d405f151c..339147061127a 100644
4587 +--- a/kernel/bpf/verifier.c
4588 ++++ b/kernel/bpf/verifier.c
4589 +@@ -5533,17 +5533,6 @@ static bool arg_type_is_mem_size(enum bpf_arg_type type)
4590 + type == ARG_CONST_SIZE_OR_ZERO;
4591 + }
4592 +
4593 +-static bool arg_type_is_alloc_size(enum bpf_arg_type type)
4594 +-{
4595 +- return type == ARG_CONST_ALLOC_SIZE_OR_ZERO;
4596 +-}
4597 +-
4598 +-static bool arg_type_is_int_ptr(enum bpf_arg_type type)
4599 +-{
4600 +- return type == ARG_PTR_TO_INT ||
4601 +- type == ARG_PTR_TO_LONG;
4602 +-}
4603 +-
4604 + static bool arg_type_is_release(enum bpf_arg_type type)
4605 + {
4606 + return type & OBJ_RELEASE;
4607 +@@ -5847,6 +5836,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
4608 + struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
4609 + enum bpf_arg_type arg_type = fn->arg_type[arg];
4610 + enum bpf_reg_type type = reg->type;
4611 ++ u32 *arg_btf_id = NULL;
4612 + int err = 0;
4613 +
4614 + if (arg_type == ARG_DONTCARE)
4615 +@@ -5883,7 +5873,11 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
4616 + */
4617 + goto skip_type_check;
4618 +
4619 +- err = check_reg_type(env, regno, arg_type, fn->arg_btf_id[arg], meta);
4620 ++ /* arg_btf_id and arg_size are in a union. */
4621 ++ if (base_type(arg_type) == ARG_PTR_TO_BTF_ID)
4622 ++ arg_btf_id = fn->arg_btf_id[arg];
4623 ++
4624 ++ err = check_reg_type(env, regno, arg_type, arg_btf_id, meta);
4625 + if (err)
4626 + return err;
4627 +
4628 +@@ -5924,7 +5918,8 @@ skip_type_check:
4629 + meta->ref_obj_id = reg->ref_obj_id;
4630 + }
4631 +
4632 +- if (arg_type == ARG_CONST_MAP_PTR) {
4633 ++ switch (base_type(arg_type)) {
4634 ++ case ARG_CONST_MAP_PTR:
4635 + /* bpf_map_xxx(map_ptr) call: remember that map_ptr */
4636 + if (meta->map_ptr) {
4637 + /* Use map_uid (which is unique id of inner map) to reject:
4638 +@@ -5949,7 +5944,8 @@ skip_type_check:
4639 + }
4640 + meta->map_ptr = reg->map_ptr;
4641 + meta->map_uid = reg->map_uid;
4642 +- } else if (arg_type == ARG_PTR_TO_MAP_KEY) {
4643 ++ break;
4644 ++ case ARG_PTR_TO_MAP_KEY:
4645 + /* bpf_map_xxx(..., map_ptr, ..., key) call:
4646 + * check that [key, key + map->key_size) are within
4647 + * stack limits and initialized
4648 +@@ -5966,7 +5962,8 @@ skip_type_check:
4649 + err = check_helper_mem_access(env, regno,
4650 + meta->map_ptr->key_size, false,
4651 + NULL);
4652 +- } else if (base_type(arg_type) == ARG_PTR_TO_MAP_VALUE) {
4653 ++ break;
4654 ++ case ARG_PTR_TO_MAP_VALUE:
4655 + if (type_may_be_null(arg_type) && register_is_null(reg))
4656 + return 0;
4657 +
4658 +@@ -5982,14 +5979,16 @@ skip_type_check:
4659 + err = check_helper_mem_access(env, regno,
4660 + meta->map_ptr->value_size, false,
4661 + meta);
4662 +- } else if (arg_type == ARG_PTR_TO_PERCPU_BTF_ID) {
4663 ++ break;
4664 ++ case ARG_PTR_TO_PERCPU_BTF_ID:
4665 + if (!reg->btf_id) {
4666 + verbose(env, "Helper has invalid btf_id in R%d\n", regno);
4667 + return -EACCES;
4668 + }
4669 + meta->ret_btf = reg->btf;
4670 + meta->ret_btf_id = reg->btf_id;
4671 +- } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
4672 ++ break;
4673 ++ case ARG_PTR_TO_SPIN_LOCK:
4674 + if (meta->func_id == BPF_FUNC_spin_lock) {
4675 + if (process_spin_lock(env, regno, true))
4676 + return -EACCES;
4677 +@@ -6000,21 +5999,32 @@ skip_type_check:
4678 + verbose(env, "verifier internal error\n");
4679 + return -EFAULT;
4680 + }
4681 +- } else if (arg_type == ARG_PTR_TO_TIMER) {
4682 ++ break;
4683 ++ case ARG_PTR_TO_TIMER:
4684 + if (process_timer_func(env, regno, meta))
4685 + return -EACCES;
4686 +- } else if (arg_type == ARG_PTR_TO_FUNC) {
4687 ++ break;
4688 ++ case ARG_PTR_TO_FUNC:
4689 + meta->subprogno = reg->subprogno;
4690 +- } else if (base_type(arg_type) == ARG_PTR_TO_MEM) {
4691 ++ break;
4692 ++ case ARG_PTR_TO_MEM:
4693 + /* The access to this pointer is only checked when we hit the
4694 + * next is_mem_size argument below.
4695 + */
4696 + meta->raw_mode = arg_type & MEM_UNINIT;
4697 +- } else if (arg_type_is_mem_size(arg_type)) {
4698 +- bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
4699 +-
4700 +- err = check_mem_size_reg(env, reg, regno, zero_size_allowed, meta);
4701 +- } else if (arg_type_is_dynptr(arg_type)) {
4702 ++ if (arg_type & MEM_FIXED_SIZE) {
4703 ++ err = check_helper_mem_access(env, regno,
4704 ++ fn->arg_size[arg], false,
4705 ++ meta);
4706 ++ }
4707 ++ break;
4708 ++ case ARG_CONST_SIZE:
4709 ++ err = check_mem_size_reg(env, reg, regno, false, meta);
4710 ++ break;
4711 ++ case ARG_CONST_SIZE_OR_ZERO:
4712 ++ err = check_mem_size_reg(env, reg, regno, true, meta);
4713 ++ break;
4714 ++ case ARG_PTR_TO_DYNPTR:
4715 + if (arg_type & MEM_UNINIT) {
4716 + if (!is_dynptr_reg_valid_uninit(env, reg)) {
4717 + verbose(env, "Dynptr has to be an uninitialized dynptr\n");
4718 +@@ -6048,21 +6058,31 @@ skip_type_check:
4719 + err_extra, arg + 1);
4720 + return -EINVAL;
4721 + }
4722 +- } else if (arg_type_is_alloc_size(arg_type)) {
4723 ++ break;
4724 ++ case ARG_CONST_ALLOC_SIZE_OR_ZERO:
4725 + if (!tnum_is_const(reg->var_off)) {
4726 + verbose(env, "R%d is not a known constant'\n",
4727 + regno);
4728 + return -EACCES;
4729 + }
4730 + meta->mem_size = reg->var_off.value;
4731 +- } else if (arg_type_is_int_ptr(arg_type)) {
4732 ++ err = mark_chain_precision(env, regno);
4733 ++ if (err)
4734 ++ return err;
4735 ++ break;
4736 ++ case ARG_PTR_TO_INT:
4737 ++ case ARG_PTR_TO_LONG:
4738 ++ {
4739 + int size = int_ptr_type_to_size(arg_type);
4740 +
4741 + err = check_helper_mem_access(env, regno, size, false, meta);
4742 + if (err)
4743 + return err;
4744 + err = check_ptr_alignment(env, reg, 0, size, true);
4745 +- } else if (arg_type == ARG_PTR_TO_CONST_STR) {
4746 ++ break;
4747 ++ }
4748 ++ case ARG_PTR_TO_CONST_STR:
4749 ++ {
4750 + struct bpf_map *map = reg->map_ptr;
4751 + int map_off;
4752 + u64 map_addr;
4753 +@@ -6101,9 +6121,12 @@ skip_type_check:
4754 + verbose(env, "string is not zero-terminated\n");
4755 + return -EINVAL;
4756 + }
4757 +- } else if (arg_type == ARG_PTR_TO_KPTR) {
4758 ++ break;
4759 ++ }
4760 ++ case ARG_PTR_TO_KPTR:
4761 + if (process_kptr_func(env, regno, meta))
4762 + return -EACCES;
4763 ++ break;
4764 + }
4765 +
4766 + return err;
4767 +@@ -6400,11 +6423,19 @@ static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
4768 + return count <= 1;
4769 + }
4770 +
4771 +-static bool check_args_pair_invalid(enum bpf_arg_type arg_curr,
4772 +- enum bpf_arg_type arg_next)
4773 ++static bool check_args_pair_invalid(const struct bpf_func_proto *fn, int arg)
4774 + {
4775 +- return (base_type(arg_curr) == ARG_PTR_TO_MEM) !=
4776 +- arg_type_is_mem_size(arg_next);
4777 ++ bool is_fixed = fn->arg_type[arg] & MEM_FIXED_SIZE;
4778 ++ bool has_size = fn->arg_size[arg] != 0;
4779 ++ bool is_next_size = false;
4780 ++
4781 ++ if (arg + 1 < ARRAY_SIZE(fn->arg_type))
4782 ++ is_next_size = arg_type_is_mem_size(fn->arg_type[arg + 1]);
4783 ++
4784 ++ if (base_type(fn->arg_type[arg]) != ARG_PTR_TO_MEM)
4785 ++ return is_next_size;
4786 ++
4787 ++ return has_size == is_next_size || is_next_size == is_fixed;
4788 + }
4789 +
4790 + static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
4791 +@@ -6415,11 +6446,11 @@ static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
4792 + * helper function specification.
4793 + */
4794 + if (arg_type_is_mem_size(fn->arg1_type) ||
4795 +- base_type(fn->arg5_type) == ARG_PTR_TO_MEM ||
4796 +- check_args_pair_invalid(fn->arg1_type, fn->arg2_type) ||
4797 +- check_args_pair_invalid(fn->arg2_type, fn->arg3_type) ||
4798 +- check_args_pair_invalid(fn->arg3_type, fn->arg4_type) ||
4799 +- check_args_pair_invalid(fn->arg4_type, fn->arg5_type))
4800 ++ check_args_pair_invalid(fn, 0) ||
4801 ++ check_args_pair_invalid(fn, 1) ||
4802 ++ check_args_pair_invalid(fn, 2) ||
4803 ++ check_args_pair_invalid(fn, 3) ||
4804 ++ check_args_pair_invalid(fn, 4))
4805 + return false;
4806 +
4807 + return true;
4808 +@@ -6460,7 +6491,10 @@ static bool check_btf_id_ok(const struct bpf_func_proto *fn)
4809 + if (base_type(fn->arg_type[i]) == ARG_PTR_TO_BTF_ID && !fn->arg_btf_id[i])
4810 + return false;
4811 +
4812 +- if (base_type(fn->arg_type[i]) != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i])
4813 ++ if (base_type(fn->arg_type[i]) != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i] &&
4814 ++ /* arg_btf_id and arg_size are in a union. */
4815 ++ (base_type(fn->arg_type[i]) != ARG_PTR_TO_MEM ||
4816 ++ !(fn->arg_type[i] & MEM_FIXED_SIZE)))
4817 + return false;
4818 + }
4819 +
4820 +diff --git a/mm/pagewalk.c b/mm/pagewalk.c
4821 +index 9b3db11a4d1db..fa7a3d21a7518 100644
4822 +--- a/mm/pagewalk.c
4823 ++++ b/mm/pagewalk.c
4824 +@@ -110,7 +110,7 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
4825 + do {
4826 + again:
4827 + next = pmd_addr_end(addr, end);
4828 +- if (pmd_none(*pmd) || (!walk->vma && !walk->no_vma)) {
4829 ++ if (pmd_none(*pmd)) {
4830 + if (ops->pte_hole)
4831 + err = ops->pte_hole(addr, next, depth, walk);
4832 + if (err)
4833 +@@ -171,7 +171,7 @@ static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
4834 + do {
4835 + again:
4836 + next = pud_addr_end(addr, end);
4837 +- if (pud_none(*pud) || (!walk->vma && !walk->no_vma)) {
4838 ++ if (pud_none(*pud)) {
4839 + if (ops->pte_hole)
4840 + err = ops->pte_hole(addr, next, depth, walk);
4841 + if (err)
4842 +@@ -366,19 +366,19 @@ static int __walk_page_range(unsigned long start, unsigned long end,
4843 + struct vm_area_struct *vma = walk->vma;
4844 + const struct mm_walk_ops *ops = walk->ops;
4845 +
4846 +- if (vma && ops->pre_vma) {
4847 ++ if (ops->pre_vma) {
4848 + err = ops->pre_vma(start, end, walk);
4849 + if (err)
4850 + return err;
4851 + }
4852 +
4853 +- if (vma && is_vm_hugetlb_page(vma)) {
4854 ++ if (is_vm_hugetlb_page(vma)) {
4855 + if (ops->hugetlb_entry)
4856 + err = walk_hugetlb_range(start, end, walk);
4857 + } else
4858 + err = walk_pgd_range(start, end, walk);
4859 +
4860 +- if (vma && ops->post_vma)
4861 ++ if (ops->post_vma)
4862 + ops->post_vma(walk);
4863 +
4864 + return err;
4865 +@@ -450,9 +450,13 @@ int walk_page_range(struct mm_struct *mm, unsigned long start,
4866 + if (!vma) { /* after the last vma */
4867 + walk.vma = NULL;
4868 + next = end;
4869 ++ if (ops->pte_hole)
4870 ++ err = ops->pte_hole(start, next, -1, &walk);
4871 + } else if (start < vma->vm_start) { /* outside vma */
4872 + walk.vma = NULL;
4873 + next = min(end, vma->vm_start);
4874 ++ if (ops->pte_hole)
4875 ++ err = ops->pte_hole(start, next, -1, &walk);
4876 + } else { /* inside vma */
4877 + walk.vma = vma;
4878 + next = min(end, vma->vm_end);
4879 +@@ -470,9 +474,8 @@ int walk_page_range(struct mm_struct *mm, unsigned long start,
4880 + }
4881 + if (err < 0)
4882 + break;
4883 +- }
4884 +- if (walk.vma || walk.ops->pte_hole)
4885 + err = __walk_page_range(start, next, &walk);
4886 ++ }
4887 + if (err)
4888 + break;
4889 + } while (start = next, start < end);
4890 +@@ -501,9 +504,9 @@ int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
4891 + if (start >= end || !walk.mm)
4892 + return -EINVAL;
4893 +
4894 +- mmap_assert_locked(walk.mm);
4895 ++ mmap_assert_write_locked(walk.mm);
4896 +
4897 +- return __walk_page_range(start, end, &walk);
4898 ++ return walk_pgd_range(start, end, &walk);
4899 + }
4900 +
4901 + int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
4902 +diff --git a/mm/ptdump.c b/mm/ptdump.c
4903 +index eea3d28d173c2..8adab455a68b3 100644
4904 +--- a/mm/ptdump.c
4905 ++++ b/mm/ptdump.c
4906 +@@ -152,13 +152,13 @@ void ptdump_walk_pgd(struct ptdump_state *st, struct mm_struct *mm, pgd_t *pgd)
4907 + {
4908 + const struct ptdump_range *range = st->range;
4909 +
4910 +- mmap_read_lock(mm);
4911 ++ mmap_write_lock(mm);
4912 + while (range->start != range->end) {
4913 + walk_page_range_novma(mm, range->start, range->end,
4914 + &ptdump_ops, pgd, st);
4915 + range++;
4916 + }
4917 +- mmap_read_unlock(mm);
4918 ++ mmap_write_unlock(mm);
4919 +
4920 + /* Flush out the last page */
4921 + st->note_page(st, 0, -1, 0);
4922 +diff --git a/mm/slab_common.c b/mm/slab_common.c
4923 +index 77c3adf40e504..dbd4b6f9b0e79 100644
4924 +--- a/mm/slab_common.c
4925 ++++ b/mm/slab_common.c
4926 +@@ -420,6 +420,28 @@ kmem_cache_create(const char *name, unsigned int size, unsigned int align,
4927 + }
4928 + EXPORT_SYMBOL(kmem_cache_create);
4929 +
4930 ++#ifdef SLAB_SUPPORTS_SYSFS
4931 ++/*
4932 ++ * For a given kmem_cache, kmem_cache_destroy() should only be called
4933 ++ * once or there will be a use-after-free problem. The actual deletion
4934 ++ * and release of the kobject does not need slab_mutex or cpu_hotplug_lock
4935 ++ * protection. So they are now done without holding those locks.
4936 ++ *
4937 ++ * Note that there will be a slight delay in the deletion of sysfs files
4938 ++ * if kmem_cache_release() is called indirectly from a work function.
4939 ++ */
4940 ++static void kmem_cache_release(struct kmem_cache *s)
4941 ++{
4942 ++ sysfs_slab_unlink(s);
4943 ++ sysfs_slab_release(s);
4944 ++}
4945 ++#else
4946 ++static void kmem_cache_release(struct kmem_cache *s)
4947 ++{
4948 ++ slab_kmem_cache_release(s);
4949 ++}
4950 ++#endif
4951 ++
4952 + static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
4953 + {
4954 + LIST_HEAD(to_destroy);
4955 +@@ -446,11 +468,7 @@ static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
4956 + list_for_each_entry_safe(s, s2, &to_destroy, list) {
4957 + debugfs_slab_release(s);
4958 + kfence_shutdown_cache(s);
4959 +-#ifdef SLAB_SUPPORTS_SYSFS
4960 +- sysfs_slab_release(s);
4961 +-#else
4962 +- slab_kmem_cache_release(s);
4963 +-#endif
4964 ++ kmem_cache_release(s);
4965 + }
4966 + }
4967 +
4968 +@@ -465,20 +483,11 @@ static int shutdown_cache(struct kmem_cache *s)
4969 + list_del(&s->list);
4970 +
4971 + if (s->flags & SLAB_TYPESAFE_BY_RCU) {
4972 +-#ifdef SLAB_SUPPORTS_SYSFS
4973 +- sysfs_slab_unlink(s);
4974 +-#endif
4975 + list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
4976 + schedule_work(&slab_caches_to_rcu_destroy_work);
4977 + } else {
4978 + kfence_shutdown_cache(s);
4979 + debugfs_slab_release(s);
4980 +-#ifdef SLAB_SUPPORTS_SYSFS
4981 +- sysfs_slab_unlink(s);
4982 +- sysfs_slab_release(s);
4983 +-#else
4984 +- slab_kmem_cache_release(s);
4985 +-#endif
4986 + }
4987 +
4988 + return 0;
4989 +@@ -493,14 +502,16 @@ void slab_kmem_cache_release(struct kmem_cache *s)
4990 +
4991 + void kmem_cache_destroy(struct kmem_cache *s)
4992 + {
4993 ++ int refcnt;
4994 ++
4995 + if (unlikely(!s) || !kasan_check_byte(s))
4996 + return;
4997 +
4998 + cpus_read_lock();
4999 + mutex_lock(&slab_mutex);
5000 +
5001 +- s->refcount--;
5002 +- if (s->refcount)
5003 ++ refcnt = --s->refcount;
5004 ++ if (refcnt)
5005 + goto out_unlock;
5006 +
5007 + WARN(shutdown_cache(s),
5008 +@@ -509,6 +520,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
5009 + out_unlock:
5010 + mutex_unlock(&slab_mutex);
5011 + cpus_read_unlock();
5012 ++ if (!refcnt && !(s->flags & SLAB_TYPESAFE_BY_RCU))
5013 ++ kmem_cache_release(s);
5014 + }
5015 + EXPORT_SYMBOL(kmem_cache_destroy);
5016 +
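
The refcount is now sampled into a local while slab_mutex is still held, and kmem_cache_release() runs only after both slab_mutex and cpu_hotplug_lock are dropped, since the sysfs/kobject teardown must not nest under them. A minimal pthread model of that "decide under the lock, release after unlocking" shape (types and names here are illustrative, not the kernel API):

#include <pthread.h>

struct cache {
        pthread_mutex_t lock;
        int refcount;
};

static void cache_release(struct cache *c)
{
        (void)c;        /* heavy teardown: sysfs unlink, kobject put, ... */
}

static void cache_put(struct cache *c)
{
        int refcnt;

        pthread_mutex_lock(&c->lock);
        refcnt = --c->refcount;         /* decision made under the lock */
        pthread_mutex_unlock(&c->lock);

        if (!refcnt)
                cache_release(c);       /* action taken with no locks held */
}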
5017 +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
5018 +index 7cb956d3abb26..2c320a8fe70d7 100644
5019 +--- a/net/bluetooth/hci_event.c
5020 ++++ b/net/bluetooth/hci_event.c
5021 +@@ -3998,6 +3998,17 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
5022 + }
5023 + }
5024 +
5025 ++ if (i == ARRAY_SIZE(hci_cc_table)) {
5026 ++ /* Unknown opcode, assume byte 0 contains the status, so
5027 ++ * that e.g. __hci_cmd_sync() properly returns errors
5028 ++ * for vendor-specific commands sent by HCI drivers.
5029 ++ * If a vendor doesn't actually follow this convention, we may
5030 ++ * need to introduce a vendor CC table in order to properly set
5031 ++ * the status.
5032 ++ */
5033 ++ *status = skb->data[0];
5034 ++ }
5035 ++
5036 + handle_cmd_cnt_and_timer(hdev, ev->ncmd);
5037 +
5038 + hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
5039 +@@ -5557,7 +5568,7 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5040 + */
5041 + hci_dev_clear_flag(hdev, HCI_LE_ADV);
5042 +
5043 +- conn = hci_lookup_le_connect(hdev);
5044 ++ conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
5045 + if (!conn) {
5046 + /* In case of error status and there is no connection pending
5047 + * just unlock as there is nothing to cleanup.
5048 +diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
5049 +index b5e7d4b8ab24a..3b4cee67bbd60 100644
5050 +--- a/net/bluetooth/hci_sync.c
5051 ++++ b/net/bluetooth/hci_sync.c
5052 +@@ -4452,9 +4452,11 @@ static int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
5053 + /* Clean up hci_conn object if it cannot be cancelled as it
5054 + * likely means the controller and host stack are out of sync.
5055 + */
5056 +- if (err)
5057 ++ if (err) {
5058 ++ hci_dev_lock(hdev);
5059 + hci_conn_failed(conn, err);
5060 +-
5061 ++ hci_dev_unlock(hdev);
5062 ++ }
5063 + return err;
5064 + case BT_CONNECT2:
5065 + return hci_reject_conn_sync(hdev, conn, reason);
5066 +@@ -4967,17 +4969,21 @@ int hci_suspend_sync(struct hci_dev *hdev)
5067 + /* Prevent disconnects from causing scanning to be re-enabled */
5068 + hci_pause_scan_sync(hdev);
5069 +
5070 +- /* Soft disconnect everything (power off) */
5071 +- err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
5072 +- if (err) {
5073 +- /* Set state to BT_RUNNING so resume doesn't notify */
5074 +- hdev->suspend_state = BT_RUNNING;
5075 +- hci_resume_sync(hdev);
5076 +- return err;
5077 +- }
5078 ++ if (hci_conn_count(hdev)) {
5079 ++ /* Soft disconnect everything (power off) */
5080 ++ err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
5081 ++ if (err) {
5082 ++ /* Set state to BT_RUNNING so resume doesn't notify */
5083 ++ hdev->suspend_state = BT_RUNNING;
5084 ++ hci_resume_sync(hdev);
5085 ++ return err;
5086 ++ }
5087 +
5088 +- /* Update event mask so only the allowed event can wakeup the host */
5089 +- hci_set_event_mask_sync(hdev);
5090 ++ /* Update event mask so only the allowed events can wake up the
5091 ++ * host.
5092 ++ */
5093 ++ hci_set_event_mask_sync(hdev);
5094 ++ }
5095 +
5096 + /* Only configure accept list if disconnect succeeded and wake
5097 + * isn't being prevented.
5098 +diff --git a/net/core/skmsg.c b/net/core/skmsg.c
5099 +index 84209e661171e..69ac686c7cae3 100644
5100 +--- a/net/core/skmsg.c
5101 ++++ b/net/core/skmsg.c
5102 +@@ -462,7 +462,7 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
5103 +
5104 + if (copied == len)
5105 + break;
5106 +- } while (!sg_is_last(sge));
5107 ++ } while ((i != msg_rx->sg.end) && !sg_is_last(sge));
5108 +
5109 + if (unlikely(peek)) {
5110 + msg_rx = sk_psock_next_msg(psock, msg_rx);
5111 +@@ -472,7 +472,7 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
5112 + }
5113 +
5114 + msg_rx->sg.start = i;
5115 +- if (!sge->length && sg_is_last(sge)) {
5116 ++ if (!sge->length && (i == msg_rx->sg.end || sg_is_last(sge))) {
5117 + msg_rx = sk_psock_dequeue_msg(psock);
5118 + kfree_sk_msg(msg_rx);
5119 + }
5120 +diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
5121 +index f361d3d56be27..943edf4ad4db0 100644
5122 +--- a/net/ipv4/fib_frontend.c
5123 ++++ b/net/ipv4/fib_frontend.c
5124 +@@ -389,7 +389,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
5125 + dev_match = dev_match || (res.type == RTN_LOCAL &&
5126 + dev == net->loopback_dev);
5127 + if (dev_match) {
5128 +- ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_HOST;
5129 ++ ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_LINK;
5130 + return ret;
5131 + }
5132 + if (no_addr)
5133 +@@ -401,7 +401,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
5134 + ret = 0;
5135 + if (fib_lookup(net, &fl4, &res, FIB_LOOKUP_IGNORE_LINKSTATE) == 0) {
5136 + if (res.type == RTN_UNICAST)
5137 +- ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_HOST;
5138 ++ ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_LINK;
5139 + }
5140 + return ret;
5141 +
5142 +diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
5143 +index 5c58e21f724e9..f866d6282b2b3 100644
5144 +--- a/net/ipv4/ip_gre.c
5145 ++++ b/net/ipv4/ip_gre.c
5146 +@@ -609,7 +609,7 @@ static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
5147 + ip_tunnel_init_flow(&fl4, IPPROTO_GRE, key->u.ipv4.dst, key->u.ipv4.src,
5148 + tunnel_id_to_key32(key->tun_id),
5149 + key->tos & ~INET_ECN_MASK, dev_net(dev), 0,
5150 +- skb->mark, skb_get_hash(skb));
5151 ++ skb->mark, skb_get_hash(skb), key->flow_flags);
5152 + rt = ip_route_output_key(dev_net(dev), &fl4);
5153 + if (IS_ERR(rt))
5154 + return PTR_ERR(rt);
5155 +diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
5156 +index 94017a8c39945..1ad8809fc2e3b 100644
5157 +--- a/net/ipv4/ip_tunnel.c
5158 ++++ b/net/ipv4/ip_tunnel.c
5159 +@@ -295,7 +295,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
5160 + ip_tunnel_init_flow(&fl4, iph->protocol, iph->daddr,
5161 + iph->saddr, tunnel->parms.o_key,
5162 + RT_TOS(iph->tos), dev_net(dev),
5163 +- tunnel->parms.link, tunnel->fwmark, 0);
5164 ++ tunnel->parms.link, tunnel->fwmark, 0, 0);
5165 + rt = ip_route_output_key(tunnel->net, &fl4);
5166 +
5167 + if (!IS_ERR(rt)) {
5168 +@@ -570,7 +570,8 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
5169 + }
5170 + ip_tunnel_init_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src,
5171 + tunnel_id_to_key32(key->tun_id), RT_TOS(tos),
5172 +- dev_net(dev), 0, skb->mark, skb_get_hash(skb));
5173 ++ dev_net(dev), 0, skb->mark, skb_get_hash(skb),
5174 ++ key->flow_flags);
5175 + if (tunnel->encap.type != TUNNEL_ENCAP_NONE)
5176 + goto tx_error;
5177 +
5178 +@@ -728,7 +729,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
5179 + ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr,
5180 + tunnel->parms.o_key, RT_TOS(tos),
5181 + dev_net(dev), tunnel->parms.link,
5182 +- tunnel->fwmark, skb_get_hash(skb));
5183 ++ tunnel->fwmark, skb_get_hash(skb), 0);
5184 +
5185 + if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0)
5186 + goto tx_error;
5187 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
5188 +index b1637990d5708..e5435156e545d 100644
5189 +--- a/net/ipv4/tcp_input.c
5190 ++++ b/net/ipv4/tcp_input.c
5191 +@@ -3630,11 +3630,11 @@ static void tcp_send_challenge_ack(struct sock *sk)
5192 +
5193 + /* Then check host-wide RFC 5961 rate limit. */
5194 + now = jiffies / HZ;
5195 +- if (now != challenge_timestamp) {
5196 ++ if (now != READ_ONCE(challenge_timestamp)) {
5197 + u32 ack_limit = READ_ONCE(net->ipv4.sysctl_tcp_challenge_ack_limit);
5198 + u32 half = (ack_limit + 1) >> 1;
5199 +
5200 +- challenge_timestamp = now;
5201 ++ WRITE_ONCE(challenge_timestamp, now);
5202 + WRITE_ONCE(challenge_count, half + prandom_u32_max(ack_limit));
5203 + }
5204 + count = READ_ONCE(challenge_count);
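
challenge_timestamp is read and written locklessly from multiple CPUs, so the hunk annotates both sides of the race with READ_ONCE()/WRITE_ONCE() to rule out load/store tearing and compiler refetches. A userspace approximation of the idea (a sketch only; the in-tree macros also handle accesses wider than a machine word and hook into instrumentation):

#include <time.h>

#define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

static unsigned long challenge_timestamp;

static void rate_limit_tick(void)
{
        unsigned long now = (unsigned long)time(NULL);

        /* One real load and one real store, never torn or refetched. */
        if (now != READ_ONCE(challenge_timestamp))
                WRITE_ONCE(challenge_timestamp, now);
}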
5205 +diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
5206 +index 71899e5a5a111..1215c863e1c41 100644
5207 +--- a/net/kcm/kcmsock.c
5208 ++++ b/net/kcm/kcmsock.c
5209 +@@ -1412,12 +1412,6 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
5210 + psock->sk = csk;
5211 + psock->bpf_prog = prog;
5212 +
5213 +- err = strp_init(&psock->strp, csk, &cb);
5214 +- if (err) {
5215 +- kmem_cache_free(kcm_psockp, psock);
5216 +- goto out;
5217 +- }
5218 +-
5219 + write_lock_bh(&csk->sk_callback_lock);
5220 +
5221 + /* Check if sk_user_data is already by KCM or someone else.
5222 +@@ -1425,13 +1419,18 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
5223 + */
5224 + if (csk->sk_user_data) {
5225 + write_unlock_bh(&csk->sk_callback_lock);
5226 +- strp_stop(&psock->strp);
5227 +- strp_done(&psock->strp);
5228 + kmem_cache_free(kcm_psockp, psock);
5229 + err = -EALREADY;
5230 + goto out;
5231 + }
5232 +
5233 ++ err = strp_init(&psock->strp, csk, &cb);
5234 ++ if (err) {
5235 ++ write_unlock_bh(&csk->sk_callback_lock);
5236 ++ kmem_cache_free(kcm_psockp, psock);
5237 ++ goto out;
5238 ++ }
5239 ++
5240 + psock->save_data_ready = csk->sk_data_ready;
5241 + psock->save_write_space = csk->sk_write_space;
5242 + psock->save_state_change = csk->sk_state_change;
5243 +diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
5244 +index 8ff547ff351ed..4e4c9df637354 100644
5245 +--- a/net/mac80211/ibss.c
5246 ++++ b/net/mac80211/ibss.c
5247 +@@ -534,6 +534,10 @@ int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata)
5248 +
5249 + sdata_assert_lock(sdata);
5250 +
5251 ++ /* When not connected/joined, sending CSA doesn't make sense. */
5252 ++ if (ifibss->state != IEEE80211_IBSS_MLME_JOINED)
5253 ++ return -ENOLINK;
5254 ++
5255 + /* update cfg80211 bss information with the new channel */
5256 + if (!is_zero_ether_addr(ifibss->bssid)) {
5257 + cbss = cfg80211_get_bss(sdata->local->hw.wiphy,
5258 +diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
5259 +index b698756887eb5..e692a2487eb5d 100644
5260 +--- a/net/mac80211/scan.c
5261 ++++ b/net/mac80211/scan.c
5262 +@@ -465,16 +465,19 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
5263 + scan_req = rcu_dereference_protected(local->scan_req,
5264 + lockdep_is_held(&local->mtx));
5265 +
5266 +- if (scan_req != local->int_scan_req) {
5267 +- local->scan_info.aborted = aborted;
5268 +- cfg80211_scan_done(scan_req, &local->scan_info);
5269 +- }
5270 + RCU_INIT_POINTER(local->scan_req, NULL);
5271 + RCU_INIT_POINTER(local->scan_sdata, NULL);
5272 +
5273 + local->scanning = 0;
5274 + local->scan_chandef.chan = NULL;
5275 +
5276 ++ synchronize_rcu();
5277 ++
5278 ++ if (scan_req != local->int_scan_req) {
5279 ++ local->scan_info.aborted = aborted;
5280 ++ cfg80211_scan_done(scan_req, &local->scan_info);
5281 ++ }
5282 ++
5283 + /* Set power back to normal operating levels. */
5284 + ieee80211_hw_config(local, 0);
5285 +
5286 +diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
5287 +index c0b2ce70e101c..fef9ad44d82ec 100644
5288 +--- a/net/mac80211/sta_info.c
5289 ++++ b/net/mac80211/sta_info.c
5290 +@@ -2221,9 +2221,9 @@ static inline u64 sta_get_tidstats_msdu(struct ieee80211_sta_rx_stats *rxstats,
5291 + u64 value;
5292 +
5293 + do {
5294 +- start = u64_stats_fetch_begin(&rxstats->syncp);
5295 ++ start = u64_stats_fetch_begin_irq(&rxstats->syncp);
5296 + value = rxstats->msdu[tid];
5297 +- } while (u64_stats_fetch_retry(&rxstats->syncp, start));
5298 ++ } while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));
5299 +
5300 + return value;
5301 + }
5302 +@@ -2289,9 +2289,9 @@ static inline u64 sta_get_stats_bytes(struct ieee80211_sta_rx_stats *rxstats)
5303 + u64 value;
5304 +
5305 + do {
5306 +- start = u64_stats_fetch_begin(&rxstats->syncp);
5307 ++ start = u64_stats_fetch_begin_irq(&rxstats->syncp);
5308 + value = rxstats->bytes;
5309 +- } while (u64_stats_fetch_retry(&rxstats->syncp, start));
5310 ++ } while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));
5311 +
5312 + return value;
5313 + }
5314 +diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
5315 +index b8ce84618a55b..c439125ef2b91 100644
5316 +--- a/net/mac802154/rx.c
5317 ++++ b/net/mac802154/rx.c
5318 +@@ -44,7 +44,7 @@ ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata,
5319 +
5320 + switch (mac_cb(skb)->dest.mode) {
5321 + case IEEE802154_ADDR_NONE:
5322 +- if (mac_cb(skb)->dest.mode != IEEE802154_ADDR_NONE)
5323 ++ if (hdr->source.mode != IEEE802154_ADDR_NONE)
5324 + /* FIXME: check if we are PAN coordinator */
5325 + skb->pkt_type = PACKET_OTHERHOST;
5326 + else
5327 +diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
5328 +index 35b5f806fdda1..b52afe316dc41 100644
5329 +--- a/net/mpls/af_mpls.c
5330 ++++ b/net/mpls/af_mpls.c
5331 +@@ -1079,9 +1079,9 @@ static void mpls_get_stats(struct mpls_dev *mdev,
5332 +
5333 + p = per_cpu_ptr(mdev->stats, i);
5334 + do {
5335 +- start = u64_stats_fetch_begin(&p->syncp);
5336 ++ start = u64_stats_fetch_begin_irq(&p->syncp);
5337 + local = p->stats;
5338 +- } while (u64_stats_fetch_retry(&p->syncp, start));
5339 ++ } while (u64_stats_fetch_retry_irq(&p->syncp, start));
5340 +
5341 + stats->rx_packets += local.rx_packets;
5342 + stats->rx_bytes += local.rx_bytes;
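
The _irq variants matter on 32-bit hosts, where a 64-bit counter update is not a single store and the writer may run in IRQ context; the reader simply retries until it sees an even, unchanged sequence number. A simplified userspace rendition of that seqcount retry loop (C11 atomics standing in for the kernel primitives, which compile away entirely on 64-bit):

#include <stdatomic.h>
#include <stdint.h>

struct pcpu_stats {
        atomic_uint seq;        /* even: stable, odd: write in flight */
        uint64_t rx_packets;
        uint64_t rx_bytes;
};

static void stats_snapshot(struct pcpu_stats *s,
                           uint64_t *pkts, uint64_t *bytes)
{
        unsigned int start;

        do {
                start = atomic_load_explicit(&s->seq, memory_order_acquire);
                *pkts = s->rx_packets;
                *bytes = s->rx_bytes;
        } while ((start & 1) ||
                 start != atomic_load_explicit(&s->seq, memory_order_acquire));
}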
5343 +diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
5344 +index 7e8a39a356271..6c9d153afbeee 100644
5345 +--- a/net/openvswitch/datapath.c
5346 ++++ b/net/openvswitch/datapath.c
5347 +@@ -1802,7 +1802,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
5348 + ovs_dp_reset_user_features(skb, info);
5349 + }
5350 +
5351 +- goto err_unlock_and_destroy_meters;
5352 ++ goto err_destroy_portids;
5353 + }
5354 +
5355 + err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
5356 +@@ -1817,6 +1817,8 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
5357 + ovs_notify(&dp_datapath_genl_family, reply, info);
5358 + return 0;
5359 +
5360 ++err_destroy_portids:
5361 ++ kfree(rcu_dereference_raw(dp->upcall_portids));
5362 + err_unlock_and_destroy_meters:
5363 + ovs_unlock();
5364 + ovs_meters_exit(dp);
5365 +diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
5366 +index a64c3c1541118..b3596d4bd14a2 100644
5367 +--- a/net/sched/sch_generic.c
5368 ++++ b/net/sched/sch_generic.c
5369 +@@ -1125,6 +1125,21 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
5370 + }
5371 + EXPORT_SYMBOL(dev_graft_qdisc);
5372 +
5373 ++static void shutdown_scheduler_queue(struct net_device *dev,
5374 ++ struct netdev_queue *dev_queue,
5375 ++ void *_qdisc_default)
5376 ++{
5377 ++ struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
5378 ++ struct Qdisc *qdisc_default = _qdisc_default;
5379 ++
5380 ++ if (qdisc) {
5381 ++ rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
5382 ++ dev_queue->qdisc_sleeping = qdisc_default;
5383 ++
5384 ++ qdisc_put(qdisc);
5385 ++ }
5386 ++}
5387 ++
5388 + static void attach_one_default_qdisc(struct net_device *dev,
5389 + struct netdev_queue *dev_queue,
5390 + void *_unused)
5391 +@@ -1172,6 +1187,7 @@ static void attach_default_qdiscs(struct net_device *dev)
5392 + if (qdisc == &noop_qdisc) {
5393 + netdev_warn(dev, "default qdisc (%s) fail, fallback to %s\n",
5394 + default_qdisc_ops->id, noqueue_qdisc_ops.id);
5395 ++ netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
5396 + dev->priv_flags |= IFF_NO_QUEUE;
5397 + netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
5398 + qdisc = txq->qdisc_sleeping;
5399 +@@ -1450,21 +1466,6 @@ void dev_init_scheduler(struct net_device *dev)
5400 + timer_setup(&dev->watchdog_timer, dev_watchdog, 0);
5401 + }
5402 +
5403 +-static void shutdown_scheduler_queue(struct net_device *dev,
5404 +- struct netdev_queue *dev_queue,
5405 +- void *_qdisc_default)
5406 +-{
5407 +- struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
5408 +- struct Qdisc *qdisc_default = _qdisc_default;
5409 +-
5410 +- if (qdisc) {
5411 +- rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
5412 +- dev_queue->qdisc_sleeping = qdisc_default;
5413 +-
5414 +- qdisc_put(qdisc);
5415 +- }
5416 +-}
5417 +-
5418 + void dev_shutdown(struct net_device *dev)
5419 + {
5420 + netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
5421 +diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
5422 +index 72102277449e1..36079fdde2cb5 100644
5423 +--- a/net/sched/sch_tbf.c
5424 ++++ b/net/sched/sch_tbf.c
5425 +@@ -356,6 +356,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
5426 + struct nlattr *tb[TCA_TBF_MAX + 1];
5427 + struct tc_tbf_qopt *qopt;
5428 + struct Qdisc *child = NULL;
5429 ++ struct Qdisc *old = NULL;
5430 + struct psched_ratecfg rate;
5431 + struct psched_ratecfg peak;
5432 + u64 max_size;
5433 +@@ -447,7 +448,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
5434 + sch_tree_lock(sch);
5435 + if (child) {
5436 + qdisc_tree_flush_backlog(q->qdisc);
5437 +- qdisc_put(q->qdisc);
5438 ++ old = q->qdisc;
5439 + q->qdisc = child;
5440 + }
5441 + q->limit = qopt->limit;
5442 +@@ -467,6 +468,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
5443 + memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));
5444 +
5445 + sch_tree_unlock(sch);
5446 ++ qdisc_put(old);
5447 + err = 0;
5448 +
5449 + tbf_offload_change(sch);
5450 +diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
5451 +index 433bb5a7df31e..a51d5ed2ad764 100644
5452 +--- a/net/smc/af_smc.c
5453 ++++ b/net/smc/af_smc.c
5454 +@@ -1812,7 +1812,6 @@ static void smc_listen_out_connected(struct smc_sock *new_smc)
5455 + {
5456 + struct sock *newsmcsk = &new_smc->sk;
5457 +
5458 +- sk_refcnt_debug_inc(newsmcsk);
5459 + if (newsmcsk->sk_state == SMC_INIT)
5460 + newsmcsk->sk_state = SMC_ACTIVE;
5461 +
5462 +diff --git a/net/wireless/debugfs.c b/net/wireless/debugfs.c
5463 +index aab43469a2f04..0878b162890af 100644
5464 +--- a/net/wireless/debugfs.c
5465 ++++ b/net/wireless/debugfs.c
5466 +@@ -65,9 +65,10 @@ static ssize_t ht40allow_map_read(struct file *file,
5467 + {
5468 + struct wiphy *wiphy = file->private_data;
5469 + char *buf;
5470 +- unsigned int offset = 0, buf_size = PAGE_SIZE, i, r;
5471 ++ unsigned int offset = 0, buf_size = PAGE_SIZE, i;
5472 + enum nl80211_band band;
5473 + struct ieee80211_supported_band *sband;
5474 ++ ssize_t r;
5475 +
5476 + buf = kzalloc(buf_size, GFP_KERNEL);
5477 + if (!buf)
5478 +diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
5479 +index f70112176b7c1..a71a8c6edf553 100644
5480 +--- a/net/xdp/xsk_buff_pool.c
5481 ++++ b/net/xdp/xsk_buff_pool.c
5482 +@@ -379,6 +379,16 @@ static void xp_check_dma_contiguity(struct xsk_dma_map *dma_map)
5483 +
5484 + static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_map)
5485 + {
5486 ++ if (!pool->unaligned) {
5487 ++ u32 i;
5488 ++
5489 ++ for (i = 0; i < pool->heads_cnt; i++) {
5490 ++ struct xdp_buff_xsk *xskb = &pool->heads[i];
5491 ++
5492 ++ xp_init_xskb_dma(xskb, pool, dma_map->dma_pages, xskb->orig_addr);
5493 ++ }
5494 ++ }
5495 ++
5496 + pool->dma_pages = kvcalloc(dma_map->dma_pages_cnt, sizeof(*pool->dma_pages), GFP_KERNEL);
5497 + if (!pool->dma_pages)
5498 + return -ENOMEM;
5499 +@@ -428,12 +438,6 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
5500 +
5501 + if (pool->unaligned)
5502 + xp_check_dma_contiguity(dma_map);
5503 +- else
5504 +- for (i = 0; i < pool->heads_cnt; i++) {
5505 +- struct xdp_buff_xsk *xskb = &pool->heads[i];
5506 +-
5507 +- xp_init_xskb_dma(xskb, pool, dma_map->dma_pages, xskb->orig_addr);
5508 +- }
5509 +
5510 + err = xp_init_dma_info(pool, dma_map);
5511 + if (err) {
5512 +diff --git a/security/landlock/fs.c b/security/landlock/fs.c
5513 +index ec5a6247cd3e7..a9dbd99d9ee76 100644
5514 +--- a/security/landlock/fs.c
5515 ++++ b/security/landlock/fs.c
5516 +@@ -149,6 +149,16 @@ retry:
5517 + LANDLOCK_ACCESS_FS_READ_FILE)
5518 + /* clang-format on */
5519 +
5520 ++/*
5521 ++ * All access rights that are denied by default whether they are handled or not
5522 ++ * by a ruleset/layer. This must be ORed with all ruleset->fs_access_masks[]
5523 ++ * entries when we need to get the absolute handled access masks.
5524 ++ */
5525 ++/* clang-format off */
5526 ++#define ACCESS_INITIALLY_DENIED ( \
5527 ++ LANDLOCK_ACCESS_FS_REFER)
5528 ++/* clang-format on */
5529 ++
5530 + /*
5531 + * @path: Should have been checked by get_path_from_fd().
5532 + */
5533 +@@ -167,7 +177,9 @@ int landlock_append_fs_rule(struct landlock_ruleset *const ruleset,
5534 + return -EINVAL;
5535 +
5536 + /* Transforms relative access rights to absolute ones. */
5537 +- access_rights |= LANDLOCK_MASK_ACCESS_FS & ~ruleset->fs_access_masks[0];
5538 ++ access_rights |=
5539 ++ LANDLOCK_MASK_ACCESS_FS &
5540 ++ ~(ruleset->fs_access_masks[0] | ACCESS_INITIALLY_DENIED);
5541 + object = get_inode_object(d_backing_inode(path->dentry));
5542 + if (IS_ERR(object))
5543 + return PTR_ERR(object);
5544 +@@ -277,23 +289,12 @@ static inline bool is_nouser_or_private(const struct dentry *dentry)
5545 + static inline access_mask_t
5546 + get_handled_accesses(const struct landlock_ruleset *const domain)
5547 + {
5548 +- access_mask_t access_dom = 0;
5549 +- unsigned long access_bit;
5550 +-
5551 +- for (access_bit = 0; access_bit < LANDLOCK_NUM_ACCESS_FS;
5552 +- access_bit++) {
5553 +- size_t layer_level;
5554 ++ access_mask_t access_dom = ACCESS_INITIALLY_DENIED;
5555 ++ size_t layer_level;
5556 +
5557 +- for (layer_level = 0; layer_level < domain->num_layers;
5558 +- layer_level++) {
5559 +- if (domain->fs_access_masks[layer_level] &
5560 +- BIT_ULL(access_bit)) {
5561 +- access_dom |= BIT_ULL(access_bit);
5562 +- break;
5563 +- }
5564 +- }
5565 +- }
5566 +- return access_dom;
5567 ++ for (layer_level = 0; layer_level < domain->num_layers; layer_level++)
5568 ++ access_dom |= domain->fs_access_masks[layer_level];
5569 ++ return access_dom & LANDLOCK_MASK_ACCESS_FS;
5570 + }
5571 +
5572 + static inline access_mask_t
5573 +@@ -316,8 +317,13 @@ init_layer_masks(const struct landlock_ruleset *const domain,
5574 +
5575 + for_each_set_bit(access_bit, &access_req,
5576 + ARRAY_SIZE(*layer_masks)) {
5577 +- if (domain->fs_access_masks[layer_level] &
5578 +- BIT_ULL(access_bit)) {
5579 ++ /*
5580 ++ * Artificially handles all access rights that are initially
5581 ++ * denied by default.
5582 ++ */
5583 ++ if (BIT_ULL(access_bit) &
5584 ++ (domain->fs_access_masks[layer_level] |
5585 ++ ACCESS_INITIALLY_DENIED)) {
5586 + (*layer_masks)[access_bit] |=
5587 + BIT_ULL(layer_level);
5588 + handled_accesses |= BIT_ULL(access_bit);
5589 +@@ -857,10 +863,6 @@ static int current_check_refer_path(struct dentry *const old_dentry,
5590 + NULL, NULL);
5591 + }
5592 +
5593 +- /* Backward compatibility: no reparenting support. */
5594 +- if (!(get_handled_accesses(dom) & LANDLOCK_ACCESS_FS_REFER))
5595 +- return -EXDEV;
5596 +-
5597 + access_request_parent1 |= LANDLOCK_ACCESS_FS_REFER;
5598 + access_request_parent2 |= LANDLOCK_ACCESS_FS_REFER;
5599 +
5600 +diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
5601 +index 8cfdaee779050..55b3c49ba61de 100644
5602 +--- a/sound/core/memalloc.c
5603 ++++ b/sound/core/memalloc.c
5604 +@@ -20,6 +20,13 @@
5605 +
5606 + static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);
5607 +
5608 ++#ifdef CONFIG_SND_DMA_SGBUF
5609 ++static void *do_alloc_fallback_pages(struct device *dev, size_t size,
5610 ++ dma_addr_t *addr, bool wc);
5611 ++static void do_free_fallback_pages(void *p, size_t size, bool wc);
5612 ++static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size);
5613 ++#endif
5614 ++
5615 + /* a cast to gfp flag from the dev pointer; for CONTINUOUS and VMALLOC types */
5616 + static inline gfp_t snd_mem_get_gfp_flags(const struct snd_dma_buffer *dmab,
5617 + gfp_t default_gfp)
5618 +@@ -269,16 +276,21 @@ EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
5619 + /*
5620 + * Continuous pages allocator
5621 + */
5622 +-static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
5623 ++static void *do_alloc_pages(size_t size, dma_addr_t *addr, gfp_t gfp)
5624 + {
5625 +- gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL);
5626 + void *p = alloc_pages_exact(size, gfp);
5627 +
5628 + if (p)
5629 +- dmab->addr = page_to_phys(virt_to_page(p));
5630 ++ *addr = page_to_phys(virt_to_page(p));
5631 + return p;
5632 + }
5633 +
5634 ++static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
5635 ++{
5636 ++ return do_alloc_pages(size, &dmab->addr,
5637 ++ snd_mem_get_gfp_flags(dmab, GFP_KERNEL));
5638 ++}
5639 ++
5640 + static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
5641 + {
+ free_pages_exact(dmab->area, dmab->bytes);
+@@ -455,6 +467,25 @@ static const struct snd_malloc_ops snd_dma_dev_ops = {
+ /*
+ * Write-combined pages
+ */
++/* x86-specific allocations */
++#ifdef CONFIG_SND_DMA_SGBUF
++static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
++{
++ return do_alloc_fallback_pages(dmab->dev.dev, size, &dmab->addr, true);
++}
++
++static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
++{
++ do_free_fallback_pages(dmab->area, dmab->bytes, true);
++}
++
++static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
++ struct vm_area_struct *area)
++{
++ area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
++ return snd_dma_continuous_mmap(dmab, area);
++}
++#else
+ static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
+ {
+ return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
+@@ -471,6 +502,7 @@ static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
+ return dma_mmap_wc(dmab->dev.dev, area,
+ dmab->area, dmab->addr, dmab->bytes);
+ }
++#endif /* CONFIG_SND_DMA_SGBUF */
+
+ static const struct snd_malloc_ops snd_dma_wc_ops = {
+ .alloc = snd_dma_wc_alloc,
+@@ -478,10 +510,6 @@ static const struct snd_malloc_ops snd_dma_wc_ops = {
+ .mmap = snd_dma_wc_mmap,
+ };
+
+-#ifdef CONFIG_SND_DMA_SGBUF
+-static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size);
+-#endif
+-
+ /*
+ * Non-contiguous pages allocator
+ */
+@@ -661,6 +689,37 @@ static const struct snd_malloc_ops snd_dma_sg_wc_ops = {
+ .get_chunk_size = snd_dma_noncontig_get_chunk_size,
+ };
+
++/* manual page allocations with wc setup */
++static void *do_alloc_fallback_pages(struct device *dev, size_t size,
++ dma_addr_t *addr, bool wc)
++{
++ gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
++ void *p;
++
++ again:
++ p = do_alloc_pages(size, addr, gfp);
++ if (!p || (*addr + size - 1) & ~dev->coherent_dma_mask) {
++ if (IS_ENABLED(CONFIG_ZONE_DMA32) && !(gfp & GFP_DMA32)) {
++ gfp |= GFP_DMA32;
++ goto again;
++ }
++ if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
++ gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
++ goto again;
++ }
++ }
++ if (p && wc)
++ set_memory_wc((unsigned long)(p), size >> PAGE_SHIFT);
++ return p;
++}
++
++static void do_free_fallback_pages(void *p, size_t size, bool wc)
++{
++ if (wc)
++ set_memory_wb((unsigned long)(p), size >> PAGE_SHIFT);
++ free_pages_exact(p, size);
++}
++
+ /* Fallback SG-buffer allocations for x86 */
+ struct snd_dma_sg_fallback {
+ size_t count;
+@@ -671,14 +730,11 @@ struct snd_dma_sg_fallback {
+ static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
+ struct snd_dma_sg_fallback *sgbuf)
+ {
++ bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
+ size_t i;
+
+- if (sgbuf->count && dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
+- set_pages_array_wb(sgbuf->pages, sgbuf->count);
+ for (i = 0; i < sgbuf->count && sgbuf->pages[i]; i++)
+- dma_free_coherent(dmab->dev.dev, PAGE_SIZE,
+- page_address(sgbuf->pages[i]),
+- sgbuf->addrs[i]);
++ do_free_fallback_pages(page_address(sgbuf->pages[i]), PAGE_SIZE, wc);
+ kvfree(sgbuf->pages);
+ kvfree(sgbuf->addrs);
+ kfree(sgbuf);
+@@ -690,6 +746,7 @@ static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
+ struct page **pages;
+ size_t i, count;
+ void *p;
++ bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
+
+ sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
+ if (!sgbuf)
+@@ -704,15 +761,13 @@ static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
+ goto error;
+
+ for (i = 0; i < count; sgbuf->count++, i++) {
+- p = dma_alloc_coherent(dmab->dev.dev, PAGE_SIZE,
+- &sgbuf->addrs[i], DEFAULT_GFP);
++ p = do_alloc_fallback_pages(dmab->dev.dev, PAGE_SIZE,
++ &sgbuf->addrs[i], wc);
+ if (!p)
+ goto error;
+ sgbuf->pages[i] = virt_to_page(p);
+ }
+
+- if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
+- set_pages_array_wc(pages, count);
+ p = vmap(pages, count, VM_MAP, PAGE_KERNEL);
+ if (!p)
+ goto error;
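
A note on the memalloc.c hunks above: do_alloc_fallback_pages() retries the allocation in progressively lower zones (ZONE_DMA32, then ZONE_DMA) whenever the returned physical range ends above the device's coherent DMA mask. That mask test is plain bit arithmetic; the user-space sketch below is purely illustrative (fits_mask() and the sample addresses are not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* A buffer fits the mask iff no address bit of its last byte lies above it. */
    static int fits_mask(uint64_t addr, uint64_t size, uint64_t mask)
    {
            return ((addr + size - 1) & ~mask) == 0;
    }

    int main(void)
    {
            uint64_t mask32 = 0xffffffffULL; /* a 32-bit coherent DMA mask */

            printf("%d\n", fits_mask(0xfffff000ULL, 4096, mask32)); /* 1: ends at 0xffffffff */
            printf("%d\n", fits_mask(0xfffff001ULL, 4096, mask32)); /* 0: last byte is beyond 4 GiB */
            return 0;
    }
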
+diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c
+index 1e3bf086f8671..07efb38f58ac1 100644
+--- a/sound/core/seq/oss/seq_oss_midi.c
++++ b/sound/core/seq/oss/seq_oss_midi.c
+@@ -270,7 +270,9 @@ snd_seq_oss_midi_clear_all(void)
+ void
+ snd_seq_oss_midi_setup(struct seq_oss_devinfo *dp)
+ {
++ spin_lock_irq(&register_lock);
+ dp->max_mididev = max_midi_devs;
++ spin_unlock_irq(&register_lock);
+ }
+
+ /*
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index 2e9d695d336c9..2d707afa1ef1c 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -121,13 +121,13 @@ struct snd_seq_client *snd_seq_client_use_ptr(int clientid)
+ spin_unlock_irqrestore(&clients_lock, flags);
+ #ifdef CONFIG_MODULES
+ if (!in_interrupt()) {
+- static char client_requested[SNDRV_SEQ_GLOBAL_CLIENTS];
+- static char card_requested[SNDRV_CARDS];
++ static DECLARE_BITMAP(client_requested, SNDRV_SEQ_GLOBAL_CLIENTS);
++ static DECLARE_BITMAP(card_requested, SNDRV_CARDS);
++
+ if (clientid < SNDRV_SEQ_GLOBAL_CLIENTS) {
+ int idx;
+
+- if (!client_requested[clientid]) {
+- client_requested[clientid] = 1;
++ if (!test_and_set_bit(clientid, client_requested)) {
+ for (idx = 0; idx < 15; idx++) {
+ if (seq_client_load[idx] < 0)
+ break;
+@@ -142,10 +142,8 @@ struct snd_seq_client *snd_seq_client_use_ptr(int clientid)
+ int card = (clientid - SNDRV_SEQ_GLOBAL_CLIENTS) /
+ SNDRV_SEQ_CLIENTS_PER_CARD;
+ if (card < snd_ecards_limit) {
+- if (! card_requested[card]) {
+- card_requested[card] = 1;
++ if (!test_and_set_bit(card, card_requested))
+ snd_request_card(card);
+- }
+ snd_seq_device_load_drivers();
+ }
+ }
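
A note on the seq_clientmgr.c hunks above: replacing the char arrays with DECLARE_BITMAP() plus test_and_set_bit() makes the "request each driver only once" check atomic, so two racing callers can no longer both see the flag clear. A rough user-space analogue of that once-only pattern, using C11 atomics in place of the kernel bitmap API (MAX_IDS and request_once() are illustrative names):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_IDS 192 /* illustrative bound; the kernel code uses SNDRV_SEQ_GLOBAL_CLIENTS */

    static atomic_bool requested[MAX_IDS]; /* static storage: all start false */

    static void request_once(int id)
    {
            if (id < 0 || id >= MAX_IDS)
                    return;
            /* The exchange returns the previous value: only the first caller sees false. */
            if (!atomic_exchange(&requested[id], true))
                    printf("requesting driver for id %d\n", id);
    }

    int main(void)
    {
            request_once(7);
            request_once(7); /* no-op: already requested */
            return 0;
    }
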
+diff --git a/sound/hda/intel-nhlt.c b/sound/hda/intel-nhlt.c
+index 9db5ccd9aa2db..13bb0ccfb36c0 100644
+--- a/sound/hda/intel-nhlt.c
++++ b/sound/hda/intel-nhlt.c
+@@ -55,16 +55,22 @@ int intel_nhlt_get_dmic_geo(struct device *dev, struct nhlt_acpi_table *nhlt)
+
+ /* find max number of channels based on format_configuration */
+ if (fmt_configs->fmt_count) {
++ struct nhlt_fmt_cfg *fmt_cfg = fmt_configs->fmt_config;
++
+ dev_dbg(dev, "found %d format definitions\n",
+ fmt_configs->fmt_count);
+
+ for (i = 0; i < fmt_configs->fmt_count; i++) {
+ struct wav_fmt_ext *fmt_ext;
+
+- fmt_ext = &fmt_configs->fmt_config[i].fmt_ext;
++ fmt_ext = &fmt_cfg->fmt_ext;
+
+ if (fmt_ext->fmt.channels > max_ch)
+ max_ch = fmt_ext->fmt.channels;
++
++ /* Move to the next nhlt_fmt_cfg */
++ fmt_cfg = (struct nhlt_fmt_cfg *)(fmt_cfg->config.caps +
++ fmt_cfg->config.size);
+ }
+ dev_dbg(dev, "max channels found %d\n", max_ch);
+ } else {
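
A note on the intel-nhlt.c hunk above: nhlt_fmt_cfg entries are variable-length (each ends in a size-prefixed capabilities blob), so indexing fmt_config[i] as a flat array reads the wrong memory for i > 0; the cursor must instead advance past config.caps + config.size. The self-contained sketch below walks packed, size-prefixed records the same way (struct rec and the sample buffer are invented for illustration):

    #include <stdint.h>
    #include <stdio.h>

    struct rec {
            uint8_t channels;  /* payload of interest */
            uint8_t blob_size; /* number of bytes in blob[] */
            uint8_t blob[];    /* variable part; the next record starts right after it */
    };

    int main(void)
    {
            /* two packed records: (2 ch, 1-byte blob), (4 ch, 2-byte blob) */
            uint8_t buf[] = { 2, 1, 0xaa, 4, 2, 0xbb, 0xcc };
            const uint8_t *p = buf, *end = buf + sizeof(buf);
            unsigned int max_ch = 0;

            while (p < end) {
                    const struct rec *r = (const struct rec *)p;

                    if (r->channels > max_ch)
                            max_ch = r->channels;
                    p = r->blob + r->blob_size; /* step over the variable part */
            }
            printf("max channels: %u\n", max_ch);
            return 0;
    }
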
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index b44b882f8378c..799f6bf266dd0 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4689,6 +4689,48 @@ static void alc236_fixup_hp_mute_led_micmute_vref(struct hda_codec *codec,
+ alc236_fixup_hp_micmute_led_vref(codec, fix, action);
+ }
+
++static inline void alc298_samsung_write_coef_pack(struct hda_codec *codec,
++ const unsigned short coefs[2])
++{
++ alc_write_coef_idx(codec, 0x23, coefs[0]);
++ alc_write_coef_idx(codec, 0x25, coefs[1]);
++ alc_write_coef_idx(codec, 0x26, 0xb011);
++}
++
++struct alc298_samsung_amp_desc {
++ unsigned char nid;
++ unsigned short init_seq[2][2];
++};
++
++static void alc298_fixup_samsung_amp(struct hda_codec *codec,
++ const struct hda_fixup *fix, int action)
++{
++ int i, j;
++ static const unsigned short init_seq[][2] = {
++ { 0x19, 0x00 }, { 0x20, 0xc0 }, { 0x22, 0x44 }, { 0x23, 0x08 },
++ { 0x24, 0x85 }, { 0x25, 0x41 }, { 0x35, 0x40 }, { 0x36, 0x01 },
++ { 0x38, 0x81 }, { 0x3a, 0x03 }, { 0x3b, 0x81 }, { 0x40, 0x3e },
++ { 0x41, 0x07 }, { 0x400, 0x1 }
++ };
++ static const struct alc298_samsung_amp_desc amps[] = {
++ { 0x3a, { { 0x18, 0x1 }, { 0x26, 0x0 } } },
++ { 0x39, { { 0x18, 0x2 }, { 0x26, 0x1 } } }
++ };
++
++ if (action != HDA_FIXUP_ACT_INIT)
++ return;
++
++ for (i = 0; i < ARRAY_SIZE(amps); i++) {
++ alc_write_coef_idx(codec, 0x22, amps[i].nid);
++
++ for (j = 0; j < ARRAY_SIZE(amps[i].init_seq); j++)
++ alc298_samsung_write_coef_pack(codec, amps[i].init_seq[j]);
++
++ for (j = 0; j < ARRAY_SIZE(init_seq); j++)
++ alc298_samsung_write_coef_pack(codec, init_seq[j]);
++ }
++}
++
+ #if IS_REACHABLE(CONFIG_INPUT)
+ static void gpio2_mic_hotkey_event(struct hda_codec *codec,
+ struct hda_jack_callback *event)
+@@ -7000,6 +7042,7 @@ enum {
+ ALC236_FIXUP_HP_GPIO_LED,
+ ALC236_FIXUP_HP_MUTE_LED,
+ ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF,
++ ALC298_FIXUP_SAMSUNG_AMP,
+ ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET,
+ ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET,
+ ALC295_FIXUP_ASUS_MIC_NO_PRESENCE,
+@@ -8365,6 +8408,12 @@ static const struct hda_fixup alc269_fixups[] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc236_fixup_hp_mute_led_micmute_vref,
+ },
++ [ALC298_FIXUP_SAMSUNG_AMP] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc298_fixup_samsung_amp,
++ .chained = true,
++ .chain_id = ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET
++ },
+ [ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+@@ -9307,13 +9356,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x10ec, 0x1254, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+ SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE),
+ SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
+- SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
+- SND_PCI_QUIRK(0x144d, 0xc176, "Samsung Notebook 9 Pro (NP930MBE-K04US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
+- SND_PCI_QUIRK(0x144d, 0xc189, "Samsung Galaxy Flex Book (NT950QCG-X716)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
+- SND_PCI_QUIRK(0x144d, 0xc18a, "Samsung Galaxy Book Ion (NP930XCJ-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
++ SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_AMP),
++ SND_PCI_QUIRK(0x144d, 0xc176, "Samsung Notebook 9 Pro (NP930MBE-K04US)", ALC298_FIXUP_SAMSUNG_AMP),
++ SND_PCI_QUIRK(0x144d, 0xc189, "Samsung Galaxy Flex Book (NT950QCG-X716)", ALC298_FIXUP_SAMSUNG_AMP),
++ SND_PCI_QUIRK(0x144d, 0xc18a, "Samsung Galaxy Book Ion (NP930XCJ-K01US)", ALC298_FIXUP_SAMSUNG_AMP),
+ SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8),
+- SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
+- SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
++ SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_AMP),
++ SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_AMP),
+ SND_PCI_QUIRK(0x144d, 0xc832, "Samsung Galaxy Book Flex Alpha (NP730QCJ)", ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
+ SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
+@@ -9679,7 +9728,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ {.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"},
+ {.id = ALC298_FIXUP_HUAWEI_MBX_STEREO, .name = "huawei-mbx-stereo"},
+ {.id = ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, .name = "alc256-medion-headset"},
+- {.id = ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, .name = "alc298-samsung-headphone"},
++ {.id = ALC298_FIXUP_SAMSUNG_AMP, .name = "alc298-samsung-amp"},
+ {.id = ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, .name = "alc256-samsung-headphone"},
+ {.id = ALC255_FIXUP_XIAOMI_HEADSET_MIC, .name = "alc255-xiaomi-headset"},
+ {.id = ALC274_FIXUP_HP_MIC, .name = "alc274-hp-mic-detect"},
+diff --git a/tools/testing/selftests/landlock/fs_test.c b/tools/testing/selftests/landlock/fs_test.c
+index 21a2ce8fa739d..45de42a027c54 100644
+--- a/tools/testing/selftests/landlock/fs_test.c
++++ b/tools/testing/selftests/landlock/fs_test.c
+@@ -4,7 +4,7 @@
+ *
+ * Copyright © 2017-2020 Mickaël Salaün <mic@×××××××.net>
+ * Copyright © 2020 ANSSI
+- * Copyright © 2020-2021 Microsoft Corporation
++ * Copyright © 2020-2022 Microsoft Corporation
+ */
+
+ #define _GNU_SOURCE
+@@ -371,6 +371,13 @@ TEST_F_FORK(layout1, inval)
+ ASSERT_EQ(EINVAL, errno);
+ path_beneath.allowed_access &= ~LANDLOCK_ACCESS_FS_EXECUTE;
+
++ /* Tests with denied-by-default access right. */
++ path_beneath.allowed_access |= LANDLOCK_ACCESS_FS_REFER;
++ ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
++ &path_beneath, 0));
++ ASSERT_EQ(EINVAL, errno);
++ path_beneath.allowed_access &= ~LANDLOCK_ACCESS_FS_REFER;
++
+ /* Test with unknown (64-bits) value. */
+ path_beneath.allowed_access |= (1ULL << 60);
+ ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
+@@ -1826,6 +1833,20 @@ TEST_F_FORK(layout1, link)
+ ASSERT_EQ(0, link(file1_s1d3, file2_s1d3));
+ }
+
++static int test_rename(const char *const oldpath, const char *const newpath)
++{
++ if (rename(oldpath, newpath))
++ return errno;
++ return 0;
++}
++
++static int test_exchange(const char *const oldpath, const char *const newpath)
++{
++ if (renameat2(AT_FDCWD, oldpath, AT_FDCWD, newpath, RENAME_EXCHANGE))
++ return errno;
++ return 0;
++}
++
+ TEST_F_FORK(layout1, rename_file)
+ {
+ const struct rule rules[] = {
+@@ -1867,10 +1888,10 @@ TEST_F_FORK(layout1, rename_file)
+ * to a different directory (which allows file removal).
+ */
+ ASSERT_EQ(-1, rename(file1_s2d1, file1_s1d3));
+- ASSERT_EQ(EXDEV, errno);
++ ASSERT_EQ(EACCES, errno);
+ ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d1, AT_FDCWD, file1_s1d3,
+ RENAME_EXCHANGE));
+- ASSERT_EQ(EXDEV, errno);
++ ASSERT_EQ(EACCES, errno);
+ ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_s2d2, AT_FDCWD, file1_s1d3,
+ RENAME_EXCHANGE));
+ ASSERT_EQ(EXDEV, errno);
+@@ -1894,7 +1915,7 @@ TEST_F_FORK(layout1, rename_file)
+ ASSERT_EQ(EXDEV, errno);
+ ASSERT_EQ(0, unlink(file1_s1d3));
+ ASSERT_EQ(-1, rename(file1_s2d1, file1_s1d3));
+- ASSERT_EQ(EXDEV, errno);
++ ASSERT_EQ(EACCES, errno);
+
+ /* Exchanges and renames files with same parent. */
+ ASSERT_EQ(0, renameat2(AT_FDCWD, file2_s2d3, AT_FDCWD, file1_s2d3,
+@@ -2014,6 +2035,115 @@ TEST_F_FORK(layout1, reparent_refer)
+ ASSERT_EQ(0, rename(dir_s1d3, dir_s2d3));
+ }
+
++/* Checks renames beneath dir_s1d1. */
++static void refer_denied_by_default(struct __test_metadata *const _metadata,
++ const struct rule layer1[],
++ const int layer1_err,
++ const struct rule layer2[])
++{
++ int ruleset_fd;
++
++ ASSERT_EQ(0, unlink(file1_s1d2));
++
++ ruleset_fd = create_ruleset(_metadata, layer1[0].access, layer1);
++ ASSERT_LE(0, ruleset_fd);
++ enforce_ruleset(_metadata, ruleset_fd);
++ ASSERT_EQ(0, close(ruleset_fd));
++
++ /*
++ * If the first layer handles LANDLOCK_ACCESS_FS_REFER (according to
++ * layer1_err), then it allows some different-parent renames and links.
++ */
++ ASSERT_EQ(layer1_err, test_rename(file1_s1d1, file1_s1d2));
++ if (layer1_err == 0)
++ ASSERT_EQ(layer1_err, test_rename(file1_s1d2, file1_s1d1));
++ ASSERT_EQ(layer1_err, test_exchange(file2_s1d1, file2_s1d2));
++ ASSERT_EQ(layer1_err, test_exchange(file2_s1d2, file2_s1d1));
++
++ ruleset_fd = create_ruleset(_metadata, layer2[0].access, layer2);
++ ASSERT_LE(0, ruleset_fd);
++ enforce_ruleset(_metadata, ruleset_fd);
++ ASSERT_EQ(0, close(ruleset_fd));
++
++ /*
++ * Now, either the first or the second layer does not handle
++ * LANDLOCK_ACCESS_FS_REFER, which means that any different-parent
++ * renames and links are denied, thus making the layer handling
++ * LANDLOCK_ACCESS_FS_REFER null and void.
++ */
++ ASSERT_EQ(EXDEV, test_rename(file1_s1d1, file1_s1d2));
++ ASSERT_EQ(EXDEV, test_exchange(file2_s1d1, file2_s1d2));
++ ASSERT_EQ(EXDEV, test_exchange(file2_s1d2, file2_s1d1));
++}
++
++const struct rule layer_dir_s1d1_refer[] = {
++ {
++ .path = dir_s1d1,
++ .access = LANDLOCK_ACCESS_FS_REFER,
++ },
++ {},
++};
++
++const struct rule layer_dir_s1d1_execute[] = {
++ {
++ /* Matches a parent directory. */
++ .path = dir_s1d1,
++ .access = LANDLOCK_ACCESS_FS_EXECUTE,
++ },
++ {},
++};
++
++const struct rule layer_dir_s2d1_execute[] = {
++ {
++ /* Does not match a parent directory. */
++ .path = dir_s2d1,
++ .access = LANDLOCK_ACCESS_FS_EXECUTE,
++ },
++ {},
++};
++
++/*
++ * Tests precedence over renames: denied by default for different parent
++ * directories, *with* a rule matching a parent directory, but not directly
++ * denying access (with MAKE_REG nor REMOVE).
++ */
++TEST_F_FORK(layout1, refer_denied_by_default1)
++{
++ refer_denied_by_default(_metadata, layer_dir_s1d1_refer, 0,
++ layer_dir_s1d1_execute);
++}
++
++/*
++ * Same test but this time turning around the ABI version order: the first
++ * layer does not handle LANDLOCK_ACCESS_FS_REFER.
++ */
++TEST_F_FORK(layout1, refer_denied_by_default2)
++{
++ refer_denied_by_default(_metadata, layer_dir_s1d1_execute, EXDEV,
++ layer_dir_s1d1_refer);
++}
++
++/*
++ * Tests precedence over renames: denied by default for different parent
++ * directories, *without* a rule matching a parent directory, but not directly
++ * denying access (with MAKE_REG nor REMOVE).
++ */
++TEST_F_FORK(layout1, refer_denied_by_default3)
++{
++ refer_denied_by_default(_metadata, layer_dir_s1d1_refer, 0,
++ layer_dir_s2d1_execute);
++}
++
++/*
++ * Same test but this time turning around the ABI version order: the first
++ * layer does not handle LANDLOCK_ACCESS_FS_REFER.
++ */
++TEST_F_FORK(layout1, refer_denied_by_default4)
++{
++ refer_denied_by_default(_metadata, layer_dir_s2d1_execute, EXDEV,
++ layer_dir_s1d1_refer);
++}
++
+ TEST_F_FORK(layout1, reparent_link)
+ {
+ const struct rule layer1[] = {
+@@ -2336,11 +2466,12 @@ TEST_F_FORK(layout1, reparent_exdev_layers_rename1)
+ ASSERT_EQ(EXDEV, errno);
+
+ /*
+- * However, moving the file2_s1d3 file below dir_s2d3 is allowed
+- * because it cannot inherit MAKE_REG nor MAKE_DIR rights (which are
+- * dedicated to directories).
++ * Moving the file2_s1d3 file below dir_s2d3 is denied because the
++ * second layer does not handle REFER, which is always denied by
++ * default.
+ */
+- ASSERT_EQ(0, rename(file2_s1d3, file1_s2d3));
++ ASSERT_EQ(-1, rename(file2_s1d3, file1_s2d3));
++ ASSERT_EQ(EXDEV, errno);
+ }
+
+ TEST_F_FORK(layout1, reparent_exdev_layers_rename2)
+@@ -2373,8 +2504,12 @@ TEST_F_FORK(layout1, reparent_exdev_layers_rename2)
+ ASSERT_EQ(EACCES, errno);
+ ASSERT_EQ(-1, rename(file1_s1d1, file1_s2d3));
+ ASSERT_EQ(EXDEV, errno);
+- /* Modify layout! */
+- ASSERT_EQ(0, rename(file2_s1d2, file1_s2d3));
++ /*
++ * Modifying the layout is now denied because the second layer does not
++ * handle REFER, which is always denied by default.
++ */
++ ASSERT_EQ(-1, rename(file2_s1d2, file1_s2d3));
++ ASSERT_EQ(EXDEV, errno);
+
+ /* Without REFER source, EACCES wins over EXDEV. */
+ ASSERT_EQ(-1, rename(dir_s1d1, file1_s2d2));