From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.14 commit in: /
Date: Sun, 26 Sep 2021 14:11:46
Message-Id: 1632665490.225c81bb151672296321343fb081bac386ff4dee.mpagano@gentoo
commit:     225c81bb151672296321343fb081bac386ff4dee
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Sep 26 14:11:30 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Sep 26 14:11:30 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=225c81bb

Linux patch 5.14.8

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1007_linux-5.14.8.patch | 6040 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6044 insertions(+)

16 diff --git a/0000_README b/0000_README
17 index 0c8fa67..dcc9f9a 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -75,6 +75,10 @@ Patch: 1006_linux-5.14.7.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.14.7
23
24 +Patch: 1007_linux-5.14.8.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.14.8
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1007_linux-5.14.8.patch b/1007_linux-5.14.8.patch
33 new file mode 100644
34 index 0000000..15b9ec2
35 --- /dev/null
36 +++ b/1007_linux-5.14.8.patch
37 @@ -0,0 +1,6040 @@
38 +diff --git a/Documentation/driver-api/cxl/memory-devices.rst b/Documentation/driver-api/cxl/memory-devices.rst
39 +index 487ce4f41d770..a86e2c7c551ab 100644
40 +--- a/Documentation/driver-api/cxl/memory-devices.rst
41 ++++ b/Documentation/driver-api/cxl/memory-devices.rst
42 +@@ -36,7 +36,7 @@ CXL Core
43 + .. kernel-doc:: drivers/cxl/cxl.h
44 + :internal:
45 +
46 +-.. kernel-doc:: drivers/cxl/core.c
47 ++.. kernel-doc:: drivers/cxl/core/bus.c
48 + :doc: cxl core
49 +
50 + External Interfaces
51 +diff --git a/Makefile b/Makefile
52 +index efb603f06e711..d6b4737194b88 100644
53 +--- a/Makefile
54 ++++ b/Makefile
55 +@@ -1,7 +1,7 @@
56 + # SPDX-License-Identifier: GPL-2.0
57 + VERSION = 5
58 + PATCHLEVEL = 14
59 +-SUBLEVEL = 7
60 ++SUBLEVEL = 8
61 + EXTRAVERSION =
62 + NAME = Opossums on Parade
63 +
64 +diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c
65 +index 7fa6828bb488a..587543c6c51cb 100644
66 +--- a/arch/arm64/kernel/cacheinfo.c
67 ++++ b/arch/arm64/kernel/cacheinfo.c
68 +@@ -43,7 +43,7 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,
69 + this_leaf->type = type;
70 + }
71 +
72 +-static int __init_cache_level(unsigned int cpu)
73 ++int init_cache_level(unsigned int cpu)
74 + {
75 + unsigned int ctype, level, leaves, fw_level;
76 + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
77 +@@ -78,7 +78,7 @@ static int __init_cache_level(unsigned int cpu)
78 + return 0;
79 + }
80 +
81 +-static int __populate_cache_leaves(unsigned int cpu)
82 ++int populate_cache_leaves(unsigned int cpu)
83 + {
84 + unsigned int level, idx;
85 + enum cache_type type;
86 +@@ -97,6 +97,3 @@ static int __populate_cache_leaves(unsigned int cpu)
87 + }
88 + return 0;
89 + }
90 +-
91 +-DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)
92 +-DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves)
93 +diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
94 +index 1fdb7bb7c1984..0ad4afc9359b5 100644
95 +--- a/arch/arm64/mm/init.c
96 ++++ b/arch/arm64/mm/init.c
97 +@@ -319,7 +319,21 @@ static void __init fdt_enforce_memory_region(void)
98 +
99 + void __init arm64_memblock_init(void)
100 + {
101 +- const s64 linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual);
102 ++ s64 linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual);
103 ++
104 ++ /*
105 ++ * Corner case: 52-bit VA capable systems running KVM in nVHE mode may
106 ++ * be limited in their ability to support a linear map that exceeds 51
107 ++ * bits of VA space, depending on the placement of the ID map. Given
108 ++ * that the placement of the ID map may be randomized, let's simply
109 ++ * limit the kernel's linear map to 51 bits as well if we detect this
110 ++ * configuration.
111 ++ */
112 ++ if (IS_ENABLED(CONFIG_KVM) && vabits_actual == 52 &&
113 ++ is_hyp_mode_available() && !is_kernel_in_hyp_mode()) {
114 ++ pr_info("Capping linear region to 51 bits for KVM in nVHE mode on LVA capable hardware.\n");
115 ++ linear_region_size = min_t(u64, linear_region_size, BIT(51));
116 ++ }
117 +
118 + /* Handle linux,usable-memory-range property */
119 + fdt_enforce_memory_region();
120 +diff --git a/arch/mips/kernel/cacheinfo.c b/arch/mips/kernel/cacheinfo.c
121 +index 53d8ea7d36e6d..495dd058231d9 100644
122 +--- a/arch/mips/kernel/cacheinfo.c
123 ++++ b/arch/mips/kernel/cacheinfo.c
124 +@@ -17,7 +17,7 @@ do { \
125 + leaf++; \
126 + } while (0)
127 +
128 +-static int __init_cache_level(unsigned int cpu)
129 ++int init_cache_level(unsigned int cpu)
130 + {
131 + struct cpuinfo_mips *c = &current_cpu_data;
132 + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
133 +@@ -74,7 +74,7 @@ static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map)
134 + cpumask_set_cpu(cpu1, cpu_map);
135 + }
136 +
137 +-static int __populate_cache_leaves(unsigned int cpu)
138 ++int populate_cache_leaves(unsigned int cpu)
139 + {
140 + struct cpuinfo_mips *c = &current_cpu_data;
141 + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
142 +@@ -114,6 +114,3 @@ static int __populate_cache_leaves(unsigned int cpu)
143 +
144 + return 0;
145 + }
146 +-
147 +-DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)
148 +-DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves)
149 +diff --git a/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts b/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts
150 +index baea7d204639a..b254c60589a1c 100644
151 +--- a/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts
152 ++++ b/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts
153 +@@ -16,10 +16,14 @@
154 +
155 + aliases {
156 + ethernet0 = &emac1;
157 ++ serial0 = &serial0;
158 ++ serial1 = &serial1;
159 ++ serial2 = &serial2;
160 ++ serial3 = &serial3;
161 + };
162 +
163 + chosen {
164 +- stdout-path = &serial0;
165 ++ stdout-path = "serial0:115200n8";
166 + };
167 +
168 + cpus {
169 +diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c
170 +index d867813570442..90deabfe63eaa 100644
171 +--- a/arch/riscv/kernel/cacheinfo.c
172 ++++ b/arch/riscv/kernel/cacheinfo.c
173 +@@ -113,7 +113,7 @@ static void fill_cacheinfo(struct cacheinfo **this_leaf,
174 + }
175 + }
176 +
177 +-static int __init_cache_level(unsigned int cpu)
178 ++int init_cache_level(unsigned int cpu)
179 + {
180 + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
181 + struct device_node *np = of_cpu_device_node_get(cpu);
182 +@@ -155,7 +155,7 @@ static int __init_cache_level(unsigned int cpu)
183 + return 0;
184 + }
185 +
186 +-static int __populate_cache_leaves(unsigned int cpu)
187 ++int populate_cache_leaves(unsigned int cpu)
188 + {
189 + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
190 + struct cacheinfo *this_leaf = this_cpu_ci->info_list;
191 +@@ -187,6 +187,3 @@ static int __populate_cache_leaves(unsigned int cpu)
192 +
193 + return 0;
194 + }
195 +-
196 +-DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)
197 +-DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves)
198 +diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
199 +index 3d8a4b94c620b..dd00d98804ec2 100644
200 +--- a/arch/s390/include/asm/stacktrace.h
201 ++++ b/arch/s390/include/asm/stacktrace.h
202 +@@ -34,16 +34,6 @@ static inline bool on_stack(struct stack_info *info,
203 + return addr >= info->begin && addr + len <= info->end;
204 + }
205 +
206 +-static __always_inline unsigned long get_stack_pointer(struct task_struct *task,
207 +- struct pt_regs *regs)
208 +-{
209 +- if (regs)
210 +- return (unsigned long) kernel_stack_pointer(regs);
211 +- if (task == current)
212 +- return current_stack_pointer();
213 +- return (unsigned long) task->thread.ksp;
214 +-}
215 +-
216 + /*
217 + * Stack layout of a C stack frame.
218 + */
219 +@@ -74,6 +64,16 @@ struct stack_frame {
220 + ((unsigned long)__builtin_frame_address(0) - \
221 + offsetof(struct stack_frame, back_chain))
222 +
223 ++static __always_inline unsigned long get_stack_pointer(struct task_struct *task,
224 ++ struct pt_regs *regs)
225 ++{
226 ++ if (regs)
227 ++ return (unsigned long)kernel_stack_pointer(regs);
228 ++ if (task == current)
229 ++ return current_frame_address();
230 ++ return (unsigned long)task->thread.ksp;
231 ++}
232 ++
233 + /*
234 + * To keep this simple mark register 2-6 as being changed (volatile)
235 + * by the called function, even though register 6 is saved/nonvolatile.
236 +diff --git a/arch/s390/include/asm/unwind.h b/arch/s390/include/asm/unwind.h
237 +index de9006b0cfebb..5ebf534ef7533 100644
238 +--- a/arch/s390/include/asm/unwind.h
239 ++++ b/arch/s390/include/asm/unwind.h
240 +@@ -55,10 +55,10 @@ static inline bool unwind_error(struct unwind_state *state)
241 + return state->error;
242 + }
243 +
244 +-static inline void unwind_start(struct unwind_state *state,
245 +- struct task_struct *task,
246 +- struct pt_regs *regs,
247 +- unsigned long first_frame)
248 ++static __always_inline void unwind_start(struct unwind_state *state,
249 ++ struct task_struct *task,
250 ++ struct pt_regs *regs,
251 ++ unsigned long first_frame)
252 + {
253 + task = task ?: current;
254 + first_frame = first_frame ?: get_stack_pointer(task, regs);
255 +diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
256 +index b9716a7e326d0..4c9b967290ae0 100644
257 +--- a/arch/s390/kernel/entry.S
258 ++++ b/arch/s390/kernel/entry.S
259 +@@ -140,10 +140,10 @@ _LPP_OFFSET = __LC_LPP
260 + TSTMSK __LC_MCCK_CODE,(MCCK_CODE_STG_ERROR|MCCK_CODE_STG_KEY_ERROR)
261 + jnz \errlabel
262 + TSTMSK __LC_MCCK_CODE,MCCK_CODE_STG_DEGRAD
263 +- jz oklabel\@
264 ++ jz .Loklabel\@
265 + TSTMSK __LC_MCCK_CODE,MCCK_CODE_STG_FAIL_ADDR
266 + jnz \errlabel
267 +-oklabel\@:
268 ++.Loklabel\@:
269 + .endm
270 +
271 + #if IS_ENABLED(CONFIG_KVM)
272 +diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
273 +index ee23908f1b960..6f0d2d4dea74a 100644
274 +--- a/arch/s390/kernel/setup.c
275 ++++ b/arch/s390/kernel/setup.c
276 +@@ -50,6 +50,7 @@
277 + #include <linux/compat.h>
278 + #include <linux/start_kernel.h>
279 + #include <linux/hugetlb.h>
280 ++#include <linux/kmemleak.h>
281 +
282 + #include <asm/boot_data.h>
283 + #include <asm/ipl.h>
284 +@@ -312,9 +313,12 @@ void *restart_stack;
285 + unsigned long stack_alloc(void)
286 + {
287 + #ifdef CONFIG_VMAP_STACK
288 +- return (unsigned long)__vmalloc_node(THREAD_SIZE, THREAD_SIZE,
289 +- THREADINFO_GFP, NUMA_NO_NODE,
290 +- __builtin_return_address(0));
291 ++ void *ret;
292 ++
293 ++ ret = __vmalloc_node(THREAD_SIZE, THREAD_SIZE, THREADINFO_GFP,
294 ++ NUMA_NO_NODE, __builtin_return_address(0));
295 ++ kmemleak_not_leak(ret);
296 ++ return (unsigned long)ret;
297 + #else
298 + return __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
299 + #endif
300 +diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c
301 +index 4412d6febadef..6bf7bd4479aee 100644
302 +--- a/arch/um/drivers/virtio_uml.c
303 ++++ b/arch/um/drivers/virtio_uml.c
304 +@@ -1139,7 +1139,7 @@ static int virtio_uml_probe(struct platform_device *pdev)
305 + rc = os_connect_socket(pdata->socket_path);
306 + } while (rc == -EINTR);
307 + if (rc < 0)
308 +- return rc;
309 ++ goto error_free;
310 + vu_dev->sock = rc;
311 +
312 + spin_lock_init(&vu_dev->sock_lock);
313 +@@ -1160,6 +1160,8 @@ static int virtio_uml_probe(struct platform_device *pdev)
314 +
315 + error_init:
316 + os_close_file(vu_dev->sock);
317 ++error_free:
318 ++ kfree(vu_dev);
319 + return rc;
320 + }
321 +
322 +diff --git a/arch/um/kernel/skas/clone.c b/arch/um/kernel/skas/clone.c
323 +index 5afac0fef24ea..ff5061f291674 100644
324 +--- a/arch/um/kernel/skas/clone.c
325 ++++ b/arch/um/kernel/skas/clone.c
326 +@@ -24,8 +24,7 @@
327 + void __attribute__ ((__section__ (".__syscall_stub")))
328 + stub_clone_handler(void)
329 + {
330 +- int stack;
331 +- struct stub_data *data = (void *) ((unsigned long)&stack & ~(UM_KERN_PAGE_SIZE - 1));
332 ++ struct stub_data *data = get_stub_page();
333 + long err;
334 +
335 + err = stub_syscall2(__NR_clone, CLONE_PARENT | CLONE_FILES | SIGCHLD,
336 +diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
337 +index d66af2950e06e..b5e36bd0425b5 100644
338 +--- a/arch/x86/kernel/cpu/cacheinfo.c
339 ++++ b/arch/x86/kernel/cpu/cacheinfo.c
340 +@@ -985,7 +985,7 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,
341 + this_leaf->priv = base->nb;
342 + }
343 +
344 +-static int __init_cache_level(unsigned int cpu)
345 ++int init_cache_level(unsigned int cpu)
346 + {
347 + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
348 +
349 +@@ -1014,7 +1014,7 @@ static void get_cache_id(int cpu, struct _cpuid4_info_regs *id4_regs)
350 + id4_regs->id = c->apicid >> index_msb;
351 + }
352 +
353 +-static int __populate_cache_leaves(unsigned int cpu)
354 ++int populate_cache_leaves(unsigned int cpu)
355 + {
356 + unsigned int idx, ret;
357 + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
358 +@@ -1033,6 +1033,3 @@ static int __populate_cache_leaves(unsigned int cpu)
359 +
360 + return 0;
361 + }
362 +-
363 +-DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)
364 +-DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves)
365 +diff --git a/arch/x86/um/shared/sysdep/stub_32.h b/arch/x86/um/shared/sysdep/stub_32.h
366 +index b95db9daf0e82..4c6c2be0c8997 100644
367 +--- a/arch/x86/um/shared/sysdep/stub_32.h
368 ++++ b/arch/x86/um/shared/sysdep/stub_32.h
369 +@@ -101,4 +101,16 @@ static inline void remap_stack_and_trap(void)
370 + "memory");
371 + }
372 +
373 ++static __always_inline void *get_stub_page(void)
374 ++{
375 ++ unsigned long ret;
376 ++
377 ++ asm volatile (
378 ++ "movl %%esp,%0 ;"
379 ++ "andl %1,%0"
380 ++ : "=a" (ret)
381 ++ : "g" (~(UM_KERN_PAGE_SIZE - 1)));
382 ++
383 ++ return (void *)ret;
384 ++}
385 + #endif
386 +diff --git a/arch/x86/um/shared/sysdep/stub_64.h b/arch/x86/um/shared/sysdep/stub_64.h
387 +index 6e2626b77a2e4..e9c4b2b388039 100644
388 +--- a/arch/x86/um/shared/sysdep/stub_64.h
389 ++++ b/arch/x86/um/shared/sysdep/stub_64.h
390 +@@ -108,4 +108,16 @@ static inline void remap_stack_and_trap(void)
391 + __syscall_clobber, "r10", "r8", "r9");
392 + }
393 +
394 ++static __always_inline void *get_stub_page(void)
395 ++{
396 ++ unsigned long ret;
397 ++
398 ++ asm volatile (
399 ++ "movq %%rsp,%0 ;"
400 ++ "andq %1,%0"
401 ++ : "=a" (ret)
402 ++ : "g" (~(UM_KERN_PAGE_SIZE - 1)));
403 ++
404 ++ return (void *)ret;
405 ++}
406 + #endif
407 +diff --git a/arch/x86/um/stub_segv.c b/arch/x86/um/stub_segv.c
408 +index 21836eaf17259..f7eefba034f96 100644
409 +--- a/arch/x86/um/stub_segv.c
410 ++++ b/arch/x86/um/stub_segv.c
411 +@@ -11,9 +11,8 @@
412 + void __attribute__ ((__section__ (".__syscall_stub")))
413 + stub_segv_handler(int sig, siginfo_t *info, void *p)
414 + {
415 +- int stack;
416 ++ struct faultinfo *f = get_stub_page();
417 + ucontext_t *uc = p;
418 +- struct faultinfo *f = (void *)(((unsigned long)&stack) & ~(UM_KERN_PAGE_SIZE - 1));
419 +
420 + GET_FAULTINFO_FROM_MC(*f, &uc->uc_mcontext);
421 + trap_myself();
422 +diff --git a/block/blk-mq.c b/block/blk-mq.c
423 +index 9d4fdc2be88a5..9c64f0025a562 100644
424 +--- a/block/blk-mq.c
425 ++++ b/block/blk-mq.c
426 +@@ -2135,6 +2135,18 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
427 + }
428 + }
429 +
430 ++/*
431 ++ * Allow 4x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
432 ++ * queues. This is important for md arrays to benefit from merging
433 ++ * requests.
434 ++ */
435 ++static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
436 ++{
437 ++ if (plug->multiple_queues)
438 ++ return BLK_MAX_REQUEST_COUNT * 4;
439 ++ return BLK_MAX_REQUEST_COUNT;
440 ++}
441 ++
442 + /**
443 + * blk_mq_submit_bio - Create and send a request to block device.
444 + * @bio: Bio pointer.
445 +@@ -2231,7 +2243,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
446 + else
447 + last = list_entry_rq(plug->mq_list.prev);
448 +
449 +- if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
450 ++ if (request_count >= blk_plug_max_rq_count(plug) || (last &&
451 + blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
452 + blk_flush_plug_list(plug, false);
453 + trace_block_plug(q);
454 +diff --git a/block/blk-throttle.c b/block/blk-throttle.c
455 +index 55c49015e5333..7c4e7993ba970 100644
456 +--- a/block/blk-throttle.c
457 ++++ b/block/blk-throttle.c
458 +@@ -2458,6 +2458,7 @@ int blk_throtl_init(struct request_queue *q)
459 + void blk_throtl_exit(struct request_queue *q)
460 + {
461 + BUG_ON(!q->td);
462 ++ del_timer_sync(&q->td->service_queue.pending_timer);
463 + throtl_shutdown_wq(q);
464 + blkcg_deactivate_policy(q, &blkcg_policy_throtl);
465 + free_percpu(q->td->latency_buckets[READ]);
466 +diff --git a/block/genhd.c b/block/genhd.c
467 +index 298ee78c1bdac..9aba654044169 100644
468 +--- a/block/genhd.c
469 ++++ b/block/genhd.c
470 +@@ -164,6 +164,7 @@ static struct blk_major_name {
471 + void (*probe)(dev_t devt);
472 + } *major_names[BLKDEV_MAJOR_HASH_SIZE];
473 + static DEFINE_MUTEX(major_names_lock);
474 ++static DEFINE_SPINLOCK(major_names_spinlock);
475 +
476 + /* index in the above - for now: assume no multimajor ranges */
477 + static inline int major_to_index(unsigned major)
478 +@@ -176,11 +177,11 @@ void blkdev_show(struct seq_file *seqf, off_t offset)
479 + {
480 + struct blk_major_name *dp;
481 +
482 +- mutex_lock(&major_names_lock);
483 ++ spin_lock(&major_names_spinlock);
484 + for (dp = major_names[major_to_index(offset)]; dp; dp = dp->next)
485 + if (dp->major == offset)
486 + seq_printf(seqf, "%3d %s\n", dp->major, dp->name);
487 +- mutex_unlock(&major_names_lock);
488 ++ spin_unlock(&major_names_spinlock);
489 + }
490 + #endif /* CONFIG_PROC_FS */
491 +
492 +@@ -252,6 +253,7 @@ int __register_blkdev(unsigned int major, const char *name,
493 + p->next = NULL;
494 + index = major_to_index(major);
495 +
496 ++ spin_lock(&major_names_spinlock);
497 + for (n = &major_names[index]; *n; n = &(*n)->next) {
498 + if ((*n)->major == major)
499 + break;
500 +@@ -260,6 +262,7 @@ int __register_blkdev(unsigned int major, const char *name,
501 + *n = p;
502 + else
503 + ret = -EBUSY;
504 ++ spin_unlock(&major_names_spinlock);
505 +
506 + if (ret < 0) {
507 + printk("register_blkdev: cannot get major %u for %s\n",
508 +@@ -279,6 +282,7 @@ void unregister_blkdev(unsigned int major, const char *name)
509 + int index = major_to_index(major);
510 +
511 + mutex_lock(&major_names_lock);
512 ++ spin_lock(&major_names_spinlock);
513 + for (n = &major_names[index]; *n; n = &(*n)->next)
514 + if ((*n)->major == major)
515 + break;
516 +@@ -288,6 +292,7 @@ void unregister_blkdev(unsigned int major, const char *name)
517 + p = *n;
518 + *n = p->next;
519 + }
520 ++ spin_unlock(&major_names_spinlock);
521 + mutex_unlock(&major_names_lock);
522 + kfree(p);
523 + }
524 +diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
525 +index 3a308461246a8..bd92b549fd5a4 100644
526 +--- a/drivers/acpi/x86/s2idle.c
527 ++++ b/drivers/acpi/x86/s2idle.c
528 +@@ -449,25 +449,30 @@ int acpi_s2idle_prepare_late(void)
529 + if (pm_debug_messages_on)
530 + lpi_check_constraints();
531 +
532 +- if (lps0_dsm_func_mask_microsoft > 0) {
533 ++ /* Screen off */
534 ++ if (lps0_dsm_func_mask > 0)
535 ++ acpi_sleep_run_lps0_dsm(acpi_s2idle_vendor_amd() ?
536 ++ ACPI_LPS0_SCREEN_OFF_AMD :
537 ++ ACPI_LPS0_SCREEN_OFF,
538 ++ lps0_dsm_func_mask, lps0_dsm_guid);
539 ++
540 ++ if (lps0_dsm_func_mask_microsoft > 0)
541 + acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF,
542 + lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
543 +- acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
544 +- lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
545 ++
546 ++ /* LPS0 entry */
547 ++ if (lps0_dsm_func_mask > 0)
548 ++ acpi_sleep_run_lps0_dsm(acpi_s2idle_vendor_amd() ?
549 ++ ACPI_LPS0_ENTRY_AMD :
550 ++ ACPI_LPS0_ENTRY,
551 ++ lps0_dsm_func_mask, lps0_dsm_guid);
552 ++ if (lps0_dsm_func_mask_microsoft > 0) {
553 + acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
554 + lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
555 +- } else if (acpi_s2idle_vendor_amd()) {
556 +- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF_AMD,
557 +- lps0_dsm_func_mask, lps0_dsm_guid);
558 +- acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY_AMD,
559 +- lps0_dsm_func_mask, lps0_dsm_guid);
560 +- } else {
561 +- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF,
562 +- lps0_dsm_func_mask, lps0_dsm_guid);
563 +- acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
564 +- lps0_dsm_func_mask, lps0_dsm_guid);
565 ++ /* modern standby entry */
566 ++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
567 ++ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
568 + }
569 +-
570 + return 0;
571 + }
572 +
573 +@@ -476,24 +481,30 @@ void acpi_s2idle_restore_early(void)
574 + if (!lps0_device_handle || sleep_no_lps0)
575 + return;
576 +
577 +- if (lps0_dsm_func_mask_microsoft > 0) {
578 +- acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT,
579 +- lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
580 ++ /* Modern standby exit */
581 ++ if (lps0_dsm_func_mask_microsoft > 0)
582 + acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
583 + lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
584 +- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON,
585 +- lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
586 +- } else if (acpi_s2idle_vendor_amd()) {
587 +- acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT_AMD,
588 +- lps0_dsm_func_mask, lps0_dsm_guid);
589 +- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON_AMD,
590 +- lps0_dsm_func_mask, lps0_dsm_guid);
591 +- } else {
592 ++
593 ++ /* LPS0 exit */
594 ++ if (lps0_dsm_func_mask > 0)
595 ++ acpi_sleep_run_lps0_dsm(acpi_s2idle_vendor_amd() ?
596 ++ ACPI_LPS0_EXIT_AMD :
597 ++ ACPI_LPS0_EXIT,
598 ++ lps0_dsm_func_mask, lps0_dsm_guid);
599 ++ if (lps0_dsm_func_mask_microsoft > 0)
600 + acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT,
601 +- lps0_dsm_func_mask, lps0_dsm_guid);
602 ++ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
603 ++
604 ++ /* Screen on */
605 ++ if (lps0_dsm_func_mask_microsoft > 0)
606 + acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON,
607 +- lps0_dsm_func_mask, lps0_dsm_guid);
608 +- }
609 ++ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
610 ++ if (lps0_dsm_func_mask > 0)
611 ++ acpi_sleep_run_lps0_dsm(acpi_s2idle_vendor_amd() ?
612 ++ ACPI_LPS0_SCREEN_ON_AMD :
613 ++ ACPI_LPS0_SCREEN_ON,
614 ++ lps0_dsm_func_mask, lps0_dsm_guid);
615 + }
616 +
617 + static const struct platform_s2idle_ops acpi_s2idle_ops_lps0 = {
618 +diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
619 +index d568772152c2d..cbea78e79f3df 100644
620 +--- a/drivers/base/power/main.c
621 ++++ b/drivers/base/power/main.c
622 +@@ -1642,7 +1642,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
623 + }
624 +
625 + dev->power.may_skip_resume = true;
626 +- dev->power.must_resume = false;
627 ++ dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
628 +
629 + dpm_watchdog_set(&wd, dev);
630 + device_lock(dev);
631 +diff --git a/drivers/block/n64cart.c b/drivers/block/n64cart.c
632 +index c84be0028f635..26798da661bd4 100644
633 +--- a/drivers/block/n64cart.c
634 ++++ b/drivers/block/n64cart.c
635 +@@ -129,8 +129,8 @@ static int __init n64cart_probe(struct platform_device *pdev)
636 + }
637 +
638 + reg_base = devm_platform_ioremap_resource(pdev, 0);
639 +- if (!reg_base)
640 +- return -EINVAL;
641 ++ if (IS_ERR(reg_base))
642 ++ return PTR_ERR(reg_base);
643 +
644 + disk = blk_alloc_disk(NUMA_NO_NODE);
645 + if (!disk)
646 +diff --git a/drivers/cxl/Makefile b/drivers/cxl/Makefile
647 +index 32954059b37ba..d1aaabc940f3c 100644
648 +--- a/drivers/cxl/Makefile
649 ++++ b/drivers/cxl/Makefile
650 +@@ -1,11 +1,9 @@
651 + # SPDX-License-Identifier: GPL-2.0
652 +-obj-$(CONFIG_CXL_BUS) += cxl_core.o
653 ++obj-$(CONFIG_CXL_BUS) += core/
654 + obj-$(CONFIG_CXL_MEM) += cxl_pci.o
655 + obj-$(CONFIG_CXL_ACPI) += cxl_acpi.o
656 + obj-$(CONFIG_CXL_PMEM) += cxl_pmem.o
657 +
658 +-ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CXL
659 +-cxl_core-y := core.o
660 + cxl_pci-y := pci.o
661 + cxl_acpi-y := acpi.o
662 + cxl_pmem-y := pmem.o
663 +diff --git a/drivers/cxl/core.c b/drivers/cxl/core.c
664 +deleted file mode 100644
665 +index a2e4d54fc7bc4..0000000000000
666 +--- a/drivers/cxl/core.c
667 ++++ /dev/null
668 +@@ -1,1067 +0,0 @@
669 +-// SPDX-License-Identifier: GPL-2.0-only
670 +-/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
671 +-#include <linux/io-64-nonatomic-lo-hi.h>
672 +-#include <linux/device.h>
673 +-#include <linux/module.h>
674 +-#include <linux/pci.h>
675 +-#include <linux/slab.h>
676 +-#include <linux/idr.h>
677 +-#include "cxl.h"
678 +-#include "mem.h"
679 +-
680 +-/**
681 +- * DOC: cxl core
682 +- *
683 +- * The CXL core provides a sysfs hierarchy for control devices and a rendezvous
684 +- * point for cross-device interleave coordination through cxl ports.
685 +- */
686 +-
687 +-static DEFINE_IDA(cxl_port_ida);
688 +-
689 +-static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
690 +- char *buf)
691 +-{
692 +- return sysfs_emit(buf, "%s\n", dev->type->name);
693 +-}
694 +-static DEVICE_ATTR_RO(devtype);
695 +-
696 +-static struct attribute *cxl_base_attributes[] = {
697 +- &dev_attr_devtype.attr,
698 +- NULL,
699 +-};
700 +-
701 +-static struct attribute_group cxl_base_attribute_group = {
702 +- .attrs = cxl_base_attributes,
703 +-};
704 +-
705 +-static ssize_t start_show(struct device *dev, struct device_attribute *attr,
706 +- char *buf)
707 +-{
708 +- struct cxl_decoder *cxld = to_cxl_decoder(dev);
709 +-
710 +- return sysfs_emit(buf, "%#llx\n", cxld->range.start);
711 +-}
712 +-static DEVICE_ATTR_RO(start);
713 +-
714 +-static ssize_t size_show(struct device *dev, struct device_attribute *attr,
715 +- char *buf)
716 +-{
717 +- struct cxl_decoder *cxld = to_cxl_decoder(dev);
718 +-
719 +- return sysfs_emit(buf, "%#llx\n", range_len(&cxld->range));
720 +-}
721 +-static DEVICE_ATTR_RO(size);
722 +-
723 +-#define CXL_DECODER_FLAG_ATTR(name, flag) \
724 +-static ssize_t name##_show(struct device *dev, \
725 +- struct device_attribute *attr, char *buf) \
726 +-{ \
727 +- struct cxl_decoder *cxld = to_cxl_decoder(dev); \
728 +- \
729 +- return sysfs_emit(buf, "%s\n", \
730 +- (cxld->flags & (flag)) ? "1" : "0"); \
731 +-} \
732 +-static DEVICE_ATTR_RO(name)
733 +-
734 +-CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM);
735 +-CXL_DECODER_FLAG_ATTR(cap_ram, CXL_DECODER_F_RAM);
736 +-CXL_DECODER_FLAG_ATTR(cap_type2, CXL_DECODER_F_TYPE2);
737 +-CXL_DECODER_FLAG_ATTR(cap_type3, CXL_DECODER_F_TYPE3);
738 +-CXL_DECODER_FLAG_ATTR(locked, CXL_DECODER_F_LOCK);
739 +-
740 +-static ssize_t target_type_show(struct device *dev,
741 +- struct device_attribute *attr, char *buf)
742 +-{
743 +- struct cxl_decoder *cxld = to_cxl_decoder(dev);
744 +-
745 +- switch (cxld->target_type) {
746 +- case CXL_DECODER_ACCELERATOR:
747 +- return sysfs_emit(buf, "accelerator\n");
748 +- case CXL_DECODER_EXPANDER:
749 +- return sysfs_emit(buf, "expander\n");
750 +- }
751 +- return -ENXIO;
752 +-}
753 +-static DEVICE_ATTR_RO(target_type);
754 +-
755 +-static ssize_t target_list_show(struct device *dev,
756 +- struct device_attribute *attr, char *buf)
757 +-{
758 +- struct cxl_decoder *cxld = to_cxl_decoder(dev);
759 +- ssize_t offset = 0;
760 +- int i, rc = 0;
761 +-
762 +- device_lock(dev);
763 +- for (i = 0; i < cxld->interleave_ways; i++) {
764 +- struct cxl_dport *dport = cxld->target[i];
765 +- struct cxl_dport *next = NULL;
766 +-
767 +- if (!dport)
768 +- break;
769 +-
770 +- if (i + 1 < cxld->interleave_ways)
771 +- next = cxld->target[i + 1];
772 +- rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id,
773 +- next ? "," : "");
774 +- if (rc < 0)
775 +- break;
776 +- offset += rc;
777 +- }
778 +- device_unlock(dev);
779 +-
780 +- if (rc < 0)
781 +- return rc;
782 +-
783 +- rc = sysfs_emit_at(buf, offset, "\n");
784 +- if (rc < 0)
785 +- return rc;
786 +-
787 +- return offset + rc;
788 +-}
789 +-static DEVICE_ATTR_RO(target_list);
790 +-
791 +-static struct attribute *cxl_decoder_base_attrs[] = {
792 +- &dev_attr_start.attr,
793 +- &dev_attr_size.attr,
794 +- &dev_attr_locked.attr,
795 +- &dev_attr_target_list.attr,
796 +- NULL,
797 +-};
798 +-
799 +-static struct attribute_group cxl_decoder_base_attribute_group = {
800 +- .attrs = cxl_decoder_base_attrs,
801 +-};
802 +-
803 +-static struct attribute *cxl_decoder_root_attrs[] = {
804 +- &dev_attr_cap_pmem.attr,
805 +- &dev_attr_cap_ram.attr,
806 +- &dev_attr_cap_type2.attr,
807 +- &dev_attr_cap_type3.attr,
808 +- NULL,
809 +-};
810 +-
811 +-static struct attribute_group cxl_decoder_root_attribute_group = {
812 +- .attrs = cxl_decoder_root_attrs,
813 +-};
814 +-
815 +-static const struct attribute_group *cxl_decoder_root_attribute_groups[] = {
816 +- &cxl_decoder_root_attribute_group,
817 +- &cxl_decoder_base_attribute_group,
818 +- &cxl_base_attribute_group,
819 +- NULL,
820 +-};
821 +-
822 +-static struct attribute *cxl_decoder_switch_attrs[] = {
823 +- &dev_attr_target_type.attr,
824 +- NULL,
825 +-};
826 +-
827 +-static struct attribute_group cxl_decoder_switch_attribute_group = {
828 +- .attrs = cxl_decoder_switch_attrs,
829 +-};
830 +-
831 +-static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = {
832 +- &cxl_decoder_switch_attribute_group,
833 +- &cxl_decoder_base_attribute_group,
834 +- &cxl_base_attribute_group,
835 +- NULL,
836 +-};
837 +-
838 +-static void cxl_decoder_release(struct device *dev)
839 +-{
840 +- struct cxl_decoder *cxld = to_cxl_decoder(dev);
841 +- struct cxl_port *port = to_cxl_port(dev->parent);
842 +-
843 +- ida_free(&port->decoder_ida, cxld->id);
844 +- kfree(cxld);
845 +-}
846 +-
847 +-static const struct device_type cxl_decoder_switch_type = {
848 +- .name = "cxl_decoder_switch",
849 +- .release = cxl_decoder_release,
850 +- .groups = cxl_decoder_switch_attribute_groups,
851 +-};
852 +-
853 +-static const struct device_type cxl_decoder_root_type = {
854 +- .name = "cxl_decoder_root",
855 +- .release = cxl_decoder_release,
856 +- .groups = cxl_decoder_root_attribute_groups,
857 +-};
858 +-
859 +-bool is_root_decoder(struct device *dev)
860 +-{
861 +- return dev->type == &cxl_decoder_root_type;
862 +-}
863 +-EXPORT_SYMBOL_GPL(is_root_decoder);
864 +-
865 +-struct cxl_decoder *to_cxl_decoder(struct device *dev)
866 +-{
867 +- if (dev_WARN_ONCE(dev, dev->type->release != cxl_decoder_release,
868 +- "not a cxl_decoder device\n"))
869 +- return NULL;
870 +- return container_of(dev, struct cxl_decoder, dev);
871 +-}
872 +-EXPORT_SYMBOL_GPL(to_cxl_decoder);
873 +-
874 +-static void cxl_dport_release(struct cxl_dport *dport)
875 +-{
876 +- list_del(&dport->list);
877 +- put_device(dport->dport);
878 +- kfree(dport);
879 +-}
880 +-
881 +-static void cxl_port_release(struct device *dev)
882 +-{
883 +- struct cxl_port *port = to_cxl_port(dev);
884 +- struct cxl_dport *dport, *_d;
885 +-
886 +- device_lock(dev);
887 +- list_for_each_entry_safe(dport, _d, &port->dports, list)
888 +- cxl_dport_release(dport);
889 +- device_unlock(dev);
890 +- ida_free(&cxl_port_ida, port->id);
891 +- kfree(port);
892 +-}
893 +-
894 +-static const struct attribute_group *cxl_port_attribute_groups[] = {
895 +- &cxl_base_attribute_group,
896 +- NULL,
897 +-};
898 +-
899 +-static const struct device_type cxl_port_type = {
900 +- .name = "cxl_port",
901 +- .release = cxl_port_release,
902 +- .groups = cxl_port_attribute_groups,
903 +-};
904 +-
905 +-struct cxl_port *to_cxl_port(struct device *dev)
906 +-{
907 +- if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type,
908 +- "not a cxl_port device\n"))
909 +- return NULL;
910 +- return container_of(dev, struct cxl_port, dev);
911 +-}
912 +-
913 +-static void unregister_port(void *_port)
914 +-{
915 +- struct cxl_port *port = _port;
916 +- struct cxl_dport *dport;
917 +-
918 +- device_lock(&port->dev);
919 +- list_for_each_entry(dport, &port->dports, list) {
920 +- char link_name[CXL_TARGET_STRLEN];
921 +-
922 +- if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d",
923 +- dport->port_id) >= CXL_TARGET_STRLEN)
924 +- continue;
925 +- sysfs_remove_link(&port->dev.kobj, link_name);
926 +- }
927 +- device_unlock(&port->dev);
928 +- device_unregister(&port->dev);
929 +-}
930 +-
931 +-static void cxl_unlink_uport(void *_port)
932 +-{
933 +- struct cxl_port *port = _port;
934 +-
935 +- sysfs_remove_link(&port->dev.kobj, "uport");
936 +-}
937 +-
938 +-static int devm_cxl_link_uport(struct device *host, struct cxl_port *port)
939 +-{
940 +- int rc;
941 +-
942 +- rc = sysfs_create_link(&port->dev.kobj, &port->uport->kobj, "uport");
943 +- if (rc)
944 +- return rc;
945 +- return devm_add_action_or_reset(host, cxl_unlink_uport, port);
946 +-}
947 +-
948 +-static struct cxl_port *cxl_port_alloc(struct device *uport,
949 +- resource_size_t component_reg_phys,
950 +- struct cxl_port *parent_port)
951 +-{
952 +- struct cxl_port *port;
953 +- struct device *dev;
954 +- int rc;
955 +-
956 +- port = kzalloc(sizeof(*port), GFP_KERNEL);
957 +- if (!port)
958 +- return ERR_PTR(-ENOMEM);
959 +-
960 +- rc = ida_alloc(&cxl_port_ida, GFP_KERNEL);
961 +- if (rc < 0)
962 +- goto err;
963 +- port->id = rc;
964 +-
965 +- /*
966 +- * The top-level cxl_port "cxl_root" does not have a cxl_port as
967 +- * its parent and it does not have any corresponding component
968 +- * registers as its decode is described by a fixed platform
969 +- * description.
970 +- */
971 +- dev = &port->dev;
972 +- if (parent_port)
973 +- dev->parent = &parent_port->dev;
974 +- else
975 +- dev->parent = uport;
976 +-
977 +- port->uport = uport;
978 +- port->component_reg_phys = component_reg_phys;
979 +- ida_init(&port->decoder_ida);
980 +- INIT_LIST_HEAD(&port->dports);
981 +-
982 +- device_initialize(dev);
983 +- device_set_pm_not_required(dev);
984 +- dev->bus = &cxl_bus_type;
985 +- dev->type = &cxl_port_type;
986 +-
987 +- return port;
988 +-
989 +-err:
990 +- kfree(port);
991 +- return ERR_PTR(rc);
992 +-}
993 +-
994 +-/**
995 +- * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy
996 +- * @host: host device for devm operations
997 +- * @uport: "physical" device implementing this upstream port
998 +- * @component_reg_phys: (optional) for configurable cxl_port instances
999 +- * @parent_port: next hop up in the CXL memory decode hierarchy
1000 +- */
1001 +-struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
1002 +- resource_size_t component_reg_phys,
1003 +- struct cxl_port *parent_port)
1004 +-{
1005 +- struct cxl_port *port;
1006 +- struct device *dev;
1007 +- int rc;
1008 +-
1009 +- port = cxl_port_alloc(uport, component_reg_phys, parent_port);
1010 +- if (IS_ERR(port))
1011 +- return port;
1012 +-
1013 +- dev = &port->dev;
1014 +- if (parent_port)
1015 +- rc = dev_set_name(dev, "port%d", port->id);
1016 +- else
1017 +- rc = dev_set_name(dev, "root%d", port->id);
1018 +- if (rc)
1019 +- goto err;
1020 +-
1021 +- rc = device_add(dev);
1022 +- if (rc)
1023 +- goto err;
1024 +-
1025 +- rc = devm_add_action_or_reset(host, unregister_port, port);
1026 +- if (rc)
1027 +- return ERR_PTR(rc);
1028 +-
1029 +- rc = devm_cxl_link_uport(host, port);
1030 +- if (rc)
1031 +- return ERR_PTR(rc);
1032 +-
1033 +- return port;
1034 +-
1035 +-err:
1036 +- put_device(dev);
1037 +- return ERR_PTR(rc);
1038 +-}
1039 +-EXPORT_SYMBOL_GPL(devm_cxl_add_port);
1040 +-
1041 +-static struct cxl_dport *find_dport(struct cxl_port *port, int id)
1042 +-{
1043 +- struct cxl_dport *dport;
1044 +-
1045 +- device_lock_assert(&port->dev);
1046 +- list_for_each_entry (dport, &port->dports, list)
1047 +- if (dport->port_id == id)
1048 +- return dport;
1049 +- return NULL;
1050 +-}
1051 +-
1052 +-static int add_dport(struct cxl_port *port, struct cxl_dport *new)
1053 +-{
1054 +- struct cxl_dport *dup;
1055 +-
1056 +- device_lock(&port->dev);
1057 +- dup = find_dport(port, new->port_id);
1058 +- if (dup)
1059 +- dev_err(&port->dev,
1060 +- "unable to add dport%d-%s non-unique port id (%s)\n",
1061 +- new->port_id, dev_name(new->dport),
1062 +- dev_name(dup->dport));
1063 +- else
1064 +- list_add_tail(&new->list, &port->dports);
1065 +- device_unlock(&port->dev);
1066 +-
1067 +- return dup ? -EEXIST : 0;
1068 +-}
1069 +-
1070 +-/**
1071 +- * cxl_add_dport - append downstream port data to a cxl_port
1072 +- * @port: the cxl_port that references this dport
1073 +- * @dport_dev: firmware or PCI device representing the dport
1074 +- * @port_id: identifier for this dport in a decoder's target list
1075 +- * @component_reg_phys: optional location of CXL component registers
1076 +- *
1077 +- * Note that all allocations and links are undone by cxl_port deletion
1078 +- * and release.
1079 +- */
1080 +-int cxl_add_dport(struct cxl_port *port, struct device *dport_dev, int port_id,
1081 +- resource_size_t component_reg_phys)
1082 +-{
1083 +- char link_name[CXL_TARGET_STRLEN];
1084 +- struct cxl_dport *dport;
1085 +- int rc;
1086 +-
1087 +- if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d", port_id) >=
1088 +- CXL_TARGET_STRLEN)
1089 +- return -EINVAL;
1090 +-
1091 +- dport = kzalloc(sizeof(*dport), GFP_KERNEL);
1092 +- if (!dport)
1093 +- return -ENOMEM;
1094 +-
1095 +- INIT_LIST_HEAD(&dport->list);
1096 +- dport->dport = get_device(dport_dev);
1097 +- dport->port_id = port_id;
1098 +- dport->component_reg_phys = component_reg_phys;
1099 +- dport->port = port;
1100 +-
1101 +- rc = add_dport(port, dport);
1102 +- if (rc)
1103 +- goto err;
1104 +-
1105 +- rc = sysfs_create_link(&port->dev.kobj, &dport_dev->kobj, link_name);
1106 +- if (rc)
1107 +- goto err;
1108 +-
1109 +- return 0;
1110 +-err:
1111 +- cxl_dport_release(dport);
1112 +- return rc;
1113 +-}
1114 +-EXPORT_SYMBOL_GPL(cxl_add_dport);
1115 +-
1116 +-static struct cxl_decoder *
1117 +-cxl_decoder_alloc(struct cxl_port *port, int nr_targets, resource_size_t base,
1118 +- resource_size_t len, int interleave_ways,
1119 +- int interleave_granularity, enum cxl_decoder_type type,
1120 +- unsigned long flags)
1121 +-{
1122 +- struct cxl_decoder *cxld;
1123 +- struct device *dev;
1124 +- int rc = 0;
1125 +-
1126 +- if (interleave_ways < 1)
1127 +- return ERR_PTR(-EINVAL);
1128 +-
1129 +- device_lock(&port->dev);
1130 +- if (list_empty(&port->dports))
1131 +- rc = -EINVAL;
1132 +- device_unlock(&port->dev);
1133 +- if (rc)
1134 +- return ERR_PTR(rc);
1135 +-
1136 +- cxld = kzalloc(struct_size(cxld, target, nr_targets), GFP_KERNEL);
1137 +- if (!cxld)
1138 +- return ERR_PTR(-ENOMEM);
1139 +-
1140 +- rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
1141 +- if (rc < 0)
1142 +- goto err;
1143 +-
1144 +- *cxld = (struct cxl_decoder) {
1145 +- .id = rc,
1146 +- .range = {
1147 +- .start = base,
1148 +- .end = base + len - 1,
1149 +- },
1150 +- .flags = flags,
1151 +- .interleave_ways = interleave_ways,
1152 +- .interleave_granularity = interleave_granularity,
1153 +- .target_type = type,
1154 +- };
1155 +-
1156 +- /* handle implied target_list */
1157 +- if (interleave_ways == 1)
1158 +- cxld->target[0] =
1159 +- list_first_entry(&port->dports, struct cxl_dport, list);
1160 +- dev = &cxld->dev;
1161 +- device_initialize(dev);
1162 +- device_set_pm_not_required(dev);
1163 +- dev->parent = &port->dev;
1164 +- dev->bus = &cxl_bus_type;
1165 +-
1166 +- /* root ports do not have a cxl_port_type parent */
1167 +- if (port->dev.parent->type == &cxl_port_type)
1168 +- dev->type = &cxl_decoder_switch_type;
1169 +- else
1170 +- dev->type = &cxl_decoder_root_type;
1171 +-
1172 +- return cxld;
1173 +-err:
1174 +- kfree(cxld);
1175 +- return ERR_PTR(rc);
1176 +-}
1177 +-
1178 +-static void unregister_dev(void *dev)
1179 +-{
1180 +- device_unregister(dev);
1181 +-}
1182 +-
1183 +-struct cxl_decoder *
1184 +-devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets,
1185 +- resource_size_t base, resource_size_t len,
1186 +- int interleave_ways, int interleave_granularity,
1187 +- enum cxl_decoder_type type, unsigned long flags)
1188 +-{
1189 +- struct cxl_decoder *cxld;
1190 +- struct device *dev;
1191 +- int rc;
1192 +-
1193 +- cxld = cxl_decoder_alloc(port, nr_targets, base, len, interleave_ways,
1194 +- interleave_granularity, type, flags);
1195 +- if (IS_ERR(cxld))
1196 +- return cxld;
1197 +-
1198 +- dev = &cxld->dev;
1199 +- rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id);
1200 +- if (rc)
1201 +- goto err;
1202 +-
1203 +- rc = device_add(dev);
1204 +- if (rc)
1205 +- goto err;
1206 +-
1207 +- rc = devm_add_action_or_reset(host, unregister_dev, dev);
1208 +- if (rc)
1209 +- return ERR_PTR(rc);
1210 +- return cxld;
1211 +-
1212 +-err:
1213 +- put_device(dev);
1214 +- return ERR_PTR(rc);
1215 +-}
1216 +-EXPORT_SYMBOL_GPL(devm_cxl_add_decoder);
1217 +-
1218 +-/**
1219 +- * cxl_probe_component_regs() - Detect CXL Component register blocks
1220 +- * @dev: Host device of the @base mapping
1221 +- * @base: Mapping containing the HDM Decoder Capability Header
1222 +- * @map: Map object describing the register block information found
1223 +- *
1224 +- * See CXL 2.0 8.2.4 Component Register Layout and Definition
1225 +- * See CXL 2.0 8.2.5.5 CXL Device Register Interface
1226 +- *
1227 +- * Probe for component register information and return it in map object.
1228 +- */
1229 +-void cxl_probe_component_regs(struct device *dev, void __iomem *base,
1230 +- struct cxl_component_reg_map *map)
1231 +-{
1232 +- int cap, cap_count;
1233 +- u64 cap_array;
1234 +-
1235 +- *map = (struct cxl_component_reg_map) { 0 };
1236 +-
1237 +- /*
1238 +- * CXL.cache and CXL.mem registers are at offset 0x1000 as defined in
1239 +- * CXL 2.0 8.2.4 Table 141.
1240 +- */
1241 +- base += CXL_CM_OFFSET;
1242 +-
1243 +- cap_array = readq(base + CXL_CM_CAP_HDR_OFFSET);
1244 +-
1245 +- if (FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, cap_array) != CM_CAP_HDR_CAP_ID) {
1246 +- dev_err(dev,
1247 +- "Couldn't locate the CXL.cache and CXL.mem capability array header./n");
1248 +- return;
1249 +- }
1250 +-
1251 +- /* It's assumed that future versions will be backward compatible */
1252 +- cap_count = FIELD_GET(CXL_CM_CAP_HDR_ARRAY_SIZE_MASK, cap_array);
1253 +-
1254 +- for (cap = 1; cap <= cap_count; cap++) {
1255 +- void __iomem *register_block;
1256 +- u32 hdr;
1257 +- int decoder_cnt;
1258 +- u16 cap_id, offset;
1259 +- u32 length;
1260 +-
1261 +- hdr = readl(base + cap * 0x4);
1262 +-
1263 +- cap_id = FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, hdr);
1264 +- offset = FIELD_GET(CXL_CM_CAP_PTR_MASK, hdr);
1265 +- register_block = base + offset;
1266 +-
1267 +- switch (cap_id) {
1268 +- case CXL_CM_CAP_CAP_ID_HDM:
1269 +- dev_dbg(dev, "found HDM decoder capability (0x%x)\n",
1270 +- offset);
1271 +-
1272 +- hdr = readl(register_block);
1273 +-
1274 +- decoder_cnt = cxl_hdm_decoder_count(hdr);
1275 +- length = 0x20 * decoder_cnt + 0x10;
1276 +-
1277 +- map->hdm_decoder.valid = true;
1278 +- map->hdm_decoder.offset = CXL_CM_OFFSET + offset;
1279 +- map->hdm_decoder.size = length;
1280 +- break;
1281 +- default:
1282 +- dev_dbg(dev, "Unknown CM cap ID: %d (0x%x)\n", cap_id,
1283 +- offset);
1284 +- break;
1285 +- }
1286 +- }
1287 +-}
1288 +-EXPORT_SYMBOL_GPL(cxl_probe_component_regs);
1289 +-
1290 +-static void cxl_nvdimm_bridge_release(struct device *dev)
1291 +-{
1292 +- struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);
1293 +-
1294 +- kfree(cxl_nvb);
1295 +-}
1296 +-
1297 +-static const struct attribute_group *cxl_nvdimm_bridge_attribute_groups[] = {
1298 +- &cxl_base_attribute_group,
1299 +- NULL,
1300 +-};
1301 +-
1302 +-static const struct device_type cxl_nvdimm_bridge_type = {
1303 +- .name = "cxl_nvdimm_bridge",
1304 +- .release = cxl_nvdimm_bridge_release,
1305 +- .groups = cxl_nvdimm_bridge_attribute_groups,
1306 +-};
1307 +-
1308 +-struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev)
1309 +-{
1310 +- if (dev_WARN_ONCE(dev, dev->type != &cxl_nvdimm_bridge_type,
1311 +- "not a cxl_nvdimm_bridge device\n"))
1312 +- return NULL;
1313 +- return container_of(dev, struct cxl_nvdimm_bridge, dev);
1314 +-}
1315 +-EXPORT_SYMBOL_GPL(to_cxl_nvdimm_bridge);
1316 +-
1317 +-static struct cxl_nvdimm_bridge *
1318 +-cxl_nvdimm_bridge_alloc(struct cxl_port *port)
1319 +-{
1320 +- struct cxl_nvdimm_bridge *cxl_nvb;
1321 +- struct device *dev;
1322 +-
1323 +- cxl_nvb = kzalloc(sizeof(*cxl_nvb), GFP_KERNEL);
1324 +- if (!cxl_nvb)
1325 +- return ERR_PTR(-ENOMEM);
1326 +-
1327 +- dev = &cxl_nvb->dev;
1328 +- cxl_nvb->port = port;
1329 +- cxl_nvb->state = CXL_NVB_NEW;
1330 +- device_initialize(dev);
1331 +- device_set_pm_not_required(dev);
1332 +- dev->parent = &port->dev;
1333 +- dev->bus = &cxl_bus_type;
1334 +- dev->type = &cxl_nvdimm_bridge_type;
1335 +-
1336 +- return cxl_nvb;
1337 +-}
1338 +-
1339 +-static void unregister_nvb(void *_cxl_nvb)
1340 +-{
1341 +- struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb;
1342 +- bool flush;
1343 +-
1344 +- /*
1345 +- * If the bridge was ever activated then there might be in-flight state
1346 +- * work to flush. Once the state has been changed to 'dead' then no new
1347 +- * work can be queued by user-triggered bind.
1348 +- */
1349 +- device_lock(&cxl_nvb->dev);
1350 +- flush = cxl_nvb->state != CXL_NVB_NEW;
1351 +- cxl_nvb->state = CXL_NVB_DEAD;
1352 +- device_unlock(&cxl_nvb->dev);
1353 +-
1354 +- /*
1355 +- * Even though the device core will trigger device_release_driver()
1356 +- * before the unregister, it does not know about the fact that
1357 +- * cxl_nvdimm_bridge_driver defers ->remove() work. So, do the driver
1358 +- * release not and flush it before tearing down the nvdimm device
1359 +- * hierarchy.
1360 +- */
1361 +- device_release_driver(&cxl_nvb->dev);
1362 +- if (flush)
1363 +- flush_work(&cxl_nvb->state_work);
1364 +- device_unregister(&cxl_nvb->dev);
1365 +-}
1366 +-
1367 +-struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
1368 +- struct cxl_port *port)
1369 +-{
1370 +- struct cxl_nvdimm_bridge *cxl_nvb;
1371 +- struct device *dev;
1372 +- int rc;
1373 +-
1374 +- if (!IS_ENABLED(CONFIG_CXL_PMEM))
1375 +- return ERR_PTR(-ENXIO);
1376 +-
1377 +- cxl_nvb = cxl_nvdimm_bridge_alloc(port);
1378 +- if (IS_ERR(cxl_nvb))
1379 +- return cxl_nvb;
1380 +-
1381 +- dev = &cxl_nvb->dev;
1382 +- rc = dev_set_name(dev, "nvdimm-bridge");
1383 +- if (rc)
1384 +- goto err;
1385 +-
1386 +- rc = device_add(dev);
1387 +- if (rc)
1388 +- goto err;
1389 +-
1390 +- rc = devm_add_action_or_reset(host, unregister_nvb, cxl_nvb);
1391 +- if (rc)
1392 +- return ERR_PTR(rc);
1393 +-
1394 +- return cxl_nvb;
1395 +-
1396 +-err:
1397 +- put_device(dev);
1398 +- return ERR_PTR(rc);
1399 +-}
1400 +-EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm_bridge);
1401 +-
1402 +-static void cxl_nvdimm_release(struct device *dev)
1403 +-{
1404 +- struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
1405 +-
1406 +- kfree(cxl_nvd);
1407 +-}
1408 +-
1409 +-static const struct attribute_group *cxl_nvdimm_attribute_groups[] = {
1410 +- &cxl_base_attribute_group,
1411 +- NULL,
1412 +-};
1413 +-
1414 +-static const struct device_type cxl_nvdimm_type = {
1415 +- .name = "cxl_nvdimm",
1416 +- .release = cxl_nvdimm_release,
1417 +- .groups = cxl_nvdimm_attribute_groups,
1418 +-};
1419 +-
1420 +-bool is_cxl_nvdimm(struct device *dev)
1421 +-{
1422 +- return dev->type == &cxl_nvdimm_type;
1423 +-}
1424 +-EXPORT_SYMBOL_GPL(is_cxl_nvdimm);
1425 +-
1426 +-struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
1427 +-{
1428 +- if (dev_WARN_ONCE(dev, !is_cxl_nvdimm(dev),
1429 +- "not a cxl_nvdimm device\n"))
1430 +- return NULL;
1431 +- return container_of(dev, struct cxl_nvdimm, dev);
1432 +-}
1433 +-EXPORT_SYMBOL_GPL(to_cxl_nvdimm);
1434 +-
1435 +-static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd)
1436 +-{
1437 +- struct cxl_nvdimm *cxl_nvd;
1438 +- struct device *dev;
1439 +-
1440 +- cxl_nvd = kzalloc(sizeof(*cxl_nvd), GFP_KERNEL);
1441 +- if (!cxl_nvd)
1442 +- return ERR_PTR(-ENOMEM);
1443 +-
1444 +- dev = &cxl_nvd->dev;
1445 +- cxl_nvd->cxlmd = cxlmd;
1446 +- device_initialize(dev);
1447 +- device_set_pm_not_required(dev);
1448 +- dev->parent = &cxlmd->dev;
1449 +- dev->bus = &cxl_bus_type;
1450 +- dev->type = &cxl_nvdimm_type;
1451 +-
1452 +- return cxl_nvd;
1453 +-}
1454 +-
1455 +-int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd)
1456 +-{
1457 +- struct cxl_nvdimm *cxl_nvd;
1458 +- struct device *dev;
1459 +- int rc;
1460 +-
1461 +- cxl_nvd = cxl_nvdimm_alloc(cxlmd);
1462 +- if (IS_ERR(cxl_nvd))
1463 +- return PTR_ERR(cxl_nvd);
1464 +-
1465 +- dev = &cxl_nvd->dev;
1466 +- rc = dev_set_name(dev, "pmem%d", cxlmd->id);
1467 +- if (rc)
1468 +- goto err;
1469 +-
1470 +- rc = device_add(dev);
1471 +- if (rc)
1472 +- goto err;
1473 +-
1474 +- dev_dbg(host, "%s: register %s\n", dev_name(dev->parent),
1475 +- dev_name(dev));
1476 +-
1477 +- return devm_add_action_or_reset(host, unregister_dev, dev);
1478 +-
1479 +-err:
1480 +- put_device(dev);
1481 +- return rc;
1482 +-}
1483 +-EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm);
1484 +-
1485 +-/**
1486 +- * cxl_probe_device_regs() - Detect CXL Device register blocks
1487 +- * @dev: Host device of the @base mapping
1488 +- * @base: Mapping of CXL 2.0 8.2.8 CXL Device Register Interface
1489 +- * @map: Map object describing the register block information found
1490 +- *
1491 +- * Probe for device register information and return it in map object.
1492 +- */
1493 +-void cxl_probe_device_regs(struct device *dev, void __iomem *base,
1494 +- struct cxl_device_reg_map *map)
1495 +-{
1496 +- int cap, cap_count;
1497 +- u64 cap_array;
1498 +-
1499 +- *map = (struct cxl_device_reg_map){ 0 };
1500 +-
1501 +- cap_array = readq(base + CXLDEV_CAP_ARRAY_OFFSET);
1502 +- if (FIELD_GET(CXLDEV_CAP_ARRAY_ID_MASK, cap_array) !=
1503 +- CXLDEV_CAP_ARRAY_CAP_ID)
1504 +- return;
1505 +-
1506 +- cap_count = FIELD_GET(CXLDEV_CAP_ARRAY_COUNT_MASK, cap_array);
1507 +-
1508 +- for (cap = 1; cap <= cap_count; cap++) {
1509 +- u32 offset, length;
1510 +- u16 cap_id;
1511 +-
1512 +- cap_id = FIELD_GET(CXLDEV_CAP_HDR_CAP_ID_MASK,
1513 +- readl(base + cap * 0x10));
1514 +- offset = readl(base + cap * 0x10 + 0x4);
1515 +- length = readl(base + cap * 0x10 + 0x8);
1516 +-
1517 +- switch (cap_id) {
1518 +- case CXLDEV_CAP_CAP_ID_DEVICE_STATUS:
1519 +- dev_dbg(dev, "found Status capability (0x%x)\n", offset);
1520 +-
1521 +- map->status.valid = true;
1522 +- map->status.offset = offset;
1523 +- map->status.size = length;
1524 +- break;
1525 +- case CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX:
1526 +- dev_dbg(dev, "found Mailbox capability (0x%x)\n", offset);
1527 +- map->mbox.valid = true;
1528 +- map->mbox.offset = offset;
1529 +- map->mbox.size = length;
1530 +- break;
1531 +- case CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX:
1532 +- dev_dbg(dev, "found Secondary Mailbox capability (0x%x)\n", offset);
1533 +- break;
1534 +- case CXLDEV_CAP_CAP_ID_MEMDEV:
1535 +- dev_dbg(dev, "found Memory Device capability (0x%x)\n", offset);
1536 +- map->memdev.valid = true;
1537 +- map->memdev.offset = offset;
1538 +- map->memdev.size = length;
1539 +- break;
1540 +- default:
1541 +- if (cap_id >= 0x8000)
1542 +- dev_dbg(dev, "Vendor cap ID: %#x offset: %#x\n", cap_id, offset);
1543 +- else
1544 +- dev_dbg(dev, "Unknown cap ID: %#x offset: %#x\n", cap_id, offset);
1545 +- break;
1546 +- }
1547 +- }
1548 +-}
1549 +-EXPORT_SYMBOL_GPL(cxl_probe_device_regs);
1550 +-
1551 +-static void __iomem *devm_cxl_iomap_block(struct device *dev,
1552 +- resource_size_t addr,
1553 +- resource_size_t length)
1554 +-{
1555 +- void __iomem *ret_val;
1556 +- struct resource *res;
1557 +-
1558 +- res = devm_request_mem_region(dev, addr, length, dev_name(dev));
1559 +- if (!res) {
1560 +- resource_size_t end = addr + length - 1;
1561 +-
1562 +- dev_err(dev, "Failed to request region %pa-%pa\n", &addr, &end);
1563 +- return NULL;
1564 +- }
1565 +-
1566 +- ret_val = devm_ioremap(dev, addr, length);
1567 +- if (!ret_val)
1568 +- dev_err(dev, "Failed to map region %pr\n", res);
1569 +-
1570 +- return ret_val;
1571 +-}
1572 +-
1573 +-int cxl_map_component_regs(struct pci_dev *pdev,
1574 +- struct cxl_component_regs *regs,
1575 +- struct cxl_register_map *map)
1576 +-{
1577 +- struct device *dev = &pdev->dev;
1578 +- resource_size_t phys_addr;
1579 +- resource_size_t length;
1580 +-
1581 +- phys_addr = pci_resource_start(pdev, map->barno);
1582 +- phys_addr += map->block_offset;
1583 +-
1584 +- phys_addr += map->component_map.hdm_decoder.offset;
1585 +- length = map->component_map.hdm_decoder.size;
1586 +- regs->hdm_decoder = devm_cxl_iomap_block(dev, phys_addr, length);
1587 +- if (!regs->hdm_decoder)
1588 +- return -ENOMEM;
1589 +-
1590 +- return 0;
1591 +-}
1592 +-EXPORT_SYMBOL_GPL(cxl_map_component_regs);
1593 +-
1594 +-int cxl_map_device_regs(struct pci_dev *pdev,
1595 +- struct cxl_device_regs *regs,
1596 +- struct cxl_register_map *map)
1597 +-{
1598 +- struct device *dev = &pdev->dev;
1599 +- resource_size_t phys_addr;
1600 +-
1601 +- phys_addr = pci_resource_start(pdev, map->barno);
1602 +- phys_addr += map->block_offset;
1603 +-
1604 +- if (map->device_map.status.valid) {
1605 +- resource_size_t addr;
1606 +- resource_size_t length;
1607 +-
1608 +- addr = phys_addr + map->device_map.status.offset;
1609 +- length = map->device_map.status.size;
1610 +- regs->status = devm_cxl_iomap_block(dev, addr, length);
1611 +- if (!regs->status)
1612 +- return -ENOMEM;
1613 +- }
1614 +-
1615 +- if (map->device_map.mbox.valid) {
1616 +- resource_size_t addr;
1617 +- resource_size_t length;
1618 +-
1619 +- addr = phys_addr + map->device_map.mbox.offset;
1620 +- length = map->device_map.mbox.size;
1621 +- regs->mbox = devm_cxl_iomap_block(dev, addr, length);
1622 +- if (!regs->mbox)
1623 +- return -ENOMEM;
1624 +- }
1625 +-
1626 +- if (map->device_map.memdev.valid) {
1627 +- resource_size_t addr;
1628 +- resource_size_t length;
1629 +-
1630 +- addr = phys_addr + map->device_map.memdev.offset;
1631 +- length = map->device_map.memdev.size;
1632 +- regs->memdev = devm_cxl_iomap_block(dev, addr, length);
1633 +- if (!regs->memdev)
1634 +- return -ENOMEM;
1635 +- }
1636 +-
1637 +- return 0;
1638 +-}
1639 +-EXPORT_SYMBOL_GPL(cxl_map_device_regs);
1640 +-
1641 +-/**
1642 +- * __cxl_driver_register - register a driver for the cxl bus
1643 +- * @cxl_drv: cxl driver structure to attach
1644 +- * @owner: owning module/driver
1645 +- * @modname: KBUILD_MODNAME for parent driver
1646 +- */
1647 +-int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
1648 +- const char *modname)
1649 +-{
1650 +- if (!cxl_drv->probe) {
1651 +- pr_debug("%s ->probe() must be specified\n", modname);
1652 +- return -EINVAL;
1653 +- }
1654 +-
1655 +- if (!cxl_drv->name) {
1656 +- pr_debug("%s ->name must be specified\n", modname);
1657 +- return -EINVAL;
1658 +- }
1659 +-
1660 +- if (!cxl_drv->id) {
1661 +- pr_debug("%s ->id must be specified\n", modname);
1662 +- return -EINVAL;
1663 +- }
1664 +-
1665 +- cxl_drv->drv.bus = &cxl_bus_type;
1666 +- cxl_drv->drv.owner = owner;
1667 +- cxl_drv->drv.mod_name = modname;
1668 +- cxl_drv->drv.name = cxl_drv->name;
1669 +-
1670 +- return driver_register(&cxl_drv->drv);
1671 +-}
1672 +-EXPORT_SYMBOL_GPL(__cxl_driver_register);
1673 +-
1674 +-void cxl_driver_unregister(struct cxl_driver *cxl_drv)
1675 +-{
1676 +- driver_unregister(&cxl_drv->drv);
1677 +-}
1678 +-EXPORT_SYMBOL_GPL(cxl_driver_unregister);
1679 +-
1680 +-static int cxl_device_id(struct device *dev)
1681 +-{
1682 +- if (dev->type == &cxl_nvdimm_bridge_type)
1683 +- return CXL_DEVICE_NVDIMM_BRIDGE;
1684 +- if (dev->type == &cxl_nvdimm_type)
1685 +- return CXL_DEVICE_NVDIMM;
1686 +- return 0;
1687 +-}
1688 +-
1689 +-static int cxl_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
1690 +-{
1691 +- return add_uevent_var(env, "MODALIAS=" CXL_MODALIAS_FMT,
1692 +- cxl_device_id(dev));
1693 +-}
1694 +-
1695 +-static int cxl_bus_match(struct device *dev, struct device_driver *drv)
1696 +-{
1697 +- return cxl_device_id(dev) == to_cxl_drv(drv)->id;
1698 +-}
1699 +-
1700 +-static int cxl_bus_probe(struct device *dev)
1701 +-{
1702 +- return to_cxl_drv(dev->driver)->probe(dev);
1703 +-}
1704 +-
1705 +-static int cxl_bus_remove(struct device *dev)
1706 +-{
1707 +- struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver);
1708 +-
1709 +- if (cxl_drv->remove)
1710 +- cxl_drv->remove(dev);
1711 +- return 0;
1712 +-}
1713 +-
1714 +-struct bus_type cxl_bus_type = {
1715 +- .name = "cxl",
1716 +- .uevent = cxl_bus_uevent,
1717 +- .match = cxl_bus_match,
1718 +- .probe = cxl_bus_probe,
1719 +- .remove = cxl_bus_remove,
1720 +-};
1721 +-EXPORT_SYMBOL_GPL(cxl_bus_type);
1722 +-
1723 +-static __init int cxl_core_init(void)
1724 +-{
1725 +- return bus_register(&cxl_bus_type);
1726 +-}
1727 +-
1728 +-static void cxl_core_exit(void)
1729 +-{
1730 +- bus_unregister(&cxl_bus_type);
1731 +-}
1732 +-
1733 +-module_init(cxl_core_init);
1734 +-module_exit(cxl_core_exit);
1735 +-MODULE_LICENSE("GPL v2");
1736 +diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile
1737 +new file mode 100644
1738 +index 0000000000000..ad137f96e5c84
1739 +--- /dev/null
1740 ++++ b/drivers/cxl/core/Makefile
1741 +@@ -0,0 +1,5 @@
1742 ++# SPDX-License-Identifier: GPL-2.0
1743 ++obj-$(CONFIG_CXL_BUS) += cxl_core.o
1744 ++
1745 ++ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CXL -I$(srctree)/drivers/cxl
1746 ++cxl_core-y := bus.o
1747 +diff --git a/drivers/cxl/core/bus.c b/drivers/cxl/core/bus.c
1748 +new file mode 100644
1749 +index 0000000000000..0815eec239443
1750 +--- /dev/null
1751 ++++ b/drivers/cxl/core/bus.c
1752 +@@ -0,0 +1,1067 @@
1753 ++// SPDX-License-Identifier: GPL-2.0-only
1754 ++/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
1755 ++#include <linux/io-64-nonatomic-lo-hi.h>
1756 ++#include <linux/device.h>
1757 ++#include <linux/module.h>
1758 ++#include <linux/pci.h>
1759 ++#include <linux/slab.h>
1760 ++#include <linux/idr.h>
1761 ++#include <cxlmem.h>
1762 ++#include <cxl.h>
1763 ++
1764 ++/**
1765 ++ * DOC: cxl core
1766 ++ *
1767 ++ * The CXL core provides a sysfs hierarchy for control devices and a rendezvous
1768 ++ * point for cross-device interleave coordination through cxl ports.
1769 ++ */
1770 ++
1771 ++static DEFINE_IDA(cxl_port_ida);
1772 ++
1773 ++static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
1774 ++ char *buf)
1775 ++{
1776 ++ return sysfs_emit(buf, "%s\n", dev->type->name);
1777 ++}
1778 ++static DEVICE_ATTR_RO(devtype);
1779 ++
1780 ++static struct attribute *cxl_base_attributes[] = {
1781 ++ &dev_attr_devtype.attr,
1782 ++ NULL,
1783 ++};
1784 ++
1785 ++static struct attribute_group cxl_base_attribute_group = {
1786 ++ .attrs = cxl_base_attributes,
1787 ++};
1788 ++
1789 ++static ssize_t start_show(struct device *dev, struct device_attribute *attr,
1790 ++ char *buf)
1791 ++{
1792 ++ struct cxl_decoder *cxld = to_cxl_decoder(dev);
1793 ++
1794 ++ return sysfs_emit(buf, "%#llx\n", cxld->range.start);
1795 ++}
1796 ++static DEVICE_ATTR_RO(start);
1797 ++
1798 ++static ssize_t size_show(struct device *dev, struct device_attribute *attr,
1799 ++ char *buf)
1800 ++{
1801 ++ struct cxl_decoder *cxld = to_cxl_decoder(dev);
1802 ++
1803 ++ return sysfs_emit(buf, "%#llx\n", range_len(&cxld->range));
1804 ++}
1805 ++static DEVICE_ATTR_RO(size);
1806 ++
1807 ++#define CXL_DECODER_FLAG_ATTR(name, flag) \
1808 ++static ssize_t name##_show(struct device *dev, \
1809 ++ struct device_attribute *attr, char *buf) \
1810 ++{ \
1811 ++ struct cxl_decoder *cxld = to_cxl_decoder(dev); \
1812 ++ \
1813 ++ return sysfs_emit(buf, "%s\n", \
1814 ++ (cxld->flags & (flag)) ? "1" : "0"); \
1815 ++} \
1816 ++static DEVICE_ATTR_RO(name)
1817 ++
1818 ++CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM);
1819 ++CXL_DECODER_FLAG_ATTR(cap_ram, CXL_DECODER_F_RAM);
1820 ++CXL_DECODER_FLAG_ATTR(cap_type2, CXL_DECODER_F_TYPE2);
1821 ++CXL_DECODER_FLAG_ATTR(cap_type3, CXL_DECODER_F_TYPE3);
1822 ++CXL_DECODER_FLAG_ATTR(locked, CXL_DECODER_F_LOCK);
1823 ++
1824 ++static ssize_t target_type_show(struct device *dev,
1825 ++ struct device_attribute *attr, char *buf)
1826 ++{
1827 ++ struct cxl_decoder *cxld = to_cxl_decoder(dev);
1828 ++
1829 ++ switch (cxld->target_type) {
1830 ++ case CXL_DECODER_ACCELERATOR:
1831 ++ return sysfs_emit(buf, "accelerator\n");
1832 ++ case CXL_DECODER_EXPANDER:
1833 ++ return sysfs_emit(buf, "expander\n");
1834 ++ }
1835 ++ return -ENXIO;
1836 ++}
1837 ++static DEVICE_ATTR_RO(target_type);
1838 ++
1839 ++static ssize_t target_list_show(struct device *dev,
1840 ++ struct device_attribute *attr, char *buf)
1841 ++{
1842 ++ struct cxl_decoder *cxld = to_cxl_decoder(dev);
1843 ++ ssize_t offset = 0;
1844 ++ int i, rc = 0;
1845 ++
1846 ++ device_lock(dev);
1847 ++ for (i = 0; i < cxld->interleave_ways; i++) {
1848 ++ struct cxl_dport *dport = cxld->target[i];
1849 ++ struct cxl_dport *next = NULL;
1850 ++
1851 ++ if (!dport)
1852 ++ break;
1853 ++
1854 ++ if (i + 1 < cxld->interleave_ways)
1855 ++ next = cxld->target[i + 1];
1856 ++ rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id,
1857 ++ next ? "," : "");
1858 ++ if (rc < 0)
1859 ++ break;
1860 ++ offset += rc;
1861 ++ }
1862 ++ device_unlock(dev);
1863 ++
1864 ++ if (rc < 0)
1865 ++ return rc;
1866 ++
1867 ++ rc = sysfs_emit_at(buf, offset, "\n");
1868 ++ if (rc < 0)
1869 ++ return rc;
1870 ++
1871 ++ return offset + rc;
1872 ++}
1873 ++static DEVICE_ATTR_RO(target_list);
1874 ++
1875 ++static struct attribute *cxl_decoder_base_attrs[] = {
1876 ++ &dev_attr_start.attr,
1877 ++ &dev_attr_size.attr,
1878 ++ &dev_attr_locked.attr,
1879 ++ &dev_attr_target_list.attr,
1880 ++ NULL,
1881 ++};
1882 ++
1883 ++static struct attribute_group cxl_decoder_base_attribute_group = {
1884 ++ .attrs = cxl_decoder_base_attrs,
1885 ++};
1886 ++
1887 ++static struct attribute *cxl_decoder_root_attrs[] = {
1888 ++ &dev_attr_cap_pmem.attr,
1889 ++ &dev_attr_cap_ram.attr,
1890 ++ &dev_attr_cap_type2.attr,
1891 ++ &dev_attr_cap_type3.attr,
1892 ++ NULL,
1893 ++};
1894 ++
1895 ++static struct attribute_group cxl_decoder_root_attribute_group = {
1896 ++ .attrs = cxl_decoder_root_attrs,
1897 ++};
1898 ++
1899 ++static const struct attribute_group *cxl_decoder_root_attribute_groups[] = {
1900 ++ &cxl_decoder_root_attribute_group,
1901 ++ &cxl_decoder_base_attribute_group,
1902 ++ &cxl_base_attribute_group,
1903 ++ NULL,
1904 ++};
1905 ++
1906 ++static struct attribute *cxl_decoder_switch_attrs[] = {
1907 ++ &dev_attr_target_type.attr,
1908 ++ NULL,
1909 ++};
1910 ++
1911 ++static struct attribute_group cxl_decoder_switch_attribute_group = {
1912 ++ .attrs = cxl_decoder_switch_attrs,
1913 ++};
1914 ++
1915 ++static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = {
1916 ++ &cxl_decoder_switch_attribute_group,
1917 ++ &cxl_decoder_base_attribute_group,
1918 ++ &cxl_base_attribute_group,
1919 ++ NULL,
1920 ++};
1921 ++
1922 ++static void cxl_decoder_release(struct device *dev)
1923 ++{
1924 ++ struct cxl_decoder *cxld = to_cxl_decoder(dev);
1925 ++ struct cxl_port *port = to_cxl_port(dev->parent);
1926 ++
1927 ++ ida_free(&port->decoder_ida, cxld->id);
1928 ++ kfree(cxld);
1929 ++}
1930 ++
1931 ++static const struct device_type cxl_decoder_switch_type = {
1932 ++ .name = "cxl_decoder_switch",
1933 ++ .release = cxl_decoder_release,
1934 ++ .groups = cxl_decoder_switch_attribute_groups,
1935 ++};
1936 ++
1937 ++static const struct device_type cxl_decoder_root_type = {
1938 ++ .name = "cxl_decoder_root",
1939 ++ .release = cxl_decoder_release,
1940 ++ .groups = cxl_decoder_root_attribute_groups,
1941 ++};
1942 ++
1943 ++bool is_root_decoder(struct device *dev)
1944 ++{
1945 ++ return dev->type == &cxl_decoder_root_type;
1946 ++}
1947 ++EXPORT_SYMBOL_GPL(is_root_decoder);
1948 ++
1949 ++struct cxl_decoder *to_cxl_decoder(struct device *dev)
1950 ++{
1951 ++ if (dev_WARN_ONCE(dev, dev->type->release != cxl_decoder_release,
1952 ++ "not a cxl_decoder device\n"))
1953 ++ return NULL;
1954 ++ return container_of(dev, struct cxl_decoder, dev);
1955 ++}
1956 ++EXPORT_SYMBOL_GPL(to_cxl_decoder);
1957 ++
1958 ++static void cxl_dport_release(struct cxl_dport *dport)
1959 ++{
1960 ++ list_del(&dport->list);
1961 ++ put_device(dport->dport);
1962 ++ kfree(dport);
1963 ++}
1964 ++
1965 ++static void cxl_port_release(struct device *dev)
1966 ++{
1967 ++ struct cxl_port *port = to_cxl_port(dev);
1968 ++ struct cxl_dport *dport, *_d;
1969 ++
1970 ++ device_lock(dev);
1971 ++ list_for_each_entry_safe(dport, _d, &port->dports, list)
1972 ++ cxl_dport_release(dport);
1973 ++ device_unlock(dev);
1974 ++ ida_free(&cxl_port_ida, port->id);
1975 ++ kfree(port);
1976 ++}
1977 ++
1978 ++static const struct attribute_group *cxl_port_attribute_groups[] = {
1979 ++ &cxl_base_attribute_group,
1980 ++ NULL,
1981 ++};
1982 ++
1983 ++static const struct device_type cxl_port_type = {
1984 ++ .name = "cxl_port",
1985 ++ .release = cxl_port_release,
1986 ++ .groups = cxl_port_attribute_groups,
1987 ++};
1988 ++
1989 ++struct cxl_port *to_cxl_port(struct device *dev)
1990 ++{
1991 ++ if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type,
1992 ++ "not a cxl_port device\n"))
1993 ++ return NULL;
1994 ++ return container_of(dev, struct cxl_port, dev);
1995 ++}
1996 ++
1997 ++static void unregister_port(void *_port)
1998 ++{
1999 ++ struct cxl_port *port = _port;
2000 ++ struct cxl_dport *dport;
2001 ++
2002 ++ device_lock(&port->dev);
2003 ++ list_for_each_entry(dport, &port->dports, list) {
2004 ++ char link_name[CXL_TARGET_STRLEN];
2005 ++
2006 ++ if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d",
2007 ++ dport->port_id) >= CXL_TARGET_STRLEN)
2008 ++ continue;
2009 ++ sysfs_remove_link(&port->dev.kobj, link_name);
2010 ++ }
2011 ++ device_unlock(&port->dev);
2012 ++ device_unregister(&port->dev);
2013 ++}
2014 ++
2015 ++static void cxl_unlink_uport(void *_port)
2016 ++{
2017 ++ struct cxl_port *port = _port;
2018 ++
2019 ++ sysfs_remove_link(&port->dev.kobj, "uport");
2020 ++}
2021 ++
2022 ++static int devm_cxl_link_uport(struct device *host, struct cxl_port *port)
2023 ++{
2024 ++ int rc;
2025 ++
2026 ++ rc = sysfs_create_link(&port->dev.kobj, &port->uport->kobj, "uport");
2027 ++ if (rc)
2028 ++ return rc;
2029 ++ return devm_add_action_or_reset(host, cxl_unlink_uport, port);
2030 ++}
2031 ++
2032 ++static struct cxl_port *cxl_port_alloc(struct device *uport,
2033 ++ resource_size_t component_reg_phys,
2034 ++ struct cxl_port *parent_port)
2035 ++{
2036 ++ struct cxl_port *port;
2037 ++ struct device *dev;
2038 ++ int rc;
2039 ++
2040 ++ port = kzalloc(sizeof(*port), GFP_KERNEL);
2041 ++ if (!port)
2042 ++ return ERR_PTR(-ENOMEM);
2043 ++
2044 ++ rc = ida_alloc(&cxl_port_ida, GFP_KERNEL);
2045 ++ if (rc < 0)
2046 ++ goto err;
2047 ++ port->id = rc;
2048 ++
2049 ++ /*
2050 ++ * The top-level cxl_port "cxl_root" does not have a cxl_port as
2051 ++ * its parent and it does not have any corresponding component
2052 ++ * registers as its decode is described by a fixed platform
2053 ++ * description.
2054 ++ */
2055 ++ dev = &port->dev;
2056 ++ if (parent_port)
2057 ++ dev->parent = &parent_port->dev;
2058 ++ else
2059 ++ dev->parent = uport;
2060 ++
2061 ++ port->uport = uport;
2062 ++ port->component_reg_phys = component_reg_phys;
2063 ++ ida_init(&port->decoder_ida);
2064 ++ INIT_LIST_HEAD(&port->dports);
2065 ++
2066 ++ device_initialize(dev);
2067 ++ device_set_pm_not_required(dev);
2068 ++ dev->bus = &cxl_bus_type;
2069 ++ dev->type = &cxl_port_type;
2070 ++
2071 ++ return port;
2072 ++
2073 ++err:
2074 ++ kfree(port);
2075 ++ return ERR_PTR(rc);
2076 ++}
2077 ++
2078 ++/**
2079 ++ * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy
2080 ++ * @host: host device for devm operations
2081 ++ * @uport: "physical" device implementing this upstream port
2082 ++ * @component_reg_phys: (optional) for configurable cxl_port instances
2083 ++ * @parent_port: next hop up in the CXL memory decode hierarchy
2084 ++ */
2085 ++struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
2086 ++ resource_size_t component_reg_phys,
2087 ++ struct cxl_port *parent_port)
2088 ++{
2089 ++ struct cxl_port *port;
2090 ++ struct device *dev;
2091 ++ int rc;
2092 ++
2093 ++ port = cxl_port_alloc(uport, component_reg_phys, parent_port);
2094 ++ if (IS_ERR(port))
2095 ++ return port;
2096 ++
2097 ++ dev = &port->dev;
2098 ++ if (parent_port)
2099 ++ rc = dev_set_name(dev, "port%d", port->id);
2100 ++ else
2101 ++ rc = dev_set_name(dev, "root%d", port->id);
2102 ++ if (rc)
2103 ++ goto err;
2104 ++
2105 ++ rc = device_add(dev);
2106 ++ if (rc)
2107 ++ goto err;
2108 ++
2109 ++ rc = devm_add_action_or_reset(host, unregister_port, port);
2110 ++ if (rc)
2111 ++ return ERR_PTR(rc);
2112 ++
2113 ++ rc = devm_cxl_link_uport(host, port);
2114 ++ if (rc)
2115 ++ return ERR_PTR(rc);
2116 ++
2117 ++ return port;
2118 ++
2119 ++err:
2120 ++ put_device(dev);
2121 ++ return ERR_PTR(rc);
2122 ++}
2123 ++EXPORT_SYMBOL_GPL(devm_cxl_add_port);
2124 ++
2125 ++static struct cxl_dport *find_dport(struct cxl_port *port, int id)
2126 ++{
2127 ++ struct cxl_dport *dport;
2128 ++
2129 ++ device_lock_assert(&port->dev);
2130 ++ list_for_each_entry (dport, &port->dports, list)
2131 ++ if (dport->port_id == id)
2132 ++ return dport;
2133 ++ return NULL;
2134 ++}
2135 ++
2136 ++static int add_dport(struct cxl_port *port, struct cxl_dport *new)
2137 ++{
2138 ++ struct cxl_dport *dup;
2139 ++
2140 ++ device_lock(&port->dev);
2141 ++ dup = find_dport(port, new->port_id);
2142 ++ if (dup)
2143 ++ dev_err(&port->dev,
2144 ++ "unable to add dport%d-%s non-unique port id (%s)\n",
2145 ++ new->port_id, dev_name(new->dport),
2146 ++ dev_name(dup->dport));
2147 ++ else
2148 ++ list_add_tail(&new->list, &port->dports);
2149 ++ device_unlock(&port->dev);
2150 ++
2151 ++ return dup ? -EEXIST : 0;
2152 ++}
2153 ++
2154 ++/**
2155 ++ * cxl_add_dport - append downstream port data to a cxl_port
2156 ++ * @port: the cxl_port that references this dport
2157 ++ * @dport_dev: firmware or PCI device representing the dport
2158 ++ * @port_id: identifier for this dport in a decoder's target list
2159 ++ * @component_reg_phys: optional location of CXL component registers
2160 ++ *
2161 ++ * Note that all allocations and links are undone by cxl_port deletion
2162 ++ * and release.
2163 ++ */
2164 ++int cxl_add_dport(struct cxl_port *port, struct device *dport_dev, int port_id,
2165 ++ resource_size_t component_reg_phys)
2166 ++{
2167 ++ char link_name[CXL_TARGET_STRLEN];
2168 ++ struct cxl_dport *dport;
2169 ++ int rc;
2170 ++
2171 ++ if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d", port_id) >=
2172 ++ CXL_TARGET_STRLEN)
2173 ++ return -EINVAL;
2174 ++
2175 ++ dport = kzalloc(sizeof(*dport), GFP_KERNEL);
2176 ++ if (!dport)
2177 ++ return -ENOMEM;
2178 ++
2179 ++ INIT_LIST_HEAD(&dport->list);
2180 ++ dport->dport = get_device(dport_dev);
2181 ++ dport->port_id = port_id;
2182 ++ dport->component_reg_phys = component_reg_phys;
2183 ++ dport->port = port;
2184 ++
2185 ++ rc = add_dport(port, dport);
2186 ++ if (rc)
2187 ++ goto err;
2188 ++
2189 ++ rc = sysfs_create_link(&port->dev.kobj, &dport_dev->kobj, link_name);
2190 ++ if (rc)
2191 ++ goto err;
2192 ++
2193 ++ return 0;
2194 ++err:
2195 ++ cxl_dport_release(dport);
2196 ++ return rc;
2197 ++}
2198 ++EXPORT_SYMBOL_GPL(cxl_add_dport);
2199 ++
2200 ++static struct cxl_decoder *
2201 ++cxl_decoder_alloc(struct cxl_port *port, int nr_targets, resource_size_t base,
2202 ++ resource_size_t len, int interleave_ways,
2203 ++ int interleave_granularity, enum cxl_decoder_type type,
2204 ++ unsigned long flags)
2205 ++{
2206 ++ struct cxl_decoder *cxld;
2207 ++ struct device *dev;
2208 ++ int rc = 0;
2209 ++
2210 ++ if (interleave_ways < 1)
2211 ++ return ERR_PTR(-EINVAL);
2212 ++
2213 ++ device_lock(&port->dev);
2214 ++ if (list_empty(&port->dports))
2215 ++ rc = -EINVAL;
2216 ++ device_unlock(&port->dev);
2217 ++ if (rc)
2218 ++ return ERR_PTR(rc);
2219 ++
2220 ++ cxld = kzalloc(struct_size(cxld, target, nr_targets), GFP_KERNEL);
2221 ++ if (!cxld)
2222 ++ return ERR_PTR(-ENOMEM);
2223 ++
2224 ++ rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
2225 ++ if (rc < 0)
2226 ++ goto err;
2227 ++
2228 ++ *cxld = (struct cxl_decoder) {
2229 ++ .id = rc,
2230 ++ .range = {
2231 ++ .start = base,
2232 ++ .end = base + len - 1,
2233 ++ },
2234 ++ .flags = flags,
2235 ++ .interleave_ways = interleave_ways,
2236 ++ .interleave_granularity = interleave_granularity,
2237 ++ .target_type = type,
2238 ++ };
2239 ++
2240 ++ /* handle implied target_list */
2241 ++ if (interleave_ways == 1)
2242 ++ cxld->target[0] =
2243 ++ list_first_entry(&port->dports, struct cxl_dport, list);
2244 ++ dev = &cxld->dev;
2245 ++ device_initialize(dev);
2246 ++ device_set_pm_not_required(dev);
2247 ++ dev->parent = &port->dev;
2248 ++ dev->bus = &cxl_bus_type;
2249 ++
2250 ++ /* root ports do not have a cxl_port_type parent */
2251 ++ if (port->dev.parent->type == &cxl_port_type)
2252 ++ dev->type = &cxl_decoder_switch_type;
2253 ++ else
2254 ++ dev->type = &cxl_decoder_root_type;
2255 ++
2256 ++ return cxld;
2257 ++err:
2258 ++ kfree(cxld);
2259 ++ return ERR_PTR(rc);
2260 ++}
2261 ++
2262 ++static void unregister_dev(void *dev)
2263 ++{
2264 ++ device_unregister(dev);
2265 ++}
2266 ++
2267 ++struct cxl_decoder *
2268 ++devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets,
2269 ++ resource_size_t base, resource_size_t len,
2270 ++ int interleave_ways, int interleave_granularity,
2271 ++ enum cxl_decoder_type type, unsigned long flags)
2272 ++{
2273 ++ struct cxl_decoder *cxld;
2274 ++ struct device *dev;
2275 ++ int rc;
2276 ++
2277 ++ cxld = cxl_decoder_alloc(port, nr_targets, base, len, interleave_ways,
2278 ++ interleave_granularity, type, flags);
2279 ++ if (IS_ERR(cxld))
2280 ++ return cxld;
2281 ++
2282 ++ dev = &cxld->dev;
2283 ++ rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id);
2284 ++ if (rc)
2285 ++ goto err;
2286 ++
2287 ++ rc = device_add(dev);
2288 ++ if (rc)
2289 ++ goto err;
2290 ++
2291 ++ rc = devm_add_action_or_reset(host, unregister_dev, dev);
2292 ++ if (rc)
2293 ++ return ERR_PTR(rc);
2294 ++ return cxld;
2295 ++
2296 ++err:
2297 ++ put_device(dev);
2298 ++ return ERR_PTR(rc);
2299 ++}
2300 ++EXPORT_SYMBOL_GPL(devm_cxl_add_decoder);
2301 ++
2302 ++/**
2303 ++ * cxl_probe_component_regs() - Detect CXL Component register blocks
2304 ++ * @dev: Host device of the @base mapping
2305 ++ * @base: Mapping containing the HDM Decoder Capability Header
2306 ++ * @map: Map object describing the register block information found
2307 ++ *
2308 ++ * See CXL 2.0 8.2.4 Component Register Layout and Definition
2309 ++ * See CXL 2.0 8.2.5.5 CXL Device Register Interface
2310 ++ *
2311 ++ * Probe for component register information and return it in map object.
2312 ++ */
2313 ++void cxl_probe_component_regs(struct device *dev, void __iomem *base,
2314 ++ struct cxl_component_reg_map *map)
2315 ++{
2316 ++ int cap, cap_count;
2317 ++ u64 cap_array;
2318 ++
2319 ++ *map = (struct cxl_component_reg_map) { 0 };
2320 ++
2321 ++ /*
2322 ++ * CXL.cache and CXL.mem registers are at offset 0x1000 as defined in
2323 ++ * CXL 2.0 8.2.4 Table 141.
2324 ++ */
2325 ++ base += CXL_CM_OFFSET;
2326 ++
2327 ++ cap_array = readq(base + CXL_CM_CAP_HDR_OFFSET);
2328 ++
2329 ++ if (FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, cap_array) != CM_CAP_HDR_CAP_ID) {
2330 ++ dev_err(dev,
2331 ++ "Couldn't locate the CXL.cache and CXL.mem capability array header./n");
2332 ++ return;
2333 ++ }
2334 ++
2335 ++ /* It's assumed that future versions will be backward compatible */
2336 ++ cap_count = FIELD_GET(CXL_CM_CAP_HDR_ARRAY_SIZE_MASK, cap_array);
2337 ++
2338 ++ for (cap = 1; cap <= cap_count; cap++) {
2339 ++ void __iomem *register_block;
2340 ++ u32 hdr;
2341 ++ int decoder_cnt;
2342 ++ u16 cap_id, offset;
2343 ++ u32 length;
2344 ++
2345 ++ hdr = readl(base + cap * 0x4);
2346 ++
2347 ++ cap_id = FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, hdr);
2348 ++ offset = FIELD_GET(CXL_CM_CAP_PTR_MASK, hdr);
2349 ++ register_block = base + offset;
2350 ++
2351 ++ switch (cap_id) {
2352 ++ case CXL_CM_CAP_CAP_ID_HDM:
2353 ++ dev_dbg(dev, "found HDM decoder capability (0x%x)\n",
2354 ++ offset);
2355 ++
2356 ++ hdr = readl(register_block);
2357 ++
2358 ++ decoder_cnt = cxl_hdm_decoder_count(hdr);
2359 ++ length = 0x20 * decoder_cnt + 0x10;
2360 ++
2361 ++ map->hdm_decoder.valid = true;
2362 ++ map->hdm_decoder.offset = CXL_CM_OFFSET + offset;
2363 ++ map->hdm_decoder.size = length;
2364 ++ break;
2365 ++ default:
2366 ++ dev_dbg(dev, "Unknown CM cap ID: %d (0x%x)\n", cap_id,
2367 ++ offset);
2368 ++ break;
2369 ++ }
2370 ++ }
2371 ++}
2372 ++EXPORT_SYMBOL_GPL(cxl_probe_component_regs);
2373 ++
2374 ++static void cxl_nvdimm_bridge_release(struct device *dev)
2375 ++{
2376 ++ struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);
2377 ++
2378 ++ kfree(cxl_nvb);
2379 ++}
2380 ++
2381 ++static const struct attribute_group *cxl_nvdimm_bridge_attribute_groups[] = {
2382 ++ &cxl_base_attribute_group,
2383 ++ NULL,
2384 ++};
2385 ++
2386 ++static const struct device_type cxl_nvdimm_bridge_type = {
2387 ++ .name = "cxl_nvdimm_bridge",
2388 ++ .release = cxl_nvdimm_bridge_release,
2389 ++ .groups = cxl_nvdimm_bridge_attribute_groups,
2390 ++};
2391 ++
2392 ++struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev)
2393 ++{
2394 ++ if (dev_WARN_ONCE(dev, dev->type != &cxl_nvdimm_bridge_type,
2395 ++ "not a cxl_nvdimm_bridge device\n"))
2396 ++ return NULL;
2397 ++ return container_of(dev, struct cxl_nvdimm_bridge, dev);
2398 ++}
2399 ++EXPORT_SYMBOL_GPL(to_cxl_nvdimm_bridge);
2400 ++
2401 ++static struct cxl_nvdimm_bridge *
2402 ++cxl_nvdimm_bridge_alloc(struct cxl_port *port)
2403 ++{
2404 ++ struct cxl_nvdimm_bridge *cxl_nvb;
2405 ++ struct device *dev;
2406 ++
2407 ++ cxl_nvb = kzalloc(sizeof(*cxl_nvb), GFP_KERNEL);
2408 ++ if (!cxl_nvb)
2409 ++ return ERR_PTR(-ENOMEM);
2410 ++
2411 ++ dev = &cxl_nvb->dev;
2412 ++ cxl_nvb->port = port;
2413 ++ cxl_nvb->state = CXL_NVB_NEW;
2414 ++ device_initialize(dev);
2415 ++ device_set_pm_not_required(dev);
2416 ++ dev->parent = &port->dev;
2417 ++ dev->bus = &cxl_bus_type;
2418 ++ dev->type = &cxl_nvdimm_bridge_type;
2419 ++
2420 ++ return cxl_nvb;
2421 ++}
2422 ++
2423 ++static void unregister_nvb(void *_cxl_nvb)
2424 ++{
2425 ++ struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb;
2426 ++ bool flush;
2427 ++
2428 ++ /*
2429 ++ * If the bridge was ever activated then there might be in-flight state
2430 ++ * work to flush. Once the state has been changed to 'dead' then no new
2431 ++ * work can be queued by user-triggered bind.
2432 ++ */
2433 ++ device_lock(&cxl_nvb->dev);
2434 ++ flush = cxl_nvb->state != CXL_NVB_NEW;
2435 ++ cxl_nvb->state = CXL_NVB_DEAD;
2436 ++ device_unlock(&cxl_nvb->dev);
2437 ++
2438 ++ /*
2439 ++ * Even though the device core will trigger device_release_driver()
2440 ++ * before the unregister, it does not know about the fact that
2441 ++ * cxl_nvdimm_bridge_driver defers ->remove() work. So, do the driver
2442 ++ * release now and flush it before tearing down the nvdimm device
2443 ++ * hierarchy.
2444 ++ */
2445 ++ device_release_driver(&cxl_nvb->dev);
2446 ++ if (flush)
2447 ++ flush_work(&cxl_nvb->state_work);
2448 ++ device_unregister(&cxl_nvb->dev);
2449 ++}
2450 ++
2451 ++struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
2452 ++ struct cxl_port *port)
2453 ++{
2454 ++ struct cxl_nvdimm_bridge *cxl_nvb;
2455 ++ struct device *dev;
2456 ++ int rc;
2457 ++
2458 ++ if (!IS_ENABLED(CONFIG_CXL_PMEM))
2459 ++ return ERR_PTR(-ENXIO);
2460 ++
2461 ++ cxl_nvb = cxl_nvdimm_bridge_alloc(port);
2462 ++ if (IS_ERR(cxl_nvb))
2463 ++ return cxl_nvb;
2464 ++
2465 ++ dev = &cxl_nvb->dev;
2466 ++ rc = dev_set_name(dev, "nvdimm-bridge");
2467 ++ if (rc)
2468 ++ goto err;
2469 ++
2470 ++ rc = device_add(dev);
2471 ++ if (rc)
2472 ++ goto err;
2473 ++
2474 ++ rc = devm_add_action_or_reset(host, unregister_nvb, cxl_nvb);
2475 ++ if (rc)
2476 ++ return ERR_PTR(rc);
2477 ++
2478 ++ return cxl_nvb;
2479 ++
2480 ++err:
2481 ++ put_device(dev);
2482 ++ return ERR_PTR(rc);
2483 ++}
2484 ++EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm_bridge);
2485 ++
2486 ++static void cxl_nvdimm_release(struct device *dev)
2487 ++{
2488 ++ struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
2489 ++
2490 ++ kfree(cxl_nvd);
2491 ++}
2492 ++
2493 ++static const struct attribute_group *cxl_nvdimm_attribute_groups[] = {
2494 ++ &cxl_base_attribute_group,
2495 ++ NULL,
2496 ++};
2497 ++
2498 ++static const struct device_type cxl_nvdimm_type = {
2499 ++ .name = "cxl_nvdimm",
2500 ++ .release = cxl_nvdimm_release,
2501 ++ .groups = cxl_nvdimm_attribute_groups,
2502 ++};
2503 ++
2504 ++bool is_cxl_nvdimm(struct device *dev)
2505 ++{
2506 ++ return dev->type == &cxl_nvdimm_type;
2507 ++}
2508 ++EXPORT_SYMBOL_GPL(is_cxl_nvdimm);
2509 ++
2510 ++struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
2511 ++{
2512 ++ if (dev_WARN_ONCE(dev, !is_cxl_nvdimm(dev),
2513 ++ "not a cxl_nvdimm device\n"))
2514 ++ return NULL;
2515 ++ return container_of(dev, struct cxl_nvdimm, dev);
2516 ++}
2517 ++EXPORT_SYMBOL_GPL(to_cxl_nvdimm);
2518 ++
2519 ++static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd)
2520 ++{
2521 ++ struct cxl_nvdimm *cxl_nvd;
2522 ++ struct device *dev;
2523 ++
2524 ++ cxl_nvd = kzalloc(sizeof(*cxl_nvd), GFP_KERNEL);
2525 ++ if (!cxl_nvd)
2526 ++ return ERR_PTR(-ENOMEM);
2527 ++
2528 ++ dev = &cxl_nvd->dev;
2529 ++ cxl_nvd->cxlmd = cxlmd;
2530 ++ device_initialize(dev);
2531 ++ device_set_pm_not_required(dev);
2532 ++ dev->parent = &cxlmd->dev;
2533 ++ dev->bus = &cxl_bus_type;
2534 ++ dev->type = &cxl_nvdimm_type;
2535 ++
2536 ++ return cxl_nvd;
2537 ++}
2538 ++
2539 ++int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd)
2540 ++{
2541 ++ struct cxl_nvdimm *cxl_nvd;
2542 ++ struct device *dev;
2543 ++ int rc;
2544 ++
2545 ++ cxl_nvd = cxl_nvdimm_alloc(cxlmd);
2546 ++ if (IS_ERR(cxl_nvd))
2547 ++ return PTR_ERR(cxl_nvd);
2548 ++
2549 ++ dev = &cxl_nvd->dev;
2550 ++ rc = dev_set_name(dev, "pmem%d", cxlmd->id);
2551 ++ if (rc)
2552 ++ goto err;
2553 ++
2554 ++ rc = device_add(dev);
2555 ++ if (rc)
2556 ++ goto err;
2557 ++
2558 ++ dev_dbg(host, "%s: register %s\n", dev_name(dev->parent),
2559 ++ dev_name(dev));
2560 ++
2561 ++ return devm_add_action_or_reset(host, unregister_dev, dev);
2562 ++
2563 ++err:
2564 ++ put_device(dev);
2565 ++ return rc;
2566 ++}
2567 ++EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm);
2568 ++
2569 ++/**
2570 ++ * cxl_probe_device_regs() - Detect CXL Device register blocks
2571 ++ * @dev: Host device of the @base mapping
2572 ++ * @base: Mapping of CXL 2.0 8.2.8 CXL Device Register Interface
2573 ++ * @map: Map object describing the register block information found
2574 ++ *
2575 ++ * Probe for device register information and return it in map object.
2576 ++ */
2577 ++void cxl_probe_device_regs(struct device *dev, void __iomem *base,
2578 ++ struct cxl_device_reg_map *map)
2579 ++{
2580 ++ int cap, cap_count;
2581 ++ u64 cap_array;
2582 ++
2583 ++ *map = (struct cxl_device_reg_map){ 0 };
2584 ++
2585 ++ cap_array = readq(base + CXLDEV_CAP_ARRAY_OFFSET);
2586 ++ if (FIELD_GET(CXLDEV_CAP_ARRAY_ID_MASK, cap_array) !=
2587 ++ CXLDEV_CAP_ARRAY_CAP_ID)
2588 ++ return;
2589 ++
2590 ++ cap_count = FIELD_GET(CXLDEV_CAP_ARRAY_COUNT_MASK, cap_array);
2591 ++
2592 ++ for (cap = 1; cap <= cap_count; cap++) {
2593 ++ u32 offset, length;
2594 ++ u16 cap_id;
2595 ++
2596 ++ cap_id = FIELD_GET(CXLDEV_CAP_HDR_CAP_ID_MASK,
2597 ++ readl(base + cap * 0x10));
2598 ++ offset = readl(base + cap * 0x10 + 0x4);
2599 ++ length = readl(base + cap * 0x10 + 0x8);
2600 ++
2601 ++ switch (cap_id) {
2602 ++ case CXLDEV_CAP_CAP_ID_DEVICE_STATUS:
2603 ++ dev_dbg(dev, "found Status capability (0x%x)\n", offset);
2604 ++
2605 ++ map->status.valid = true;
2606 ++ map->status.offset = offset;
2607 ++ map->status.size = length;
2608 ++ break;
2609 ++ case CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX:
2610 ++ dev_dbg(dev, "found Mailbox capability (0x%x)\n", offset);
2611 ++ map->mbox.valid = true;
2612 ++ map->mbox.offset = offset;
2613 ++ map->mbox.size = length;
2614 ++ break;
2615 ++ case CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX:
2616 ++ dev_dbg(dev, "found Secondary Mailbox capability (0x%x)\n", offset);
2617 ++ break;
2618 ++ case CXLDEV_CAP_CAP_ID_MEMDEV:
2619 ++ dev_dbg(dev, "found Memory Device capability (0x%x)\n", offset);
2620 ++ map->memdev.valid = true;
2621 ++ map->memdev.offset = offset;
2622 ++ map->memdev.size = length;
2623 ++ break;
2624 ++ default:
2625 ++ if (cap_id >= 0x8000)
2626 ++ dev_dbg(dev, "Vendor cap ID: %#x offset: %#x\n", cap_id, offset);
2627 ++ else
2628 ++ dev_dbg(dev, "Unknown cap ID: %#x offset: %#x\n", cap_id, offset);
2629 ++ break;
2630 ++ }
2631 ++ }
2632 ++}
2633 ++EXPORT_SYMBOL_GPL(cxl_probe_device_regs);
2634 ++
2635 ++static void __iomem *devm_cxl_iomap_block(struct device *dev,
2636 ++ resource_size_t addr,
2637 ++ resource_size_t length)
2638 ++{
2639 ++ void __iomem *ret_val;
2640 ++ struct resource *res;
2641 ++
2642 ++ res = devm_request_mem_region(dev, addr, length, dev_name(dev));
2643 ++ if (!res) {
2644 ++ resource_size_t end = addr + length - 1;
2645 ++
2646 ++ dev_err(dev, "Failed to request region %pa-%pa\n", &addr, &end);
2647 ++ return NULL;
2648 ++ }
2649 ++
2650 ++ ret_val = devm_ioremap(dev, addr, length);
2651 ++ if (!ret_val)
2652 ++ dev_err(dev, "Failed to map region %pr\n", res);
2653 ++
2654 ++ return ret_val;
2655 ++}
2656 ++
2657 ++int cxl_map_component_regs(struct pci_dev *pdev,
2658 ++ struct cxl_component_regs *regs,
2659 ++ struct cxl_register_map *map)
2660 ++{
2661 ++ struct device *dev = &pdev->dev;
2662 ++ resource_size_t phys_addr;
2663 ++ resource_size_t length;
2664 ++
2665 ++ phys_addr = pci_resource_start(pdev, map->barno);
2666 ++ phys_addr += map->block_offset;
2667 ++
2668 ++ phys_addr += map->component_map.hdm_decoder.offset;
2669 ++ length = map->component_map.hdm_decoder.size;
2670 ++ regs->hdm_decoder = devm_cxl_iomap_block(dev, phys_addr, length);
2671 ++ if (!regs->hdm_decoder)
2672 ++ return -ENOMEM;
2673 ++
2674 ++ return 0;
2675 ++}
2676 ++EXPORT_SYMBOL_GPL(cxl_map_component_regs);
2677 ++
2678 ++int cxl_map_device_regs(struct pci_dev *pdev,
2679 ++ struct cxl_device_regs *regs,
2680 ++ struct cxl_register_map *map)
2681 ++{
2682 ++ struct device *dev = &pdev->dev;
2683 ++ resource_size_t phys_addr;
2684 ++
2685 ++ phys_addr = pci_resource_start(pdev, map->barno);
2686 ++ phys_addr += map->block_offset;
2687 ++
2688 ++ if (map->device_map.status.valid) {
2689 ++ resource_size_t addr;
2690 ++ resource_size_t length;
2691 ++
2692 ++ addr = phys_addr + map->device_map.status.offset;
2693 ++ length = map->device_map.status.size;
2694 ++ regs->status = devm_cxl_iomap_block(dev, addr, length);
2695 ++ if (!regs->status)
2696 ++ return -ENOMEM;
2697 ++ }
2698 ++
2699 ++ if (map->device_map.mbox.valid) {
2700 ++ resource_size_t addr;
2701 ++ resource_size_t length;
2702 ++
2703 ++ addr = phys_addr + map->device_map.mbox.offset;
2704 ++ length = map->device_map.mbox.size;
2705 ++ regs->mbox = devm_cxl_iomap_block(dev, addr, length);
2706 ++ if (!regs->mbox)
2707 ++ return -ENOMEM;
2708 ++ }
2709 ++
2710 ++ if (map->device_map.memdev.valid) {
2711 ++ resource_size_t addr;
2712 ++ resource_size_t length;
2713 ++
2714 ++ addr = phys_addr + map->device_map.memdev.offset;
2715 ++ length = map->device_map.memdev.size;
2716 ++ regs->memdev = devm_cxl_iomap_block(dev, addr, length);
2717 ++ if (!regs->memdev)
2718 ++ return -ENOMEM;
2719 ++ }
2720 ++
2721 ++ return 0;
2722 ++}
2723 ++EXPORT_SYMBOL_GPL(cxl_map_device_regs);
2724 ++
2725 ++/**
2726 ++ * __cxl_driver_register - register a driver for the cxl bus
2727 ++ * @cxl_drv: cxl driver structure to attach
2728 ++ * @owner: owning module/driver
2729 ++ * @modname: KBUILD_MODNAME for parent driver
2730 ++ */
2731 ++int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
2732 ++ const char *modname)
2733 ++{
2734 ++ if (!cxl_drv->probe) {
2735 ++ pr_debug("%s ->probe() must be specified\n", modname);
2736 ++ return -EINVAL;
2737 ++ }
2738 ++
2739 ++ if (!cxl_drv->name) {
2740 ++ pr_debug("%s ->name must be specified\n", modname);
2741 ++ return -EINVAL;
2742 ++ }
2743 ++
2744 ++ if (!cxl_drv->id) {
2745 ++ pr_debug("%s ->id must be specified\n", modname);
2746 ++ return -EINVAL;
2747 ++ }
2748 ++
2749 ++ cxl_drv->drv.bus = &cxl_bus_type;
2750 ++ cxl_drv->drv.owner = owner;
2751 ++ cxl_drv->drv.mod_name = modname;
2752 ++ cxl_drv->drv.name = cxl_drv->name;
2753 ++
2754 ++ return driver_register(&cxl_drv->drv);
2755 ++}
2756 ++EXPORT_SYMBOL_GPL(__cxl_driver_register);
2757 ++
2758 ++void cxl_driver_unregister(struct cxl_driver *cxl_drv)
2759 ++{
2760 ++ driver_unregister(&cxl_drv->drv);
2761 ++}
2762 ++EXPORT_SYMBOL_GPL(cxl_driver_unregister);
2763 ++
2764 ++static int cxl_device_id(struct device *dev)
2765 ++{
2766 ++ if (dev->type == &cxl_nvdimm_bridge_type)
2767 ++ return CXL_DEVICE_NVDIMM_BRIDGE;
2768 ++ if (dev->type == &cxl_nvdimm_type)
2769 ++ return CXL_DEVICE_NVDIMM;
2770 ++ return 0;
2771 ++}
2772 ++
2773 ++static int cxl_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
2774 ++{
2775 ++ return add_uevent_var(env, "MODALIAS=" CXL_MODALIAS_FMT,
2776 ++ cxl_device_id(dev));
2777 ++}
2778 ++
2779 ++static int cxl_bus_match(struct device *dev, struct device_driver *drv)
2780 ++{
2781 ++ return cxl_device_id(dev) == to_cxl_drv(drv)->id;
2782 ++}
2783 ++
2784 ++static int cxl_bus_probe(struct device *dev)
2785 ++{
2786 ++ return to_cxl_drv(dev->driver)->probe(dev);
2787 ++}
2788 ++
2789 ++static int cxl_bus_remove(struct device *dev)
2790 ++{
2791 ++ struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver);
2792 ++
2793 ++ if (cxl_drv->remove)
2794 ++ cxl_drv->remove(dev);
2795 ++ return 0;
2796 ++}
2797 ++
2798 ++struct bus_type cxl_bus_type = {
2799 ++ .name = "cxl",
2800 ++ .uevent = cxl_bus_uevent,
2801 ++ .match = cxl_bus_match,
2802 ++ .probe = cxl_bus_probe,
2803 ++ .remove = cxl_bus_remove,
2804 ++};
2805 ++EXPORT_SYMBOL_GPL(cxl_bus_type);
2806 ++
2807 ++static __init int cxl_core_init(void)
2808 ++{
2809 ++ return bus_register(&cxl_bus_type);
2810 ++}
2811 ++
2812 ++static void cxl_core_exit(void)
2813 ++{
2814 ++ bus_unregister(&cxl_bus_type);
2815 ++}
2816 ++
2817 ++module_init(cxl_core_init);
2818 ++module_exit(cxl_core_exit);
2819 ++MODULE_LICENSE("GPL v2");
2820 +diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
2821 +new file mode 100644
2822 +index 0000000000000..0cd463de13423
2823 +--- /dev/null
2824 ++++ b/drivers/cxl/cxlmem.h
2825 +@@ -0,0 +1,96 @@
2826 ++/* SPDX-License-Identifier: GPL-2.0-only */
2827 ++/* Copyright(c) 2020-2021 Intel Corporation. */
2828 ++#ifndef __CXL_MEM_H__
2829 ++#define __CXL_MEM_H__
2830 ++#include <linux/cdev.h>
2831 ++#include "cxl.h"
2832 ++
2833 ++/* CXL 2.0 8.2.8.5.1.1 Memory Device Status Register */
2834 ++#define CXLMDEV_STATUS_OFFSET 0x0
2835 ++#define CXLMDEV_DEV_FATAL BIT(0)
2836 ++#define CXLMDEV_FW_HALT BIT(1)
2837 ++#define CXLMDEV_STATUS_MEDIA_STATUS_MASK GENMASK(3, 2)
2838 ++#define CXLMDEV_MS_NOT_READY 0
2839 ++#define CXLMDEV_MS_READY 1
2840 ++#define CXLMDEV_MS_ERROR 2
2841 ++#define CXLMDEV_MS_DISABLED 3
2842 ++#define CXLMDEV_READY(status) \
2843 ++ (FIELD_GET(CXLMDEV_STATUS_MEDIA_STATUS_MASK, status) == \
2844 ++ CXLMDEV_MS_READY)
2845 ++#define CXLMDEV_MBOX_IF_READY BIT(4)
2846 ++#define CXLMDEV_RESET_NEEDED_MASK GENMASK(7, 5)
2847 ++#define CXLMDEV_RESET_NEEDED_NOT 0
2848 ++#define CXLMDEV_RESET_NEEDED_COLD 1
2849 ++#define CXLMDEV_RESET_NEEDED_WARM 2
2850 ++#define CXLMDEV_RESET_NEEDED_HOT 3
2851 ++#define CXLMDEV_RESET_NEEDED_CXL 4
2852 ++#define CXLMDEV_RESET_NEEDED(status) \
2853 ++ (FIELD_GET(CXLMDEV_RESET_NEEDED_MASK, status) != \
2854 ++ CXLMDEV_RESET_NEEDED_NOT)
2855 ++
2856 ++/*
2857 ++ * An entire PCI topology full of devices should be enough for any
2858 ++ * config
2859 ++ */
2860 ++#define CXL_MEM_MAX_DEVS 65536
2861 ++
2862 ++/**
2863 ++ * struct cdevm_file_operations - devm coordinated cdev file operations
2864 ++ * @fops: file operations that are synchronized against @shutdown
2865 ++ * @shutdown: disconnect driver data
2866 ++ *
2867 ++ * @shutdown is invoked in the devres release path to disconnect any
2868 ++ * driver instance data from @dev. It assumes synchronization with any
2869 ++ * fops operation that requires driver data. After @shutdown an
2870 ++ * operation may only reference @device data.
2871 ++ */
2872 ++struct cdevm_file_operations {
2873 ++ struct file_operations fops;
2874 ++ void (*shutdown)(struct device *dev);
2875 ++};
2876 ++
2877 ++/**
2878 ++ * struct cxl_memdev - CXL bus object representing a Type-3 Memory Device
2879 ++ * @dev: driver core device object
2880 ++ * @cdev: char dev core object for ioctl operations
2881 ++ * @cxlm: pointer to the parent device driver data
2882 ++ * @id: id number of this memdev instance.
2883 ++ */
2884 ++struct cxl_memdev {
2885 ++ struct device dev;
2886 ++ struct cdev cdev;
2887 ++ struct cxl_mem *cxlm;
2888 ++ int id;
2889 ++};
2890 ++
2891 ++/**
2892 ++ * struct cxl_mem - A CXL memory device
2893 ++ * @pdev: The PCI device associated with this CXL device.
2894 ++ * @cxlmd: Logical memory device chardev / interface
2895 ++ * @regs: Parsed register blocks
2896 ++ * @payload_size: Size of space for payload
2897 ++ * (CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register)
2898 ++ * @lsa_size: Size of Label Storage Area
2899 ++ * (CXL 2.0 8.2.9.5.1.1 Identify Memory Device)
2900 ++ * @mbox_mutex: Mutex to synchronize mailbox access.
2901 ++ * @firmware_version: Firmware version for the memory device.
2902 ++ * @enabled_cmds: Hardware commands found enabled in CEL.
2903 ++ * @pmem_range: Persistent memory capacity information.
2904 ++ * @ram_range: Volatile memory capacity information.
2905 ++ */
2906 ++struct cxl_mem {
2907 ++ struct pci_dev *pdev;
2908 ++ struct cxl_memdev *cxlmd;
2909 ++
2910 ++ struct cxl_regs regs;
2911 ++
2912 ++ size_t payload_size;
2913 ++ size_t lsa_size;
2914 ++ struct mutex mbox_mutex; /* Protects device mailbox and firmware */
2915 ++ char firmware_version[0x10];
2916 ++ unsigned long *enabled_cmds;
2917 ++
2918 ++ struct range pmem_range;
2919 ++ struct range ram_range;
2920 ++};
2921 ++#endif /* __CXL_MEM_H__ */
2922 +diff --git a/drivers/cxl/mem.h b/drivers/cxl/mem.h
2923 +deleted file mode 100644
2924 +index 8f02d02b26b45..0000000000000
2925 +--- a/drivers/cxl/mem.h
2926 ++++ /dev/null
2927 +@@ -1,81 +0,0 @@
2928 +-/* SPDX-License-Identifier: GPL-2.0-only */
2929 +-/* Copyright(c) 2020-2021 Intel Corporation. */
2930 +-#ifndef __CXL_MEM_H__
2931 +-#define __CXL_MEM_H__
2932 +-#include <linux/cdev.h>
2933 +-#include "cxl.h"
2934 +-
2935 +-/* CXL 2.0 8.2.8.5.1.1 Memory Device Status Register */
2936 +-#define CXLMDEV_STATUS_OFFSET 0x0
2937 +-#define CXLMDEV_DEV_FATAL BIT(0)
2938 +-#define CXLMDEV_FW_HALT BIT(1)
2939 +-#define CXLMDEV_STATUS_MEDIA_STATUS_MASK GENMASK(3, 2)
2940 +-#define CXLMDEV_MS_NOT_READY 0
2941 +-#define CXLMDEV_MS_READY 1
2942 +-#define CXLMDEV_MS_ERROR 2
2943 +-#define CXLMDEV_MS_DISABLED 3
2944 +-#define CXLMDEV_READY(status) \
2945 +- (FIELD_GET(CXLMDEV_STATUS_MEDIA_STATUS_MASK, status) == \
2946 +- CXLMDEV_MS_READY)
2947 +-#define CXLMDEV_MBOX_IF_READY BIT(4)
2948 +-#define CXLMDEV_RESET_NEEDED_MASK GENMASK(7, 5)
2949 +-#define CXLMDEV_RESET_NEEDED_NOT 0
2950 +-#define CXLMDEV_RESET_NEEDED_COLD 1
2951 +-#define CXLMDEV_RESET_NEEDED_WARM 2
2952 +-#define CXLMDEV_RESET_NEEDED_HOT 3
2953 +-#define CXLMDEV_RESET_NEEDED_CXL 4
2954 +-#define CXLMDEV_RESET_NEEDED(status) \
2955 +- (FIELD_GET(CXLMDEV_RESET_NEEDED_MASK, status) != \
2956 +- CXLMDEV_RESET_NEEDED_NOT)
2957 +-
2958 +-/*
2959 +- * An entire PCI topology full of devices should be enough for any
2960 +- * config
2961 +- */
2962 +-#define CXL_MEM_MAX_DEVS 65536
2963 +-
2964 +-/**
2965 +- * struct cxl_memdev - CXL bus object representing a Type-3 Memory Device
2966 +- * @dev: driver core device object
2967 +- * @cdev: char dev core object for ioctl operations
2968 +- * @cxlm: pointer to the parent device driver data
2969 +- * @id: id number of this memdev instance.
2970 +- */
2971 +-struct cxl_memdev {
2972 +- struct device dev;
2973 +- struct cdev cdev;
2974 +- struct cxl_mem *cxlm;
2975 +- int id;
2976 +-};
2977 +-
2978 +-/**
2979 +- * struct cxl_mem - A CXL memory device
2980 +- * @pdev: The PCI device associated with this CXL device.
2981 +- * @cxlmd: Logical memory device chardev / interface
2982 +- * @regs: Parsed register blocks
2983 +- * @payload_size: Size of space for payload
2984 +- * (CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register)
2985 +- * @lsa_size: Size of Label Storage Area
2986 +- * (CXL 2.0 8.2.9.5.1.1 Identify Memory Device)
2987 +- * @mbox_mutex: Mutex to synchronize mailbox access.
2988 +- * @firmware_version: Firmware version for the memory device.
2989 +- * @enabled_cmds: Hardware commands found enabled in CEL.
2990 +- * @pmem_range: Persistent memory capacity information.
2991 +- * @ram_range: Volatile memory capacity information.
2992 +- */
2993 +-struct cxl_mem {
2994 +- struct pci_dev *pdev;
2995 +- struct cxl_memdev *cxlmd;
2996 +-
2997 +- struct cxl_regs regs;
2998 +-
2999 +- size_t payload_size;
3000 +- size_t lsa_size;
3001 +- struct mutex mbox_mutex; /* Protects device mailbox and firmware */
3002 +- char firmware_version[0x10];
3003 +- unsigned long *enabled_cmds;
3004 +-
3005 +- struct range pmem_range;
3006 +- struct range ram_range;
3007 +-};
3008 +-#endif /* __CXL_MEM_H__ */
3009 +diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
3010 +index 145ad4bc305fc..e809596049b66 100644
3011 +--- a/drivers/cxl/pci.c
3012 ++++ b/drivers/cxl/pci.c
3013 +@@ -12,9 +12,9 @@
3014 + #include <linux/pci.h>
3015 + #include <linux/io.h>
3016 + #include <linux/io-64-nonatomic-lo-hi.h>
3017 ++#include "cxlmem.h"
3018 + #include "pci.h"
3019 + #include "cxl.h"
3020 +-#include "mem.h"
3021 +
3022 + /**
3023 + * DOC: cxl pci
3024 +@@ -806,13 +806,30 @@ static int cxl_memdev_release_file(struct inode *inode, struct file *file)
3025 + return 0;
3026 + }
3027 +
3028 +-static const struct file_operations cxl_memdev_fops = {
3029 +- .owner = THIS_MODULE,
3030 +- .unlocked_ioctl = cxl_memdev_ioctl,
3031 +- .open = cxl_memdev_open,
3032 +- .release = cxl_memdev_release_file,
3033 +- .compat_ioctl = compat_ptr_ioctl,
3034 +- .llseek = noop_llseek,
3035 ++static struct cxl_memdev *to_cxl_memdev(struct device *dev)
3036 ++{
3037 ++ return container_of(dev, struct cxl_memdev, dev);
3038 ++}
3039 ++
3040 ++static void cxl_memdev_shutdown(struct device *dev)
3041 ++{
3042 ++ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
3043 ++
3044 ++ down_write(&cxl_memdev_rwsem);
3045 ++ cxlmd->cxlm = NULL;
3046 ++ up_write(&cxl_memdev_rwsem);
3047 ++}
3048 ++
3049 ++static const struct cdevm_file_operations cxl_memdev_fops = {
3050 ++ .fops = {
3051 ++ .owner = THIS_MODULE,
3052 ++ .unlocked_ioctl = cxl_memdev_ioctl,
3053 ++ .open = cxl_memdev_open,
3054 ++ .release = cxl_memdev_release_file,
3055 ++ .compat_ioctl = compat_ptr_ioctl,
3056 ++ .llseek = noop_llseek,
3057 ++ },
3058 ++ .shutdown = cxl_memdev_shutdown,
3059 + };
3060 +
3061 + static inline struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
3062 +@@ -1161,11 +1178,6 @@ free_maps:
3063 + return ret;
3064 + }
3065 +
3066 +-static struct cxl_memdev *to_cxl_memdev(struct device *dev)
3067 +-{
3068 +- return container_of(dev, struct cxl_memdev, dev);
3069 +-}
3070 +-
3071 + static void cxl_memdev_release(struct device *dev)
3072 + {
3073 + struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
3074 +@@ -1281,24 +1293,22 @@ static const struct device_type cxl_memdev_type = {
3075 + .groups = cxl_memdev_attribute_groups,
3076 + };
3077 +
3078 +-static void cxl_memdev_shutdown(struct cxl_memdev *cxlmd)
3079 +-{
3080 +- down_write(&cxl_memdev_rwsem);
3081 +- cxlmd->cxlm = NULL;
3082 +- up_write(&cxl_memdev_rwsem);
3083 +-}
3084 +-
3085 + static void cxl_memdev_unregister(void *_cxlmd)
3086 + {
3087 + struct cxl_memdev *cxlmd = _cxlmd;
3088 + struct device *dev = &cxlmd->dev;
3089 ++ struct cdev *cdev = &cxlmd->cdev;
3090 ++ const struct cdevm_file_operations *cdevm_fops;
3091 ++
3092 ++ cdevm_fops = container_of(cdev->ops, typeof(*cdevm_fops), fops);
3093 ++ cdevm_fops->shutdown(dev);
3094 +
3095 + cdev_device_del(&cxlmd->cdev, dev);
3096 +- cxl_memdev_shutdown(cxlmd);
3097 + put_device(dev);
3098 + }
3099 +
3100 +-static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm)
3101 ++static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm,
3102 ++ const struct file_operations *fops)
3103 + {
3104 + struct pci_dev *pdev = cxlm->pdev;
3105 + struct cxl_memdev *cxlmd;
3106 +@@ -1324,7 +1334,7 @@ static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm)
3107 + device_set_pm_not_required(dev);
3108 +
3109 + cdev = &cxlmd->cdev;
3110 +- cdev_init(cdev, &cxl_memdev_fops);
3111 ++ cdev_init(cdev, fops);
3112 + return cxlmd;
3113 +
3114 + err:
3115 +@@ -1332,15 +1342,16 @@ err:
3116 + return ERR_PTR(rc);
3117 + }
3118 +
3119 +-static struct cxl_memdev *devm_cxl_add_memdev(struct device *host,
3120 +- struct cxl_mem *cxlm)
3121 ++static struct cxl_memdev *
3122 ++devm_cxl_add_memdev(struct device *host, struct cxl_mem *cxlm,
3123 ++ const struct cdevm_file_operations *cdevm_fops)
3124 + {
3125 + struct cxl_memdev *cxlmd;
3126 + struct device *dev;
3127 + struct cdev *cdev;
3128 + int rc;
3129 +
3130 +- cxlmd = cxl_memdev_alloc(cxlm);
3131 ++ cxlmd = cxl_memdev_alloc(cxlm, &cdevm_fops->fops);
3132 + if (IS_ERR(cxlmd))
3133 + return cxlmd;
3134 +
3135 +@@ -1370,7 +1381,7 @@ err:
3136 + * The cdev was briefly live, shutdown any ioctl operations that
3137 + * saw that state.
3138 + */
3139 +- cxl_memdev_shutdown(cxlmd);
3140 ++ cdevm_fops->shutdown(dev);
3141 + put_device(dev);
3142 + return ERR_PTR(rc);
3143 + }
3144 +@@ -1611,7 +1622,7 @@ static int cxl_mem_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3145 + if (rc)
3146 + return rc;
3147 +
3148 +- cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlm);
3149 ++ cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlm, &cxl_memdev_fops);
3150 + if (IS_ERR(cxlmd))
3151 + return PTR_ERR(cxlmd);
3152 +
3153 +diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
3154 +index 0088e41dd2f32..9652c3ee41e7f 100644
3155 +--- a/drivers/cxl/pmem.c
3156 ++++ b/drivers/cxl/pmem.c
3157 +@@ -6,7 +6,7 @@
3158 + #include <linux/ndctl.h>
3159 + #include <linux/async.h>
3160 + #include <linux/slab.h>
3161 +-#include "mem.h"
3162 ++#include "cxlmem.h"
3163 + #include "cxl.h"
3164 +
3165 + /*
3166 +diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
3167 +index 4e16c71c24b71..6eb4d13f426ee 100644
3168 +--- a/drivers/dma-buf/Kconfig
3169 ++++ b/drivers/dma-buf/Kconfig
3170 +@@ -42,6 +42,7 @@ config UDMABUF
3171 + config DMABUF_MOVE_NOTIFY
3172 + bool "Move notify between drivers (EXPERIMENTAL)"
3173 + default n
3174 ++ depends on DMA_SHARED_BUFFER
3175 + help
3176 + Don't pin buffers if the dynamic DMA-buf interface is available on
3177 + both the exporter as well as the importer. This fixes a security
3178 +@@ -52,6 +53,7 @@ config DMABUF_MOVE_NOTIFY
3179 +
3180 + config DMABUF_DEBUG
3181 + bool "DMA-BUF debug checks"
3182 ++ depends on DMA_SHARED_BUFFER
3183 + default y if DMA_API_DEBUG
3184 + help
3185 + This option enables additional checks for DMA-BUF importers and
3186 +diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
3187 +index 39b5b46e880f2..4f70cf57471a9 100644
3188 +--- a/drivers/dma/Kconfig
3189 ++++ b/drivers/dma/Kconfig
3190 +@@ -279,7 +279,7 @@ config INTEL_IDMA64
3191 +
3192 + config INTEL_IDXD
3193 + tristate "Intel Data Accelerators support"
3194 +- depends on PCI && X86_64
3195 ++ depends on PCI && X86_64 && !UML
3196 + depends on PCI_MSI
3197 + depends on SBITMAP
3198 + select DMA_ENGINE
3199 +@@ -315,7 +315,7 @@ config INTEL_IDXD_PERFMON
3200 +
3201 + config INTEL_IOATDMA
3202 + tristate "Intel I/OAT DMA support"
3203 +- depends on PCI && X86_64
3204 ++ depends on PCI && X86_64 && !UML
3205 + select DMA_ENGINE
3206 + select DMA_ENGINE_RAID
3207 + select DCA
3208 +diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
3209 +index 235f1396f9686..52768dc8ce124 100644
3210 +--- a/drivers/dma/acpi-dma.c
3211 ++++ b/drivers/dma/acpi-dma.c
3212 +@@ -70,10 +70,14 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
3213 +
3214 + si = (const struct acpi_csrt_shared_info *)&grp[1];
3215 +
3216 +- /* Match device by MMIO and IRQ */
3217 ++ /* Match device by MMIO */
3218 + if (si->mmio_base_low != lower_32_bits(mem) ||
3219 +- si->mmio_base_high != upper_32_bits(mem) ||
3220 +- si->gsi_interrupt != irq)
3221 ++ si->mmio_base_high != upper_32_bits(mem))
3222 ++ return 0;
3223 ++
3224 ++ /* Match device by Linux vIRQ */
3225 ++ ret = acpi_register_gsi(NULL, si->gsi_interrupt, si->interrupt_mode, si->interrupt_polarity);
3226 ++ if (ret != irq)
3227 + return 0;
3228 +
3229 + dev_dbg(&adev->dev, "matches with %.4s%04X (rev %u)\n",
3230 +diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
3231 +index 420b93fe5febc..9c6760ae5aef2 100644
3232 +--- a/drivers/dma/idxd/device.c
3233 ++++ b/drivers/dma/idxd/device.c
3234 +@@ -15,6 +15,8 @@
3235 +
3236 + static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
3237 + u32 *status);
3238 ++static void idxd_device_wqs_clear_state(struct idxd_device *idxd);
3239 ++static void idxd_wq_disable_cleanup(struct idxd_wq *wq);
3240 +
3241 + /* Interrupt control bits */
3242 + void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
3243 +@@ -234,7 +236,7 @@ int idxd_wq_enable(struct idxd_wq *wq)
3244 + return 0;
3245 + }
3246 +
3247 +-int idxd_wq_disable(struct idxd_wq *wq)
3248 ++int idxd_wq_disable(struct idxd_wq *wq, bool reset_config)
3249 + {
3250 + struct idxd_device *idxd = wq->idxd;
3251 + struct device *dev = &idxd->pdev->dev;
3252 +@@ -255,6 +257,8 @@ int idxd_wq_disable(struct idxd_wq *wq)
3253 + return -ENXIO;
3254 + }
3255 +
3256 ++ if (reset_config)
3257 ++ idxd_wq_disable_cleanup(wq);
3258 + wq->state = IDXD_WQ_DISABLED;
3259 + dev_dbg(dev, "WQ %d disabled\n", wq->id);
3260 + return 0;
3261 +@@ -289,6 +293,7 @@ void idxd_wq_reset(struct idxd_wq *wq)
3262 +
3263 + operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
3264 + idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
3265 ++ idxd_wq_disable_cleanup(wq);
3266 + wq->state = IDXD_WQ_DISABLED;
3267 + }
3268 +
3269 +@@ -337,7 +342,7 @@ int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
3270 + unsigned int offset;
3271 + unsigned long flags;
3272 +
3273 +- rc = idxd_wq_disable(wq);
3274 ++ rc = idxd_wq_disable(wq, false);
3275 + if (rc < 0)
3276 + return rc;
3277 +
3278 +@@ -364,7 +369,7 @@ int idxd_wq_disable_pasid(struct idxd_wq *wq)
3279 + unsigned int offset;
3280 + unsigned long flags;
3281 +
3282 +- rc = idxd_wq_disable(wq);
3283 ++ rc = idxd_wq_disable(wq, false);
3284 + if (rc < 0)
3285 + return rc;
3286 +
3287 +@@ -383,11 +388,11 @@ int idxd_wq_disable_pasid(struct idxd_wq *wq)
3288 + return 0;
3289 + }
3290 +
3291 +-void idxd_wq_disable_cleanup(struct idxd_wq *wq)
3292 ++static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
3293 + {
3294 + struct idxd_device *idxd = wq->idxd;
3295 +
3296 +- lockdep_assert_held(&idxd->dev_lock);
3297 ++ lockdep_assert_held(&wq->wq_lock);
3298 + memset(wq->wqcfg, 0, idxd->wqcfg_size);
3299 + wq->type = IDXD_WQT_NONE;
3300 + wq->size = 0;
3301 +@@ -396,6 +401,7 @@ void idxd_wq_disable_cleanup(struct idxd_wq *wq)
3302 + wq->priority = 0;
3303 + wq->ats_dis = 0;
3304 + clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
3305 ++ clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
3306 + memset(wq->name, 0, WQ_NAME_SIZE);
3307 + }
3308 +
3309 +@@ -481,6 +487,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
3310 + union idxd_command_reg cmd;
3311 + DECLARE_COMPLETION_ONSTACK(done);
3312 + unsigned long flags;
3313 ++ u32 stat;
3314 +
3315 + if (idxd_device_is_halted(idxd)) {
3316 + dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
3317 +@@ -513,11 +520,11 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
3318 + */
3319 + spin_unlock_irqrestore(&idxd->cmd_lock, flags);
3320 + wait_for_completion(&done);
3321 ++ stat = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
3322 + spin_lock_irqsave(&idxd->cmd_lock, flags);
3323 +- if (status) {
3324 +- *status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
3325 +- idxd->cmd_status = *status & GENMASK(7, 0);
3326 +- }
3327 ++ if (status)
3328 ++ *status = stat;
3329 ++ idxd->cmd_status = stat & GENMASK(7, 0);
3330 +
3331 + __clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
3332 + /* Wake up other pending commands */
3333 +@@ -548,22 +555,6 @@ int idxd_device_enable(struct idxd_device *idxd)
3334 + return 0;
3335 + }
3336 +
3337 +-void idxd_device_wqs_clear_state(struct idxd_device *idxd)
3338 +-{
3339 +- int i;
3340 +-
3341 +- lockdep_assert_held(&idxd->dev_lock);
3342 +-
3343 +- for (i = 0; i < idxd->max_wqs; i++) {
3344 +- struct idxd_wq *wq = idxd->wqs[i];
3345 +-
3346 +- if (wq->state == IDXD_WQ_ENABLED) {
3347 +- idxd_wq_disable_cleanup(wq);
3348 +- wq->state = IDXD_WQ_DISABLED;
3349 +- }
3350 +- }
3351 +-}
3352 +-
3353 + int idxd_device_disable(struct idxd_device *idxd)
3354 + {
3355 + struct device *dev = &idxd->pdev->dev;
3356 +@@ -585,7 +576,7 @@ int idxd_device_disable(struct idxd_device *idxd)
3357 + }
3358 +
3359 + spin_lock_irqsave(&idxd->dev_lock, flags);
3360 +- idxd_device_wqs_clear_state(idxd);
3361 ++ idxd_device_clear_state(idxd);
3362 + idxd->state = IDXD_DEV_CONF_READY;
3363 + spin_unlock_irqrestore(&idxd->dev_lock, flags);
3364 + return 0;
3365 +@@ -597,7 +588,7 @@ void idxd_device_reset(struct idxd_device *idxd)
3366 +
3367 + idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
3368 + spin_lock_irqsave(&idxd->dev_lock, flags);
3369 +- idxd_device_wqs_clear_state(idxd);
3370 ++ idxd_device_clear_state(idxd);
3371 + idxd->state = IDXD_DEV_CONF_READY;
3372 + spin_unlock_irqrestore(&idxd->dev_lock, flags);
3373 + }
3374 +@@ -685,6 +676,59 @@ int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
3375 + }
3376 +
3377 + /* Device configuration bits */
3378 ++static void idxd_engines_clear_state(struct idxd_device *idxd)
3379 ++{
3380 ++ struct idxd_engine *engine;
3381 ++ int i;
3382 ++
3383 ++ lockdep_assert_held(&idxd->dev_lock);
3384 ++ for (i = 0; i < idxd->max_engines; i++) {
3385 ++ engine = idxd->engines[i];
3386 ++ engine->group = NULL;
3387 ++ }
3388 ++}
3389 ++
3390 ++static void idxd_groups_clear_state(struct idxd_device *idxd)
3391 ++{
3392 ++ struct idxd_group *group;
3393 ++ int i;
3394 ++
3395 ++ lockdep_assert_held(&idxd->dev_lock);
3396 ++ for (i = 0; i < idxd->max_groups; i++) {
3397 ++ group = idxd->groups[i];
3398 ++ memset(&group->grpcfg, 0, sizeof(group->grpcfg));
3399 ++ group->num_engines = 0;
3400 ++ group->num_wqs = 0;
3401 ++ group->use_token_limit = false;
3402 ++ group->tokens_allowed = 0;
3403 ++ group->tokens_reserved = 0;
3404 ++ group->tc_a = -1;
3405 ++ group->tc_b = -1;
3406 ++ }
3407 ++}
3408 ++
3409 ++static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
3410 ++{
3411 ++ int i;
3412 ++
3413 ++ lockdep_assert_held(&idxd->dev_lock);
3414 ++ for (i = 0; i < idxd->max_wqs; i++) {
3415 ++ struct idxd_wq *wq = idxd->wqs[i];
3416 ++
3417 ++ if (wq->state == IDXD_WQ_ENABLED) {
3418 ++ idxd_wq_disable_cleanup(wq);
3419 ++ wq->state = IDXD_WQ_DISABLED;
3420 ++ }
3421 ++ }
3422 ++}
3423 ++
3424 ++void idxd_device_clear_state(struct idxd_device *idxd)
3425 ++{
3426 ++ idxd_groups_clear_state(idxd);
3427 ++ idxd_engines_clear_state(idxd);
3428 ++ idxd_device_wqs_clear_state(idxd);
3429 ++}
3430 ++
3431 + void idxd_msix_perm_setup(struct idxd_device *idxd)
3432 + {
3433 + union msix_perm mperm;
3434 +diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
3435 +index fc708be7ad9a2..0f27374eae4b3 100644
3436 +--- a/drivers/dma/idxd/idxd.h
3437 ++++ b/drivers/dma/idxd/idxd.h
3438 +@@ -428,9 +428,8 @@ int idxd_device_init_reset(struct idxd_device *idxd);
3439 + int idxd_device_enable(struct idxd_device *idxd);
3440 + int idxd_device_disable(struct idxd_device *idxd);
3441 + void idxd_device_reset(struct idxd_device *idxd);
3442 +-void idxd_device_cleanup(struct idxd_device *idxd);
3443 ++void idxd_device_clear_state(struct idxd_device *idxd);
3444 + int idxd_device_config(struct idxd_device *idxd);
3445 +-void idxd_device_wqs_clear_state(struct idxd_device *idxd);
3446 + void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid);
3447 + int idxd_device_load_config(struct idxd_device *idxd);
3448 + int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle,
3449 +@@ -443,12 +442,11 @@ void idxd_wqs_unmap_portal(struct idxd_device *idxd);
3450 + int idxd_wq_alloc_resources(struct idxd_wq *wq);
3451 + void idxd_wq_free_resources(struct idxd_wq *wq);
3452 + int idxd_wq_enable(struct idxd_wq *wq);
3453 +-int idxd_wq_disable(struct idxd_wq *wq);
3454 ++int idxd_wq_disable(struct idxd_wq *wq, bool reset_config);
3455 + void idxd_wq_drain(struct idxd_wq *wq);
3456 + void idxd_wq_reset(struct idxd_wq *wq);
3457 + int idxd_wq_map_portal(struct idxd_wq *wq);
3458 + void idxd_wq_unmap_portal(struct idxd_wq *wq);
3459 +-void idxd_wq_disable_cleanup(struct idxd_wq *wq);
3460 + int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid);
3461 + int idxd_wq_disable_pasid(struct idxd_wq *wq);
3462 + void idxd_wq_quiesce(struct idxd_wq *wq);
3463 +diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
3464 +index 4e3a7198c0caf..ba839d3569cdf 100644
3465 +--- a/drivers/dma/idxd/irq.c
3466 ++++ b/drivers/dma/idxd/irq.c
3467 +@@ -59,7 +59,7 @@ static void idxd_device_reinit(struct work_struct *work)
3468 + return;
3469 +
3470 + out:
3471 +- idxd_device_wqs_clear_state(idxd);
3472 ++ idxd_device_clear_state(idxd);
3473 + }
3474 +
3475 + static void idxd_device_fault_work(struct work_struct *work)
3476 +@@ -192,7 +192,7 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
3477 + spin_lock_bh(&idxd->dev_lock);
3478 + idxd_wqs_quiesce(idxd);
3479 + idxd_wqs_unmap_portal(idxd);
3480 +- idxd_device_wqs_clear_state(idxd);
3481 ++ idxd_device_clear_state(idxd);
3482 + dev_err(&idxd->pdev->dev,
3483 + "idxd halted, need %s.\n",
3484 + gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
3485 +@@ -269,7 +269,11 @@ static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
3486 + u8 status = desc->completion->status & DSA_COMP_STATUS_MASK;
3487 +
3488 + if (status) {
3489 +- if (unlikely(status == IDXD_COMP_DESC_ABORT)) {
3490 ++ /*
3491 ++ * Check against the original status as ABORT is software defined
3492 ++ * and 0xff, which DSA_COMP_STATUS_MASK can mask out.
3493 ++ */
3494 ++ if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
3495 + complete_desc(desc, IDXD_COMPLETE_ABORT);
3496 + (*processed)++;
3497 + continue;
3498 +@@ -333,7 +337,11 @@ static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
3499 + list_for_each_entry(desc, &flist, list) {
3500 + u8 status = desc->completion->status & DSA_COMP_STATUS_MASK;
3501 +
3502 +- if (unlikely(status == IDXD_COMP_DESC_ABORT)) {
3503 ++ /*
3504 ++ * Check against the original status as ABORT is software defined
3505 ++ * and 0xff, which DSA_COMP_STATUS_MASK can mask out.
3506 ++ */
3507 ++ if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
3508 + complete_desc(desc, IDXD_COMPLETE_ABORT);
3509 + continue;
3510 + }
3511 +diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
3512 +index 36c9c1a89b7e7..196d6cf119656 100644
3513 +--- a/drivers/dma/idxd/submit.c
3514 ++++ b/drivers/dma/idxd/submit.c
3515 +@@ -67,7 +67,7 @@ struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype)
3516 + if (signal_pending_state(TASK_INTERRUPTIBLE, current))
3517 + break;
3518 + idx = sbitmap_queue_get(sbq, &cpu);
3519 +- if (idx > 0)
3520 ++ if (idx >= 0)
3521 + break;
3522 + schedule();
3523 + }
3524 +diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
3525 +index bb4df63906a72..528cde54724b4 100644
3526 +--- a/drivers/dma/idxd/sysfs.c
3527 ++++ b/drivers/dma/idxd/sysfs.c
3528 +@@ -129,7 +129,7 @@ static int enable_wq(struct idxd_wq *wq)
3529 + rc = idxd_wq_map_portal(wq);
3530 + if (rc < 0) {
3531 + dev_warn(dev, "wq portal mapping failed: %d\n", rc);
3532 +- rc = idxd_wq_disable(wq);
3533 ++ rc = idxd_wq_disable(wq, false);
3534 + if (rc < 0)
3535 + dev_warn(dev, "IDXD wq disable failed\n");
3536 + mutex_unlock(&wq->wq_lock);
3537 +@@ -262,8 +262,6 @@ static void disable_wq(struct idxd_wq *wq)
3538 +
3539 + static int idxd_config_bus_remove(struct device *dev)
3540 + {
3541 +- int rc;
3542 +-
3543 + dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));
3544 +
3545 + /* disable workqueue here */
3546 +@@ -288,22 +286,12 @@ static int idxd_config_bus_remove(struct device *dev)
3547 + }
3548 +
3549 + idxd_unregister_dma_device(idxd);
3550 +- rc = idxd_device_disable(idxd);
3551 +- if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
3552 +- for (i = 0; i < idxd->max_wqs; i++) {
3553 +- struct idxd_wq *wq = idxd->wqs[i];
3554 +-
3555 +- mutex_lock(&wq->wq_lock);
3556 +- idxd_wq_disable_cleanup(wq);
3557 +- mutex_unlock(&wq->wq_lock);
3558 +- }
3559 +- }
3560 ++ idxd_device_disable(idxd);
3561 ++ if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
3562 ++ idxd_device_reset(idxd);
3563 + module_put(THIS_MODULE);
3564 +- if (rc < 0)
3565 +- dev_warn(dev, "Device disable failed\n");
3566 +- else
3567 +- dev_info(dev, "Device %s disabled\n", dev_name(dev));
3568 +
3569 ++ dev_info(dev, "Device %s disabled\n", dev_name(dev));
3570 + }
3571 +
3572 + return 0;
3573 +diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
3574 +index 0ef5ca81ba4d0..4357d2395e6b7 100644
3575 +--- a/drivers/dma/sprd-dma.c
3576 ++++ b/drivers/dma/sprd-dma.c
3577 +@@ -1265,6 +1265,7 @@ static const struct of_device_id sprd_dma_match[] = {
3578 + { .compatible = "sprd,sc9860-dma", },
3579 + {},
3580 + };
3581 ++MODULE_DEVICE_TABLE(of, sprd_dma_match);
3582 +
3583 + static int __maybe_unused sprd_dma_runtime_suspend(struct device *dev)
3584 + {
3585 +diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
3586 +index 4b9530a7bf652..434b1ff22e318 100644
3587 +--- a/drivers/dma/xilinx/xilinx_dma.c
3588 ++++ b/drivers/dma/xilinx/xilinx_dma.c
3589 +@@ -3077,7 +3077,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
3590 + xdev->ext_addr = false;
3591 +
3592 + /* Set the dma mask bits */
3593 +- dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));
3594 ++ dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
3595 +
3596 + /* Initialize the DMA engine */
3597 + xdev->common.dev = &pdev->dev;
3598 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
3599 +index 8f53837d4d3ee..97178b307ed6f 100644
3600 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
3601 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
3602 +@@ -468,14 +468,18 @@ bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *ade
3603 + return (fw_cap & ATOM_FIRMWARE_CAP_DYNAMIC_BOOT_CFG_ENABLE) ? true : false;
3604 + }
3605 +
3606 +-/*
3607 +- * Helper function to query RAS EEPROM address
3608 +- *
3609 +- * @adev: amdgpu_device pointer
3610 ++/**
3611 ++ * amdgpu_atomfirmware_ras_rom_addr -- Get the RAS EEPROM addr from VBIOS
3612 ++ * adev: amdgpu_device pointer
3613 ++ * i2c_address: pointer to u8; if not NULL, will contain
3614 ++ * the RAS EEPROM address if the function returns true
3615 + *
3616 +- * Return true if vbios supports ras rom address reporting
3617 ++ * Return true if VBIOS supports RAS EEPROM address reporting,
3618 ++ * else return false. If true and @i2c_address is not NULL,
3619 ++ * will contain the RAS ROM address.
3620 + */
3621 +-bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t* i2c_address)
3622 ++bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev,
3623 ++ u8 *i2c_address)
3624 + {
3625 + struct amdgpu_mode_info *mode_info = &adev->mode_info;
3626 + int index;
3627 +@@ -483,27 +487,39 @@ bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t* i2c_a
3628 + union firmware_info *firmware_info;
3629 + u8 frev, crev;
3630 +
3631 +- if (i2c_address == NULL)
3632 +- return false;
3633 +-
3634 +- *i2c_address = 0;
3635 +-
3636 + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
3637 +- firmwareinfo);
3638 ++ firmwareinfo);
3639 +
3640 + if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
3641 +- index, &size, &frev, &crev, &data_offset)) {
3642 ++ index, &size, &frev, &crev,
3643 ++ &data_offset)) {
3644 + /* support firmware_info 3.4 + */
3645 + if ((frev == 3 && crev >=4) || (frev > 3)) {
3646 + firmware_info = (union firmware_info *)
3647 + (mode_info->atom_context->bios + data_offset);
3648 +- *i2c_address = firmware_info->v34.ras_rom_i2c_slave_addr;
3649 ++ /* The ras_rom_i2c_slave_addr should ideally
3650 ++ * be a 19-bit EEPROM address, which would be
3651 ++ * used as is by the driver; see top of
3652 ++ * amdgpu_eeprom.c.
3653 ++ *
3654 ++ * When this is the case, 0 is of course a
3655 ++ * valid RAS EEPROM address, in which case,
3656 ++ * we'll drop the first "if (firm...)" and only
3657 ++ * leave the check for the pointer.
3658 ++ *
3659 ++ * The reason this works right now is because
3660 ++ * ras_rom_i2c_slave_addr contains the EEPROM
3661 ++ * device type qualifier 1010b in the top 4
3662 ++ * bits.
3663 ++ */
3664 ++ if (firmware_info->v34.ras_rom_i2c_slave_addr) {
3665 ++ if (i2c_address)
3666 ++ *i2c_address = firmware_info->v34.ras_rom_i2c_slave_addr;
3667 ++ return true;
3668 ++ }
3669 + }
3670 + }
3671 +
3672 +- if (*i2c_address != 0)
3673 +- return true;
3674 +-
3675 + return false;
3676 + }
3677 +
3678 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
3679 +index d94c5419ec25c..5a6857c44bb66 100644
3680 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
3681 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
3682 +@@ -59,6 +59,7 @@ void amdgpu_show_fdinfo(struct seq_file *m, struct file *f)
3683 + uint64_t vram_mem = 0, gtt_mem = 0, cpu_mem = 0;
3684 + struct drm_file *file = f->private_data;
3685 + struct amdgpu_device *adev = drm_to_adev(file->minor->dev);
3686 ++ struct amdgpu_bo *root;
3687 + int ret;
3688 +
3689 + ret = amdgpu_file_to_fpriv(f, &fpriv);
3690 +@@ -69,13 +70,19 @@ void amdgpu_show_fdinfo(struct seq_file *m, struct file *f)
3691 + dev = PCI_SLOT(adev->pdev->devfn);
3692 + fn = PCI_FUNC(adev->pdev->devfn);
3693 +
3694 +- ret = amdgpu_bo_reserve(fpriv->vm.root.bo, false);
3695 ++ root = amdgpu_bo_ref(fpriv->vm.root.bo);
3696 ++ if (!root)
3697 ++ return;
3698 ++
3699 ++ ret = amdgpu_bo_reserve(root, false);
3700 + if (ret) {
3701 + DRM_ERROR("Fail to reserve bo\n");
3702 + return;
3703 + }
3704 + amdgpu_vm_get_memory(&fpriv->vm, &vram_mem, &gtt_mem, &cpu_mem);
3705 +- amdgpu_bo_unreserve(fpriv->vm.root.bo);
3706 ++ amdgpu_bo_unreserve(root);
3707 ++ amdgpu_bo_unref(&root);
3708 ++
3709 + seq_printf(m, "pdev:\t%04x:%02x:%02x.%d\npasid:\t%u\n", domain, bus,
3710 + dev, fn, fpriv->vm.pasid);
3711 + seq_printf(m, "vram mem:\t%llu kB\n", vram_mem/1024UL);
3712 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
3713 +index dc7823d23ba89..dd38796ba30ad 100644
3714 +--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
3715 ++++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
3716 +@@ -510,8 +510,12 @@ static struct stream_encoder *dcn303_stream_encoder_create(enum engine_id eng_id
3717 + vpg = dcn303_vpg_create(ctx, vpg_inst);
3718 + afmt = dcn303_afmt_create(ctx, afmt_inst);
3719 +
3720 +- if (!enc1 || !vpg || !afmt)
3721 ++ if (!enc1 || !vpg || !afmt) {
3722 ++ kfree(enc1);
3723 ++ kfree(vpg);
3724 ++ kfree(afmt);
3725 + return NULL;
3726 ++ }
3727 +
3728 + dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, vpg, afmt, &stream_enc_regs[eng_id],
3729 + &se_shift, &se_mask);
3730 +diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
3731 +index 0541bfc81c1b4..1d76cf7cd85d5 100644
3732 +--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
3733 ++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
3734 +@@ -27,6 +27,9 @@
3735 + #include <linux/pci.h>
3736 + #include <linux/slab.h>
3737 + #include <asm/div64.h>
3738 ++#if IS_ENABLED(CONFIG_X86_64)
3739 ++#include <asm/intel-family.h>
3740 ++#endif
3741 + #include <drm/amdgpu_drm.h>
3742 + #include "ppatomctrl.h"
3743 + #include "atombios.h"
3744 +@@ -1733,6 +1736,17 @@ static int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
3745 + return result;
3746 + }
3747 +
3748 ++static bool intel_core_rkl_chk(void)
3749 ++{
3750 ++#if IS_ENABLED(CONFIG_X86_64)
3751 ++ struct cpuinfo_x86 *c = &cpu_data(0);
3752 ++
3753 ++ return (c->x86 == 6 && c->x86_model == INTEL_FAM6_ROCKETLAKE);
3754 ++#else
3755 ++ return false;
3756 ++#endif
3757 ++}
3758 ++
3759 + static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
3760 + {
3761 + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3762 +@@ -1758,7 +1772,8 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
3763 +
3764 + data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
3765 + data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
3766 +- data->pcie_dpm_key_disabled = hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
3767 ++ data->pcie_dpm_key_disabled =
3768 ++ intel_core_rkl_chk() || !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
3769 + /* need to set voltage control types before EVV patching */
3770 + data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
3771 + data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
3772 +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
3773 +index b0ece71aefdee..ce774579c89d1 100644
3774 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
3775 ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
3776 +@@ -57,7 +57,7 @@ nvkm_control_mthd_pstate_info(struct nvkm_control *ctrl, void *data, u32 size)
3777 + args->v0.count = 0;
3778 + args->v0.ustate_ac = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE;
3779 + args->v0.ustate_dc = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE;
3780 +- args->v0.pwrsrc = -ENOSYS;
3781 ++ args->v0.pwrsrc = -ENODEV;
3782 + args->v0.pstate = NVIF_CONTROL_PSTATE_INFO_V0_PSTATE_UNKNOWN;
3783 + }
3784 +
3785 +diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
3786 +index 32202385073a2..b47a5053eb854 100644
3787 +--- a/drivers/gpu/drm/ttm/ttm_bo.c
3788 ++++ b/drivers/gpu/drm/ttm/ttm_bo.c
3789 +@@ -1157,9 +1157,9 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
3790 + }
3791 +
3792 + if (bo->deleted) {
3793 +- ttm_bo_cleanup_refs(bo, false, false, locked);
3794 ++ ret = ttm_bo_cleanup_refs(bo, false, false, locked);
3795 + ttm_bo_put(bo);
3796 +- return 0;
3797 ++ return ret == -EBUSY ? -ENOSPC : ret;
3798 + }
3799 +
3800 + ttm_bo_del_from_lru(bo);
3801 +@@ -1213,7 +1213,7 @@ out:
3802 + if (locked)
3803 + dma_resv_unlock(bo->base.resv);
3804 + ttm_bo_put(bo);
3805 +- return ret;
3806 ++ return ret == -EBUSY ? -ENOSPC : ret;
3807 + }
3808 +
3809 + void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
3810 +diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
3811 +index bf4d9f6658ff9..c320891c8763c 100644
3812 +--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
3813 ++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
3814 +@@ -2004,6 +2004,7 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
3815 + caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
3816 +
3817 + if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
3818 ++ caps->flags |= HNS_ROCE_CAP_FLAG_STASH;
3819 + caps->max_sq_inline = HNS_ROCE_V3_MAX_SQ_INLINE;
3820 + } else {
3821 + caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
3822 +diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
3823 +index 19713cdd7b789..061dbee55cac1 100644
3824 +--- a/drivers/infiniband/hw/mlx5/mr.c
3825 ++++ b/drivers/infiniband/hw/mlx5/mr.c
3826 +@@ -995,7 +995,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
3827 + static void *mlx5_ib_alloc_xlt(size_t *nents, size_t ent_size, gfp_t gfp_mask)
3828 + {
3829 + const size_t xlt_chunk_align =
3830 +- MLX5_UMR_MTT_ALIGNMENT / sizeof(ent_size);
3831 ++ MLX5_UMR_MTT_ALIGNMENT / ent_size;
3832 + size_t size;
3833 + void *res = NULL;
3834 +
3835 +diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
3836 +index 46280e6e1535b..5c21f1ee50983 100644
3837 +--- a/drivers/iommu/amd/init.c
3838 ++++ b/drivers/iommu/amd/init.c
3839 +@@ -298,6 +298,22 @@ int amd_iommu_get_num_iommus(void)
3840 + return amd_iommus_present;
3841 + }
3842 +
3843 ++#ifdef CONFIG_IRQ_REMAP
3844 ++static bool check_feature_on_all_iommus(u64 mask)
3845 ++{
3846 ++ bool ret = false;
3847 ++ struct amd_iommu *iommu;
3848 ++
3849 ++ for_each_iommu(iommu) {
3850 ++ ret = iommu_feature(iommu, mask);
3851 ++ if (!ret)
3852 ++ return false;
3853 ++ }
3854 ++
3855 ++ return true;
3856 ++}
3857 ++#endif
3858 ++
3859 + /*
3860 + * For IVHD type 0x11/0x40, EFR is also available via IVHD.
3861 + * Default to IVHD EFR since it is available sooner
3862 +@@ -854,13 +870,6 @@ static int iommu_init_ga(struct amd_iommu *iommu)
3863 + int ret = 0;
3864 +
3865 + #ifdef CONFIG_IRQ_REMAP
3866 +- /* Note: We have already checked GASup from IVRS table.
3867 +- * Now, we need to make sure that GAMSup is set.
3868 +- */
3869 +- if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
3870 +- !iommu_feature(iommu, FEATURE_GAM_VAPIC))
3871 +- amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
3872 +-
3873 + ret = iommu_init_ga_log(iommu);
3874 + #endif /* CONFIG_IRQ_REMAP */
3875 +
3876 +@@ -2477,6 +2486,14 @@ static void early_enable_iommus(void)
3877 + }
3878 +
3879 + #ifdef CONFIG_IRQ_REMAP
3880 ++ /*
3881 ++ * Note: We have already checked GASup from IVRS table.
3882 ++ * Now, we need to make sure that GAMSup is set.
3883 ++ */
3884 ++ if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
3885 ++ !check_feature_on_all_iommus(FEATURE_GAM_VAPIC))
3886 ++ amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
3887 ++
3888 + if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
3889 + amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
3890 + #endif
3891 +diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
3892 +index 4b9b3f35ba0ea..d575082567ca7 100644
3893 +--- a/drivers/iommu/intel/svm.c
3894 ++++ b/drivers/iommu/intel/svm.c
3895 +@@ -516,9 +516,6 @@ static void load_pasid(struct mm_struct *mm, u32 pasid)
3896 + {
3897 + mutex_lock(&mm->context.lock);
3898 +
3899 +- /* Synchronize with READ_ONCE in update_pasid(). */
3900 +- smp_store_release(&mm->pasid, pasid);
3901 +-
3902 + /* Update PASID MSR on all CPUs running the mm's tasks. */
3903 + on_each_cpu_mask(mm_cpumask(mm), _load_pasid, NULL, true);
3904 +
3905 +@@ -796,7 +793,19 @@ prq_retry:
3906 + goto prq_retry;
3907 + }
3908 +
3909 ++ /*
3910 ++ * A work in IO page fault workqueue may try to lock pasid_mutex now.
3911 ++ * Holding pasid_mutex while waiting in iopf_queue_flush_dev() for
3912 ++ * all works in the workqueue to finish may cause deadlock.
3913 ++ *
3914 ++ * It's unnecessary to hold pasid_mutex in iopf_queue_flush_dev().
3915 ++ * Unlock it to allow the works to be handled while waiting for
3916 ++ * them to finish.
3917 ++ */
3918 ++ lockdep_assert_held(&pasid_mutex);
3919 ++ mutex_unlock(&pasid_mutex);
3920 + iopf_queue_flush_dev(dev);
3921 ++ mutex_lock(&pasid_mutex);
3922 +
3923 + /*
3924 + * Perform steps described in VT-d spec CH7.10 to drain page
3925 +diff --git a/drivers/misc/habanalabs/common/command_buffer.c b/drivers/misc/habanalabs/common/command_buffer.c
3926 +index 719168c980a45..402ac2395fc82 100644
3927 +--- a/drivers/misc/habanalabs/common/command_buffer.c
3928 ++++ b/drivers/misc/habanalabs/common/command_buffer.c
3929 +@@ -314,8 +314,6 @@ int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
3930 +
3931 + spin_lock(&mgr->cb_lock);
3932 + rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);
3933 +- if (rc < 0)
3934 +- rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_KERNEL);
3935 + spin_unlock(&mgr->cb_lock);
3936 +
3937 + if (rc < 0) {
3938 +diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c
3939 +index 703d79fb6f3f5..379529bffc700 100644
3940 +--- a/drivers/misc/habanalabs/common/debugfs.c
3941 ++++ b/drivers/misc/habanalabs/common/debugfs.c
3942 +@@ -349,7 +349,7 @@ static int mmu_show(struct seq_file *s, void *data)
3943 + return 0;
3944 + }
3945 +
3946 +- phys_addr = hops_info.hop_info[hops_info.used_hops - 1].hop_pte_val;
3947 ++ hl_mmu_va_to_pa(ctx, virt_addr, &phys_addr);
3948 +
3949 + if (hops_info.scrambled_vaddr &&
3950 + (dev_entry->mmu_addr != hops_info.scrambled_vaddr))
3951 +diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c
3952 +index ff4cbde289c0b..4e9b677460bad 100644
3953 +--- a/drivers/misc/habanalabs/common/device.c
3954 ++++ b/drivers/misc/habanalabs/common/device.c
3955 +@@ -23,6 +23,8 @@ enum hl_device_status hl_device_status(struct hl_device *hdev)
3956 + status = HL_DEVICE_STATUS_NEEDS_RESET;
3957 + else if (hdev->disabled)
3958 + status = HL_DEVICE_STATUS_MALFUNCTION;
3959 ++ else if (!hdev->init_done)
3960 ++ status = HL_DEVICE_STATUS_IN_DEVICE_CREATION;
3961 + else
3962 + status = HL_DEVICE_STATUS_OPERATIONAL;
3963 +
3964 +@@ -44,6 +46,7 @@ bool hl_device_operational(struct hl_device *hdev,
3965 + case HL_DEVICE_STATUS_NEEDS_RESET:
3966 + return false;
3967 + case HL_DEVICE_STATUS_OPERATIONAL:
3968 ++ case HL_DEVICE_STATUS_IN_DEVICE_CREATION:
3969 + default:
3970 + return true;
3971 + }
3972 +diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
3973 +index 6b3cdd7e068a3..61db72ecec0e0 100644
3974 +--- a/drivers/misc/habanalabs/common/habanalabs.h
3975 ++++ b/drivers/misc/habanalabs/common/habanalabs.h
3976 +@@ -1798,7 +1798,7 @@ struct hl_dbg_device_entry {
3977 +
3978 + #define HL_STR_MAX 32
3979 +
3980 +-#define HL_DEV_STS_MAX (HL_DEVICE_STATUS_NEEDS_RESET + 1)
3981 ++#define HL_DEV_STS_MAX (HL_DEVICE_STATUS_LAST + 1)
3982 +
3983 + /* Theoretical limit only. A single host can only contain up to 4 or 8 PCIe
3984 + * x16 cards. In extreme cases, there are hosts that can accommodate 16 cards.
3985 +diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c
3986 +index 4194cda2d04c3..536451a9a16c9 100644
3987 +--- a/drivers/misc/habanalabs/common/habanalabs_drv.c
3988 ++++ b/drivers/misc/habanalabs/common/habanalabs_drv.c
3989 +@@ -318,12 +318,16 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
3990 + hdev->asic_prop.fw_security_enabled = false;
3991 +
3992 + /* Assign status description string */
3993 +- strncpy(hdev->status[HL_DEVICE_STATUS_MALFUNCTION],
3994 +- "disabled", HL_STR_MAX);
3995 ++ strncpy(hdev->status[HL_DEVICE_STATUS_OPERATIONAL],
3996 ++ "operational", HL_STR_MAX);
3997 + strncpy(hdev->status[HL_DEVICE_STATUS_IN_RESET],
3998 + "in reset", HL_STR_MAX);
3999 ++ strncpy(hdev->status[HL_DEVICE_STATUS_MALFUNCTION],
4000 ++ "disabled", HL_STR_MAX);
4001 + strncpy(hdev->status[HL_DEVICE_STATUS_NEEDS_RESET],
4002 + "needs reset", HL_STR_MAX);
4003 ++ strncpy(hdev->status[HL_DEVICE_STATUS_IN_DEVICE_CREATION],
4004 ++ "in device creation", HL_STR_MAX);
4005 +
4006 + hdev->major = hl_major;
4007 + hdev->reset_on_lockup = reset_on_lockup;
4008 +diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
4009 +index af339ce1ab4f2..fcadde594a580 100644
4010 +--- a/drivers/misc/habanalabs/common/memory.c
4011 ++++ b/drivers/misc/habanalabs/common/memory.c
4012 +@@ -124,7 +124,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
4013 +
4014 + spin_lock(&vm->idr_lock);
4015 + handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0,
4016 +- GFP_KERNEL);
4017 ++ GFP_ATOMIC);
4018 + spin_unlock(&vm->idr_lock);
4019 +
4020 + if (handle < 0) {
4021 +diff --git a/drivers/misc/habanalabs/common/mmu/mmu_v1.c b/drivers/misc/habanalabs/common/mmu/mmu_v1.c
4022 +index c5e93ff325866..0f536f79dd9c9 100644
4023 +--- a/drivers/misc/habanalabs/common/mmu/mmu_v1.c
4024 ++++ b/drivers/misc/habanalabs/common/mmu/mmu_v1.c
4025 +@@ -470,13 +470,13 @@ static void hl_mmu_v1_fini(struct hl_device *hdev)
4026 + if (!ZERO_OR_NULL_PTR(hdev->mmu_priv.hr.mmu_shadow_hop0)) {
4027 + kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
4028 + gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
4029 +- }
4030 +
4031 +- /* Make sure that if we arrive here again without init was called we
4032 +- * won't cause kernel panic. This can happen for example if we fail
4033 +- * during hard reset code at certain points
4034 +- */
4035 +- hdev->mmu_priv.dr.mmu_shadow_hop0 = NULL;
4036 ++ /* Make sure that if we arrive here again without init was
4037 ++ * called we won't cause kernel panic. This can happen for
4038 ++ * example if we fail during hard reset code at certain points
4039 ++ */
4040 ++ hdev->mmu_priv.dr.mmu_shadow_hop0 = NULL;
4041 ++ }
4042 + }
4043 +
4044 + /**
4045 +diff --git a/drivers/misc/habanalabs/common/sysfs.c b/drivers/misc/habanalabs/common/sysfs.c
4046 +index db72df282ef8d..34f9f2779962a 100644
4047 +--- a/drivers/misc/habanalabs/common/sysfs.c
4048 ++++ b/drivers/misc/habanalabs/common/sysfs.c
4049 +@@ -9,8 +9,7 @@
4050 +
4051 + #include <linux/pci.h>
4052 +
4053 +-long hl_get_frequency(struct hl_device *hdev, u32 pll_index,
4054 +- bool curr)
4055 ++long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
4056 + {
4057 + struct cpucp_packet pkt;
4058 + u32 used_pll_idx;
4059 +@@ -44,8 +43,7 @@ long hl_get_frequency(struct hl_device *hdev, u32 pll_index,
4060 + return (long) result;
4061 + }
4062 +
4063 +-void hl_set_frequency(struct hl_device *hdev, u32 pll_index,
4064 +- u64 freq)
4065 ++void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq)
4066 + {
4067 + struct cpucp_packet pkt;
4068 + u32 used_pll_idx;
4069 +@@ -285,16 +283,12 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
4070 + char *buf)
4071 + {
4072 + struct hl_device *hdev = dev_get_drvdata(dev);
4073 +- char *str;
4074 ++ char str[HL_STR_MAX];
4075 +
4076 +- if (atomic_read(&hdev->in_reset))
4077 +- str = "In reset";
4078 +- else if (hdev->disabled)
4079 +- str = "Malfunction";
4080 +- else if (hdev->needs_reset)
4081 +- str = "Needs Reset";
4082 +- else
4083 +- str = "Operational";
4084 ++ strscpy(str, hdev->status[hl_device_status(hdev)], HL_STR_MAX);
4085 ++
4086 ++ /* use uppercase for backward compatibility */
4087 ++ str[0] = 'A' + (str[0] - 'a');
4088 +
4089 + return sprintf(buf, "%s\n", str);
4090 + }
4091 +diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
4092 +index aa8a0ca5aca24..409f05c962f24 100644
4093 +--- a/drivers/misc/habanalabs/gaudi/gaudi.c
4094 ++++ b/drivers/misc/habanalabs/gaudi/gaudi.c
4095 +@@ -7809,6 +7809,12 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
4096 + u8 cause;
4097 + bool reset_required;
4098 +
4099 ++ if (event_type >= GAUDI_EVENT_SIZE) {
4100 ++ dev_err(hdev->dev, "Event type %u exceeds maximum of %u",
4101 ++ event_type, GAUDI_EVENT_SIZE - 1);
4102 ++ return;
4103 ++ }
4104 ++
4105 + gaudi->events_stat[event_type]++;
4106 + gaudi->events_stat_aggregate[event_type]++;
4107 +
4108 +diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
4109 +index 755e08cf2ecc8..bfb22f96c1a33 100644
4110 +--- a/drivers/misc/habanalabs/goya/goya.c
4111 ++++ b/drivers/misc/habanalabs/goya/goya.c
4112 +@@ -4797,6 +4797,12 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
4113 + >> EQ_CTL_EVENT_TYPE_SHIFT);
4114 + struct goya_device *goya = hdev->asic_specific;
4115 +
4116 ++ if (event_type >= GOYA_ASYNC_EVENT_ID_SIZE) {
4117 ++ dev_err(hdev->dev, "Event type %u exceeds maximum of %u",
4118 ++ event_type, GOYA_ASYNC_EVENT_ID_SIZE - 1);
4119 ++ return;
4120 ++ }
4121 ++
4122 + goya->events_stat[event_type]++;
4123 + goya->events_stat_aggregate[event_type]++;
4124 +
4125 +diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
4126 +index 2735551271889..fa88bf9cba4d0 100644
4127 +--- a/drivers/nvme/target/configfs.c
4128 ++++ b/drivers/nvme/target/configfs.c
4129 +@@ -1067,7 +1067,8 @@ static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
4130 + {
4131 + struct nvmet_subsys *subsys = to_subsys(item);
4132 +
4133 +- return snprintf(page, PAGE_SIZE, "%s\n", subsys->serial);
4134 ++ return snprintf(page, PAGE_SIZE, "%*s\n",
4135 ++ NVMET_SN_MAX_SIZE, subsys->serial);
4136 + }
4137 +
4138 + static ssize_t
4139 +diff --git a/drivers/of/property.c b/drivers/of/property.c
4140 +index 6c028632f425f..0b9c2fb843e79 100644
4141 +--- a/drivers/of/property.c
4142 ++++ b/drivers/of/property.c
4143 +@@ -1434,6 +1434,9 @@ static int of_fwnode_add_links(struct fwnode_handle *fwnode)
4144 + struct property *p;
4145 + struct device_node *con_np = to_of_node(fwnode);
4146 +
4147 ++ if (IS_ENABLED(CONFIG_X86))
4148 ++ return 0;
4149 ++
4150 + if (!con_np)
4151 + return -EINVAL;
4152 +
4153 +diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
4154 +index 889d7ce282ebb..952a92504df69 100644
4155 +--- a/drivers/parisc/dino.c
4156 ++++ b/drivers/parisc/dino.c
4157 +@@ -156,15 +156,6 @@ static inline struct dino_device *DINO_DEV(struct pci_hba_data *hba)
4158 + return container_of(hba, struct dino_device, hba);
4159 + }
4160 +
4161 +-/* Check if PCI device is behind a Card-mode Dino. */
4162 +-static int pci_dev_is_behind_card_dino(struct pci_dev *dev)
4163 +-{
4164 +- struct dino_device *dino_dev;
4165 +-
4166 +- dino_dev = DINO_DEV(parisc_walk_tree(dev->bus->bridge));
4167 +- return is_card_dino(&dino_dev->hba.dev->id);
4168 +-}
4169 +-
4170 + /*
4171 + * Dino Configuration Space Accessor Functions
4172 + */
4173 +@@ -447,6 +438,15 @@ static void quirk_cirrus_cardbus(struct pci_dev *dev)
4174 + DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_6832, quirk_cirrus_cardbus );
4175 +
4176 + #ifdef CONFIG_TULIP
4177 ++/* Check if PCI device is behind a Card-mode Dino. */
4178 ++static int pci_dev_is_behind_card_dino(struct pci_dev *dev)
4179 ++{
4180 ++ struct dino_device *dino_dev;
4181 ++
4182 ++ dino_dev = DINO_DEV(parisc_walk_tree(dev->bus->bridge));
4183 ++ return is_card_dino(&dino_dev->hba.dev->id);
4184 ++}
4185 ++
4186 + static void pci_fixup_tulip(struct pci_dev *dev)
4187 + {
4188 + if (!pci_dev_is_behind_card_dino(dev))
4189 +diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
4190 +index fdbf051586970..0e4a46af82288 100644
4191 +--- a/drivers/pci/controller/pci-aardvark.c
4192 ++++ b/drivers/pci/controller/pci-aardvark.c
4193 +@@ -218,6 +218,8 @@
4194 +
4195 + #define MSI_IRQ_NUM 32
4196 +
4197 ++#define CFG_RD_CRS_VAL 0xffff0001
4198 ++
4199 + struct advk_pcie {
4200 + struct platform_device *pdev;
4201 + void __iomem *base;
4202 +@@ -587,7 +589,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
4203 + advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);
4204 + }
4205 +
4206 +-static int advk_pcie_check_pio_status(struct advk_pcie *pcie, u32 *val)
4207 ++static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
4208 + {
4209 + struct device *dev = &pcie->pdev->dev;
4210 + u32 reg;
4211 +@@ -629,9 +631,30 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, u32 *val)
4212 + strcomp_status = "UR";
4213 + break;
4214 + case PIO_COMPLETION_STATUS_CRS:
4215 ++ if (allow_crs && val) {
4216 ++ /* PCIe r4.0, sec 2.3.2, says:
4217 ++ * If CRS Software Visibility is enabled:
4218 ++ * For a Configuration Read Request that includes both
4219 ++ * bytes of the Vendor ID field of a device Function's
4220 ++ * Configuration Space Header, the Root Complex must
4221 ++ * complete the Request to the host by returning a
4222 ++ * read-data value of 0001h for the Vendor ID field and
4223 ++ * all '1's for any additional bytes included in the
4224 ++ * request.
4225 ++ *
4226 ++ * So CRS in this case is not an error status.
4227 ++ */
4228 ++ *val = CFG_RD_CRS_VAL;
4229 ++ strcomp_status = NULL;
4230 ++ break;
4231 ++ }
4232 + /* PCIe r4.0, sec 2.3.2, says:
4233 + * If CRS Software Visibility is not enabled, the Root Complex
4234 + * must re-issue the Configuration Request as a new Request.
4235 ++ * If CRS Software Visibility is enabled: For a Configuration
4236 ++ * Write Request or for any other Configuration Read Request,
4237 ++ * the Root Complex must re-issue the Configuration Request as
4238 ++ * a new Request.
4239 + * A Root Complex implementation may choose to limit the number
4240 + * of Configuration Request/CRS Completion Status loops before
4241 + * determining that something is wrong with the target of the
4242 +@@ -700,6 +723,7 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
4243 + case PCI_EXP_RTCTL: {
4244 + u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
4245 + *value = (val & PCIE_MSG_PM_PME_MASK) ? 0 : PCI_EXP_RTCTL_PMEIE;
4246 ++ *value |= PCI_EXP_RTCAP_CRSVIS << 16;
4247 + return PCI_BRIDGE_EMUL_HANDLED;
4248 + }
4249 +
4250 +@@ -781,6 +805,7 @@ static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
4251 + static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
4252 + {
4253 + struct pci_bridge_emul *bridge = &pcie->bridge;
4254 ++ int ret;
4255 +
4256 + bridge->conf.vendor =
4257 + cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff);
4258 +@@ -804,7 +829,15 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
4259 + bridge->data = pcie;
4260 + bridge->ops = &advk_pci_bridge_emul_ops;
4261 +
4262 +- return pci_bridge_emul_init(bridge, 0);
4263 ++ /* PCIe config space can be initialized after pci_bridge_emul_init() */
4264 ++ ret = pci_bridge_emul_init(bridge, 0);
4265 ++ if (ret < 0)
4266 ++ return ret;
4267 ++
4268 ++ /* Indicates supports for Completion Retry Status */
4269 ++ bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
4270 ++
4271 ++ return 0;
4272 + }
4273 +
4274 + static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
4275 +@@ -856,6 +889,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
4276 + int where, int size, u32 *val)
4277 + {
4278 + struct advk_pcie *pcie = bus->sysdata;
4279 ++ bool allow_crs;
4280 + u32 reg;
4281 + int ret;
4282 +
4283 +@@ -868,7 +902,24 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
4284 + return pci_bridge_emul_conf_read(&pcie->bridge, where,
4285 + size, val);
4286 +
4287 ++ /*
4288 ++ * Completion Retry Status is possible to return only when reading all
4289 ++ * 4 bytes from PCI_VENDOR_ID and PCI_DEVICE_ID registers at once and
4290 ++ * CRSSVE flag on Root Bridge is enabled.
4291 ++ */
4292 ++ allow_crs = (where == PCI_VENDOR_ID) && (size == 4) &&
4293 ++ (le16_to_cpu(pcie->bridge.pcie_conf.rootctl) &
4294 ++ PCI_EXP_RTCTL_CRSSVE);
4295 ++
4296 + if (advk_pcie_pio_is_running(pcie)) {
4297 ++ /*
4298 ++ * If it is possible return Completion Retry Status so caller
4299 ++ * tries to issue the request again instead of failing.
4300 ++ */
4301 ++ if (allow_crs) {
4302 ++ *val = CFG_RD_CRS_VAL;
4303 ++ return PCIBIOS_SUCCESSFUL;
4304 ++ }
4305 + *val = 0xffffffff;
4306 + return PCIBIOS_SET_FAILED;
4307 + }
4308 +@@ -896,12 +947,20 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
4309 +
4310 + ret = advk_pcie_wait_pio(pcie);
4311 + if (ret < 0) {
4312 ++ /*
4313 ++ * If it is possible return Completion Retry Status so caller
4314 ++ * tries to issue the request again instead of failing.
4315 ++ */
4316 ++ if (allow_crs) {
4317 ++ *val = CFG_RD_CRS_VAL;
4318 ++ return PCIBIOS_SUCCESSFUL;
4319 ++ }
4320 + *val = 0xffffffff;
4321 + return PCIBIOS_SET_FAILED;
4322 + }
4323 +
4324 + /* Check PIO status and get the read result */
4325 +- ret = advk_pcie_check_pio_status(pcie, val);
4326 ++ ret = advk_pcie_check_pio_status(pcie, allow_crs, val);
4327 + if (ret < 0) {
4328 + *val = 0xffffffff;
4329 + return PCIBIOS_SET_FAILED;
4330 +@@ -970,7 +1029,7 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
4331 + if (ret < 0)
4332 + return PCIBIOS_SET_FAILED;
4333 +
4334 +- ret = advk_pcie_check_pio_status(pcie, NULL);
4335 ++ ret = advk_pcie_check_pio_status(pcie, false, NULL);
4336 + if (ret < 0)
4337 + return PCIBIOS_SET_FAILED;
4338 +
4339 +diff --git a/drivers/pci/pci-bridge-emul.h b/drivers/pci/pci-bridge-emul.h
4340 +index b31883022a8e6..49bbd37ee318a 100644
4341 +--- a/drivers/pci/pci-bridge-emul.h
4342 ++++ b/drivers/pci/pci-bridge-emul.h
4343 +@@ -54,7 +54,7 @@ struct pci_bridge_emul_pcie_conf {
4344 + __le16 slotctl;
4345 + __le16 slotsta;
4346 + __le16 rootctl;
4347 +- __le16 rsvd;
4348 ++ __le16 rootcap;
4349 + __le32 rootsta;
4350 + __le32 devcap2;
4351 + __le16 devctl2;
4352 +diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
4353 +index 41baccba033f7..f901d2e43166c 100644
4354 +--- a/drivers/platform/chrome/Makefile
4355 ++++ b/drivers/platform/chrome/Makefile
4356 +@@ -20,7 +20,7 @@ obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_chardev.o
4357 + obj-$(CONFIG_CROS_EC_LIGHTBAR) += cros_ec_lightbar.o
4358 + obj-$(CONFIG_CROS_EC_VBC) += cros_ec_vbc.o
4359 + obj-$(CONFIG_CROS_EC_DEBUGFS) += cros_ec_debugfs.o
4360 +-cros-ec-sensorhub-objs := cros_ec_sensorhub.o cros_ec_sensorhub_ring.o
4361 ++cros-ec-sensorhub-objs := cros_ec_sensorhub.o cros_ec_sensorhub_ring.o cros_ec_trace.o
4362 + obj-$(CONFIG_CROS_EC_SENSORHUB) += cros-ec-sensorhub.o
4363 + obj-$(CONFIG_CROS_EC_SYSFS) += cros_ec_sysfs.o
4364 + obj-$(CONFIG_CROS_USBPD_LOGGER) += cros_usbpd_logger.o
4365 +diff --git a/drivers/platform/chrome/cros_ec_sensorhub_ring.c b/drivers/platform/chrome/cros_ec_sensorhub_ring.c
4366 +index 8921f24e83bac..98e37080f7609 100644
4367 +--- a/drivers/platform/chrome/cros_ec_sensorhub_ring.c
4368 ++++ b/drivers/platform/chrome/cros_ec_sensorhub_ring.c
4369 +@@ -17,6 +17,8 @@
4370 + #include <linux/sort.h>
4371 + #include <linux/slab.h>
4372 +
4373 ++#include "cros_ec_trace.h"
4374 ++
4375 + /* Precision of fixed point for the m values from the filter */
4376 + #define M_PRECISION BIT(23)
4377 +
4378 +@@ -291,6 +293,7 @@ cros_ec_sensor_ring_ts_filter_update(struct cros_ec_sensors_ts_filter_state
4379 + state->median_m = 0;
4380 + state->median_error = 0;
4381 + }
4382 ++ trace_cros_ec_sensorhub_filter(state, dx, dy);
4383 + }
4384 +
4385 + /**
4386 +@@ -427,6 +430,11 @@ cros_ec_sensor_ring_process_event(struct cros_ec_sensorhub *sensorhub,
4387 + if (new_timestamp - *current_timestamp > 0)
4388 + *current_timestamp = new_timestamp;
4389 + }
4390 ++ trace_cros_ec_sensorhub_timestamp(in->timestamp,
4391 ++ fifo_info->timestamp,
4392 ++ fifo_timestamp,
4393 ++ *current_timestamp,
4394 ++ now);
4395 + }
4396 +
4397 + if (in->flags & MOTIONSENSE_SENSOR_FLAG_ODR) {
4398 +@@ -460,6 +468,12 @@ cros_ec_sensor_ring_process_event(struct cros_ec_sensorhub *sensorhub,
4399 +
4400 + /* Regular sample */
4401 + out->sensor_id = in->sensor_num;
4402 ++ trace_cros_ec_sensorhub_data(in->sensor_num,
4403 ++ fifo_info->timestamp,
4404 ++ fifo_timestamp,
4405 ++ *current_timestamp,
4406 ++ now);
4407 ++
4408 + if (*current_timestamp - now > 0) {
4409 + /*
4410 + * This fix is needed to overcome the timestamp filter putting
4411 +diff --git a/drivers/platform/chrome/cros_ec_trace.h b/drivers/platform/chrome/cros_ec_trace.h
4412 +index f744b21bc655f..7e7cfc98657a4 100644
4413 +--- a/drivers/platform/chrome/cros_ec_trace.h
4414 ++++ b/drivers/platform/chrome/cros_ec_trace.h
4415 +@@ -15,6 +15,7 @@
4416 + #include <linux/types.h>
4417 + #include <linux/platform_data/cros_ec_commands.h>
4418 + #include <linux/platform_data/cros_ec_proto.h>
4419 ++#include <linux/platform_data/cros_ec_sensorhub.h>
4420 +
4421 + #include <linux/tracepoint.h>
4422 +
4423 +@@ -70,6 +71,99 @@ TRACE_EVENT(cros_ec_request_done,
4424 + __entry->retval)
4425 + );
4426 +
4427 ++TRACE_EVENT(cros_ec_sensorhub_timestamp,
4428 ++ TP_PROTO(u32 ec_sample_timestamp, u32 ec_fifo_timestamp, s64 fifo_timestamp,
4429 ++ s64 current_timestamp, s64 current_time),
4430 ++ TP_ARGS(ec_sample_timestamp, ec_fifo_timestamp, fifo_timestamp, current_timestamp,
4431 ++ current_time),
4432 ++ TP_STRUCT__entry(
4433 ++ __field(u32, ec_sample_timestamp)
4434 ++ __field(u32, ec_fifo_timestamp)
4435 ++ __field(s64, fifo_timestamp)
4436 ++ __field(s64, current_timestamp)
4437 ++ __field(s64, current_time)
4438 ++ __field(s64, delta)
4439 ++ ),
4440 ++ TP_fast_assign(
4441 ++ __entry->ec_sample_timestamp = ec_sample_timestamp;
4442 ++ __entry->ec_fifo_timestamp = ec_fifo_timestamp;
4443 ++ __entry->fifo_timestamp = fifo_timestamp;
4444 ++ __entry->current_timestamp = current_timestamp;
4445 ++ __entry->current_time = current_time;
4446 ++ __entry->delta = current_timestamp - current_time;
4447 ++ ),
4448 ++ TP_printk("ec_ts: %9u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld",
4449 ++ __entry->ec_sample_timestamp,
4450 ++ __entry->ec_fifo_timestamp,
4451 ++ __entry->fifo_timestamp,
4452 ++ __entry->current_timestamp,
4453 ++ __entry->current_time,
4454 ++ __entry->delta
4455 ++ )
4456 ++);
4457 ++
4458 ++TRACE_EVENT(cros_ec_sensorhub_data,
4459 ++ TP_PROTO(u32 ec_sensor_num, u32 ec_fifo_timestamp, s64 fifo_timestamp,
4460 ++ s64 current_timestamp, s64 current_time),
4461 ++ TP_ARGS(ec_sensor_num, ec_fifo_timestamp, fifo_timestamp, current_timestamp, current_time),
4462 ++ TP_STRUCT__entry(
4463 ++ __field(u32, ec_sensor_num)
4464 ++ __field(u32, ec_fifo_timestamp)
4465 ++ __field(s64, fifo_timestamp)
4466 ++ __field(s64, current_timestamp)
4467 ++ __field(s64, current_time)
4468 ++ __field(s64, delta)
4469 ++ ),
4470 ++ TP_fast_assign(
4471 ++ __entry->ec_sensor_num = ec_sensor_num;
4472 ++ __entry->ec_fifo_timestamp = ec_fifo_timestamp;
4473 ++ __entry->fifo_timestamp = fifo_timestamp;
4474 ++ __entry->current_timestamp = current_timestamp;
4475 ++ __entry->current_time = current_time;
4476 ++ __entry->delta = current_timestamp - current_time;
4477 ++ ),
4478 ++ TP_printk("ec_num: %4u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld",
4479 ++ __entry->ec_sensor_num,
4480 ++ __entry->ec_fifo_timestamp,
4481 ++ __entry->fifo_timestamp,
4482 ++ __entry->current_timestamp,
4483 ++ __entry->current_time,
4484 ++ __entry->delta
4485 ++ )
4486 ++);
4487 ++
4488 ++TRACE_EVENT(cros_ec_sensorhub_filter,
4489 ++ TP_PROTO(struct cros_ec_sensors_ts_filter_state *state, s64 dx, s64 dy),
4490 ++ TP_ARGS(state, dx, dy),
4491 ++ TP_STRUCT__entry(
4492 ++ __field(s64, dx)
4493 ++ __field(s64, dy)
4494 ++ __field(s64, median_m)
4495 ++ __field(s64, median_error)
4496 ++ __field(s64, history_len)
4497 ++ __field(s64, x)
4498 ++ __field(s64, y)
4499 ++ ),
4500 ++ TP_fast_assign(
4501 ++ __entry->dx = dx;
4502 ++ __entry->dy = dy;
4503 ++ __entry->median_m = state->median_m;
4504 ++ __entry->median_error = state->median_error;
4505 ++ __entry->history_len = state->history_len;
4506 ++ __entry->x = state->x_offset;
4507 ++ __entry->y = state->y_offset;
4508 ++ ),
4509 ++ TP_printk("dx: %12lld. dy: %12lld median_m: %12lld median_error: %12lld len: %lld x: %12lld y: %12lld",
4510 ++ __entry->dx,
4511 ++ __entry->dy,
4512 ++ __entry->median_m,
4513 ++ __entry->median_error,
4514 ++ __entry->history_len,
4515 ++ __entry->x,
4516 ++ __entry->y
4517 ++ )
4518 ++);
4519 ++
4520 +
4521 + #endif /* _CROS_EC_TRACE_H_ */
4522 +
4523 +diff --git a/drivers/pwm/pwm-ab8500.c b/drivers/pwm/pwm-ab8500.c
4524 +index e2a26d9da25b3..281f74a1c50bd 100644
4525 +--- a/drivers/pwm/pwm-ab8500.c
4526 ++++ b/drivers/pwm/pwm-ab8500.c
4527 +@@ -22,14 +22,21 @@
4528 +
4529 + struct ab8500_pwm_chip {
4530 + struct pwm_chip chip;
4531 ++ unsigned int hwid;
4532 + };
4533 +
4534 ++static struct ab8500_pwm_chip *ab8500_pwm_from_chip(struct pwm_chip *chip)
4535 ++{
4536 ++ return container_of(chip, struct ab8500_pwm_chip, chip);
4537 ++}
4538 ++
4539 + static int ab8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
4540 + const struct pwm_state *state)
4541 + {
4542 + int ret;
4543 + u8 reg;
4544 + unsigned int higher_val, lower_val;
4545 ++ struct ab8500_pwm_chip *ab8500 = ab8500_pwm_from_chip(chip);
4546 +
4547 + if (state->polarity != PWM_POLARITY_NORMAL)
4548 + return -EINVAL;
4549 +@@ -37,7 +44,7 @@ static int ab8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
4550 + if (!state->enabled) {
4551 + ret = abx500_mask_and_set_register_interruptible(chip->dev,
4552 + AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
4553 +- 1 << (chip->base - 1), 0);
4554 ++ 1 << ab8500->hwid, 0);
4555 +
4556 + if (ret < 0)
4557 + dev_err(chip->dev, "%s: Failed to disable PWM, Error %d\n",
4558 +@@ -56,7 +63,7 @@ static int ab8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
4559 + */
4560 + higher_val = ((state->duty_cycle & 0x0300) >> 8);
4561 +
4562 +- reg = AB8500_PWM_OUT_CTRL1_REG + ((chip->base - 1) * 2);
4563 ++ reg = AB8500_PWM_OUT_CTRL1_REG + (ab8500->hwid * 2);
4564 +
4565 + ret = abx500_set_register_interruptible(chip->dev, AB8500_MISC,
4566 + reg, (u8)lower_val);
4567 +@@ -70,7 +77,7 @@ static int ab8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
4568 +
4569 + ret = abx500_mask_and_set_register_interruptible(chip->dev,
4570 + AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
4571 +- 1 << (chip->base - 1), 1 << (chip->base - 1));
4572 ++ 1 << ab8500->hwid, 1 << ab8500->hwid);
4573 + if (ret < 0)
4574 + dev_err(chip->dev, "%s: Failed to enable PWM, Error %d\n",
4575 + pwm->label, ret);
4576 +@@ -88,6 +95,9 @@ static int ab8500_pwm_probe(struct platform_device *pdev)
4577 + struct ab8500_pwm_chip *ab8500;
4578 + int err;
4579 +
4580 ++ if (pdev->id < 1 || pdev->id > 31)
4581 ++ return dev_err_probe(&pdev->dev, EINVAL, "Invalid device id %d\n", pdev->id);
4582 ++
4583 + /*
4584 + * Nothing to be done in probe, this is required to get the
4585 + * device which is required for ab8500 read and write
4586 +@@ -99,6 +109,7 @@ static int ab8500_pwm_probe(struct platform_device *pdev)
4587 + ab8500->chip.dev = &pdev->dev;
4588 + ab8500->chip.ops = &ab8500_pwm_ops;
4589 + ab8500->chip.npwm = 1;
4590 ++ ab8500->hwid = pdev->id - 1;
4591 +
4592 + err = pwmchip_add(&ab8500->chip);
4593 + if (err < 0)
4594 +diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
4595 +index 11b16ecc4f967..18d8e34d0d08a 100644
4596 +--- a/drivers/pwm/pwm-img.c
4597 ++++ b/drivers/pwm/pwm-img.c
4598 +@@ -326,23 +326,7 @@ err_pm_disable:
4599 + static int img_pwm_remove(struct platform_device *pdev)
4600 + {
4601 + struct img_pwm_chip *pwm_chip = platform_get_drvdata(pdev);
4602 +- u32 val;
4603 +- unsigned int i;
4604 +- int ret;
4605 +-
4606 +- ret = pm_runtime_get_sync(&pdev->dev);
4607 +- if (ret < 0) {
4608 +- pm_runtime_put(&pdev->dev);
4609 +- return ret;
4610 +- }
4611 +-
4612 +- for (i = 0; i < pwm_chip->chip.npwm; i++) {
4613 +- val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
4614 +- val &= ~BIT(i);
4615 +- img_pwm_writel(pwm_chip, PWM_CTRL_CFG, val);
4616 +- }
4617 +
4618 +- pm_runtime_put(&pdev->dev);
4619 + pm_runtime_disable(&pdev->dev);
4620 + if (!pm_runtime_status_suspended(&pdev->dev))
4621 + img_pwm_runtime_suspend(&pdev->dev);
4622 +diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c
4623 +index 2834a0f001d3a..719e8e9136569 100644
4624 +--- a/drivers/pwm/pwm-lpc32xx.c
4625 ++++ b/drivers/pwm/pwm-lpc32xx.c
4626 +@@ -117,17 +117,17 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev)
4627 + lpc32xx->chip.ops = &lpc32xx_pwm_ops;
4628 + lpc32xx->chip.npwm = 1;
4629 +
4630 ++ /* If PWM is disabled, configure the output to the default value */
4631 ++ val = readl(lpc32xx->base + (lpc32xx->chip.pwms[0].hwpwm << 2));
4632 ++ val &= ~PWM_PIN_LEVEL;
4633 ++ writel(val, lpc32xx->base + (lpc32xx->chip.pwms[0].hwpwm << 2));
4634 ++
4635 + ret = pwmchip_add(&lpc32xx->chip);
4636 + if (ret < 0) {
4637 + dev_err(&pdev->dev, "failed to add PWM chip, error %d\n", ret);
4638 + return ret;
4639 + }
4640 +
4641 +- /* When PWM is disable, configure the output to the default value */
4642 +- val = readl(lpc32xx->base + (lpc32xx->chip.pwms[0].hwpwm << 2));
4643 +- val &= ~PWM_PIN_LEVEL;
4644 +- writel(val, lpc32xx->base + (lpc32xx->chip.pwms[0].hwpwm << 2));
4645 +-
4646 + platform_set_drvdata(pdev, lpc32xx);
4647 +
4648 + return 0;
4649 +diff --git a/drivers/pwm/pwm-mxs.c b/drivers/pwm/pwm-mxs.c
4650 +index a22180803bd7d..558dc1de8f5d5 100644
4651 +--- a/drivers/pwm/pwm-mxs.c
4652 ++++ b/drivers/pwm/pwm-mxs.c
4653 +@@ -145,6 +145,11 @@ static int mxs_pwm_probe(struct platform_device *pdev)
4654 + return ret;
4655 + }
4656 +
4657 ++ /* FIXME: Only do this if the PWM isn't already running */
4658 ++ ret = stmp_reset_block(mxs->base);
4659 ++ if (ret)
4660 ++ return dev_err_probe(&pdev->dev, ret, "failed to reset PWM\n");
4661 ++
4662 + ret = pwmchip_add(&mxs->chip);
4663 + if (ret < 0) {
4664 + dev_err(&pdev->dev, "failed to add pwm chip %d\n", ret);
4665 +@@ -153,15 +158,7 @@ static int mxs_pwm_probe(struct platform_device *pdev)
4666 +
4667 + platform_set_drvdata(pdev, mxs);
4668 +
4669 +- ret = stmp_reset_block(mxs->base);
4670 +- if (ret)
4671 +- goto pwm_remove;
4672 +-
4673 + return 0;
4674 +-
4675 +-pwm_remove:
4676 +- pwmchip_remove(&mxs->chip);
4677 +- return ret;
4678 + }
4679 +
4680 + static int mxs_pwm_remove(struct platform_device *pdev)
4681 +diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
4682 +index cbe900877724f..8fcef29948d77 100644
4683 +--- a/drivers/pwm/pwm-rockchip.c
4684 ++++ b/drivers/pwm/pwm-rockchip.c
4685 +@@ -384,20 +384,6 @@ static int rockchip_pwm_remove(struct platform_device *pdev)
4686 + {
4687 + struct rockchip_pwm_chip *pc = platform_get_drvdata(pdev);
4688 +
4689 +- /*
4690 +- * Disable the PWM clk before unpreparing it if the PWM device is still
4691 +- * running. This should only happen when the last PWM user left it
4692 +- * enabled, or when nobody requested a PWM that was previously enabled
4693 +- * by the bootloader.
4694 +- *
4695 +- * FIXME: Maybe the core should disable all PWM devices in
4696 +- * pwmchip_remove(). In this case we'd only have to call
4697 +- * clk_unprepare() after pwmchip_remove().
4698 +- *
4699 +- */
4700 +- if (pwm_is_enabled(pc->chip.pwms))
4701 +- clk_disable(pc->clk);
4702 +-
4703 + clk_unprepare(pc->pclk);
4704 + clk_unprepare(pc->clk);
4705 +
4706 +diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
4707 +index 93dd03618465b..e4a10aac354d6 100644
4708 +--- a/drivers/pwm/pwm-stm32-lp.c
4709 ++++ b/drivers/pwm/pwm-stm32-lp.c
4710 +@@ -222,8 +222,6 @@ static int stm32_pwm_lp_remove(struct platform_device *pdev)
4711 + {
4712 + struct stm32_pwm_lp *priv = platform_get_drvdata(pdev);
4713 +
4714 +- pwm_disable(&priv->chip.pwms[0]);
4715 +-
4716 + return pwmchip_remove(&priv->chip);
4717 + }
4718 +
4719 +diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
4720 +index 12153d5801ce1..f7bf87097a9fb 100644
4721 +--- a/drivers/rtc/Kconfig
4722 ++++ b/drivers/rtc/Kconfig
4723 +@@ -624,6 +624,7 @@ config RTC_DRV_FM3130
4724 +
4725 + config RTC_DRV_RX8010
4726 + tristate "Epson RX8010SJ"
4727 ++ select REGMAP_I2C
4728 + help
4729 + If you say yes here you get support for the Epson RX8010SJ RTC
4730 + chip.
4731 +diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
4732 +index db26edeccea6e..b6698656fc014 100644
4733 +--- a/drivers/staging/rtl8192u/r8192U_core.c
4734 ++++ b/drivers/staging/rtl8192u/r8192U_core.c
4735 +@@ -4265,7 +4265,7 @@ static void TranslateRxSignalStuff819xUsb(struct sk_buff *skb,
4736 + bpacket_match_bssid = (type != IEEE80211_FTYPE_CTL) &&
4737 + (ether_addr_equal(priv->ieee80211->current_network.bssid, (fc & IEEE80211_FCTL_TODS) ? hdr->addr1 : (fc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 : hdr->addr3))
4738 + && (!pstats->bHwError) && (!pstats->bCRC) && (!pstats->bICV);
4739 +- bpacket_toself = bpacket_match_bssid &
4740 ++ bpacket_toself = bpacket_match_bssid &&
4741 + (ether_addr_equal(praddr, priv->ieee80211->dev->dev_addr));
4742 +
4743 + if (WLAN_FC_GET_FRAMETYPE(fc) == IEEE80211_STYPE_BEACON)
4744 +diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
4745 +index f95000df89422..965558516cbdc 100644
4746 +--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
4747 ++++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
4748 +@@ -349,16 +349,16 @@ static int wpa_set_auth_algs(struct net_device *dev, u32 value)
4749 + struct adapter *padapter = rtw_netdev_priv(dev);
4750 + int ret = 0;
4751 +
4752 +- if ((value & WLAN_AUTH_SHARED_KEY) && (value & WLAN_AUTH_OPEN)) {
4753 ++ if ((value & IW_AUTH_ALG_SHARED_KEY) && (value & IW_AUTH_ALG_OPEN_SYSTEM)) {
4754 + padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
4755 + padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeAutoSwitch;
4756 + padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Auto;
4757 +- } else if (value & WLAN_AUTH_SHARED_KEY) {
4758 ++ } else if (value & IW_AUTH_ALG_SHARED_KEY) {
4759 + padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
4760 +
4761 + padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeShared;
4762 + padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Shared;
4763 +- } else if (value & WLAN_AUTH_OPEN) {
4764 ++ } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
4765 + /* padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled; */
4766 + if (padapter->securitypriv.ndisauthtype < Ndis802_11AuthModeWPAPSK) {
4767 + padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeOpen;
4768 +diff --git a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
4769 +index 232fd0b333251..8494cc04aa210 100644
4770 +--- a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
4771 ++++ b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
4772 +@@ -359,6 +359,12 @@ static int adc_tm5_register_tzd(struct adc_tm5_chip *adc_tm)
4773 + &adc_tm->channels[i],
4774 + &adc_tm5_ops);
4775 + if (IS_ERR(tzd)) {
4776 ++ if (PTR_ERR(tzd) == -ENODEV) {
4777 ++ dev_warn(adc_tm->dev, "thermal sensor on channel %d is not used\n",
4778 ++ adc_tm->channels[i].channel);
4779 ++ continue;
4780 ++ }
4781 ++
4782 + dev_err(adc_tm->dev, "Error registering TZ zone for channel %d: %ld\n",
4783 + adc_tm->channels[i].channel, PTR_ERR(tzd));
4784 + return PTR_ERR(tzd);
4785 +diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
4786 +index fdf16aa34eb47..702696cf58b67 100644
4787 +--- a/drivers/thermal/rcar_gen3_thermal.c
4788 ++++ b/drivers/thermal/rcar_gen3_thermal.c
4789 +@@ -84,7 +84,7 @@ struct rcar_gen3_thermal_tsc {
4790 + struct thermal_zone_device *zone;
4791 + struct equation_coefs coef;
4792 + int tj_t;
4793 +- int id; /* thermal channel id */
4794 ++ unsigned int id; /* thermal channel id */
4795 + };
4796 +
4797 + struct rcar_gen3_thermal_priv {
4798 +@@ -310,7 +310,8 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
4799 + const int *ths_tj_1 = of_device_get_match_data(dev);
4800 + struct resource *res;
4801 + struct thermal_zone_device *zone;
4802 +- int ret, i;
4803 ++ unsigned int i;
4804 ++ int ret;
4805 +
4806 + /* default values if FUSEs are missing */
4807 + /* TODO: Read values from hardware on supported platforms */
4808 +@@ -376,7 +377,7 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
4809 + if (ret < 0)
4810 + goto error_unregister;
4811 +
4812 +- dev_info(dev, "TSC%d: Loaded %d trip points\n", i, ret);
4813 ++ dev_info(dev, "TSC%u: Loaded %d trip points\n", i, ret);
4814 + }
4815 +
4816 + priv->num_tscs = i;
4817 +diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
4818 +index e9a90bc23b11d..f4ab4c5b4b626 100644
4819 +--- a/drivers/thermal/samsung/exynos_tmu.c
4820 ++++ b/drivers/thermal/samsung/exynos_tmu.c
4821 +@@ -1073,6 +1073,7 @@ static int exynos_tmu_probe(struct platform_device *pdev)
4822 + data->sclk = devm_clk_get(&pdev->dev, "tmu_sclk");
4823 + if (IS_ERR(data->sclk)) {
4824 + dev_err(&pdev->dev, "Failed to get sclk\n");
4825 ++ ret = PTR_ERR(data->sclk);
4826 + goto err_clk;
4827 + } else {
4828 + ret = clk_prepare_enable(data->sclk);
4829 +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
4830 +index ef981d3b7bb49..cb72393f92d3a 100644
4831 +--- a/drivers/tty/vt/vt.c
4832 ++++ b/drivers/tty/vt/vt.c
4833 +@@ -2059,7 +2059,7 @@ static void restore_cur(struct vc_data *vc)
4834 +
4835 + enum { ESnormal, ESesc, ESsquare, ESgetpars, ESfunckey,
4836 + EShash, ESsetG0, ESsetG1, ESpercent, EScsiignore, ESnonstd,
4837 +- ESpalette, ESosc };
4838 ++ ESpalette, ESosc, ESapc, ESpm, ESdcs };
4839 +
4840 + /* console_lock is held (except via vc_init()) */
4841 + static void reset_terminal(struct vc_data *vc, int do_clear)
4842 +@@ -2133,20 +2133,28 @@ static void vc_setGx(struct vc_data *vc, unsigned int which, int c)
4843 + vc->vc_translate = set_translate(*charset, vc);
4844 + }
4845 +
4846 ++/* is this state an ANSI control string? */
4847 ++static bool ansi_control_string(unsigned int state)
4848 ++{
4849 ++ if (state == ESosc || state == ESapc || state == ESpm || state == ESdcs)
4850 ++ return true;
4851 ++ return false;
4852 ++}
4853 ++
4854 + /* console_lock is held */
4855 + static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
4856 + {
4857 + /*
4858 + * Control characters can be used in the _middle_
4859 +- * of an escape sequence.
4860 ++ * of an escape sequence, aside from ANSI control strings.
4861 + */
4862 +- if (vc->vc_state == ESosc && c>=8 && c<=13) /* ... except for OSC */
4863 ++ if (ansi_control_string(vc->vc_state) && c >= 8 && c <= 13)
4864 + return;
4865 + switch (c) {
4866 + case 0:
4867 + return;
4868 + case 7:
4869 +- if (vc->vc_state == ESosc)
4870 ++ if (ansi_control_string(vc->vc_state))
4871 + vc->vc_state = ESnormal;
4872 + else if (vc->vc_bell_duration)
4873 + kd_mksound(vc->vc_bell_pitch, vc->vc_bell_duration);
4874 +@@ -2207,6 +2215,12 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
4875 + case ']':
4876 + vc->vc_state = ESnonstd;
4877 + return;
4878 ++ case '_':
4879 ++ vc->vc_state = ESapc;
4880 ++ return;
4881 ++ case '^':
4882 ++ vc->vc_state = ESpm;
4883 ++ return;
4884 + case '%':
4885 + vc->vc_state = ESpercent;
4886 + return;
4887 +@@ -2224,6 +2238,9 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
4888 + if (vc->state.x < VC_TABSTOPS_COUNT)
4889 + set_bit(vc->state.x, vc->vc_tab_stop);
4890 + return;
4891 ++ case 'P':
4892 ++ vc->vc_state = ESdcs;
4893 ++ return;
4894 + case 'Z':
4895 + respond_ID(tty);
4896 + return;
4897 +@@ -2520,8 +2537,14 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
4898 + vc_setGx(vc, 1, c);
4899 + vc->vc_state = ESnormal;
4900 + return;
4901 ++ case ESapc:
4902 ++ return;
4903 + case ESosc:
4904 + return;
4905 ++ case ESpm:
4906 ++ return;
4907 ++ case ESdcs:
4908 ++ return;
4909 + default:
4910 + vc->vc_state = ESnormal;
4911 + }
4912 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
4913 +index 0ba98e08a0290..50e12989e84a1 100644
4914 +--- a/fs/btrfs/ioctl.c
4915 ++++ b/fs/btrfs/ioctl.c
4916 +@@ -3205,6 +3205,8 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
4917 + struct inode *inode = file_inode(file);
4918 + struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4919 + struct btrfs_ioctl_vol_args_v2 *vol_args;
4920 ++ struct block_device *bdev = NULL;
4921 ++ fmode_t mode;
4922 + int ret;
4923 + bool cancel = false;
4924 +
4925 +@@ -3237,9 +3239,9 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
4926 + /* Exclusive operation is now claimed */
4927 +
4928 + if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID)
4929 +- ret = btrfs_rm_device(fs_info, NULL, vol_args->devid);
4930 ++ ret = btrfs_rm_device(fs_info, NULL, vol_args->devid, &bdev, &mode);
4931 + else
4932 +- ret = btrfs_rm_device(fs_info, vol_args->name, 0);
4933 ++ ret = btrfs_rm_device(fs_info, vol_args->name, 0, &bdev, &mode);
4934 +
4935 + btrfs_exclop_finish(fs_info);
4936 +
4937 +@@ -3255,6 +3257,8 @@ out:
4938 + kfree(vol_args);
4939 + err_drop:
4940 + mnt_drop_write_file(file);
4941 ++ if (bdev)
4942 ++ blkdev_put(bdev, mode);
4943 + return ret;
4944 + }
4945 +
4946 +@@ -3263,6 +3267,8 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
4947 + struct inode *inode = file_inode(file);
4948 + struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4949 + struct btrfs_ioctl_vol_args *vol_args;
4950 ++ struct block_device *bdev = NULL;
4951 ++ fmode_t mode;
4952 + int ret;
4953 + bool cancel;
4954 +
4955 +@@ -3284,7 +3290,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
4956 + ret = exclop_start_or_cancel_reloc(fs_info, BTRFS_EXCLOP_DEV_REMOVE,
4957 + cancel);
4958 + if (ret == 0) {
4959 +- ret = btrfs_rm_device(fs_info, vol_args->name, 0);
4960 ++ ret = btrfs_rm_device(fs_info, vol_args->name, 0, &bdev, &mode);
4961 + if (!ret)
4962 + btrfs_info(fs_info, "disk deleted %s", vol_args->name);
4963 + btrfs_exclop_finish(fs_info);
4964 +@@ -3293,7 +3299,8 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
4965 + kfree(vol_args);
4966 + out_drop_write:
4967 + mnt_drop_write_file(file);
4968 +-
4969 ++ if (bdev)
4970 ++ blkdev_put(bdev, mode);
4971 + return ret;
4972 + }
4973 +
4974 +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
4975 +index 10dd2d210b0f4..682416d4edefa 100644
4976 +--- a/fs/btrfs/volumes.c
4977 ++++ b/fs/btrfs/volumes.c
4978 +@@ -570,6 +570,8 @@ static int btrfs_free_stale_devices(const char *path,
4979 + struct btrfs_device *device, *tmp_device;
4980 + int ret = 0;
4981 +
4982 ++ lockdep_assert_held(&uuid_mutex);
4983 ++
4984 + if (path)
4985 + ret = -ENOENT;
4986 +
4987 +@@ -1000,11 +1002,12 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
4988 + struct btrfs_device *orig_dev;
4989 + int ret = 0;
4990 +
4991 ++ lockdep_assert_held(&uuid_mutex);
4992 ++
4993 + fs_devices = alloc_fs_devices(orig->fsid, NULL);
4994 + if (IS_ERR(fs_devices))
4995 + return fs_devices;
4996 +
4997 +- mutex_lock(&orig->device_list_mutex);
4998 + fs_devices->total_devices = orig->total_devices;
4999 +
5000 + list_for_each_entry(orig_dev, &orig->devices, dev_list) {
5001 +@@ -1036,10 +1039,8 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
5002 + device->fs_devices = fs_devices;
5003 + fs_devices->num_devices++;
5004 + }
5005 +- mutex_unlock(&orig->device_list_mutex);
5006 + return fs_devices;
5007 + error:
5008 +- mutex_unlock(&orig->device_list_mutex);
5009 + free_fs_devices(fs_devices);
5010 + return ERR_PTR(ret);
5011 + }
5012 +@@ -1928,15 +1929,17 @@ out:
5013 + * Function to update ctime/mtime for a given device path.
5014 + * Mainly used for ctime/mtime based probe like libblkid.
5015 + */
5016 +-static void update_dev_time(const char *path_name)
5017 ++static void update_dev_time(struct block_device *bdev)
5018 + {
5019 +- struct file *filp;
5020 ++ struct inode *inode = bdev->bd_inode;
5021 ++ struct timespec64 now;
5022 +
5023 +- filp = filp_open(path_name, O_RDWR, 0);
5024 +- if (IS_ERR(filp))
5025 ++ /* Shouldn't happen but just in case. */
5026 ++ if (!inode)
5027 + return;
5028 +- file_update_time(filp);
5029 +- filp_close(filp, NULL);
5030 ++
5031 ++ now = current_time(inode);
5032 ++ generic_update_time(inode, &now, S_MTIME | S_CTIME);
5033 + }
5034 +
5035 + static int btrfs_rm_dev_item(struct btrfs_device *device)
5036 +@@ -2116,11 +2119,11 @@ void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
5037 + btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
5038 +
5039 + /* Update ctime/mtime for device path for libblkid */
5040 +- update_dev_time(device_path);
5041 ++ update_dev_time(bdev);
5042 + }
5043 +
5044 + int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
5045 +- u64 devid)
5046 ++ u64 devid, struct block_device **bdev, fmode_t *mode)
5047 + {
5048 + struct btrfs_device *device;
5049 + struct btrfs_fs_devices *cur_devices;
5050 +@@ -2234,15 +2237,26 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
5051 + mutex_unlock(&fs_devices->device_list_mutex);
5052 +
5053 + /*
5054 +- * at this point, the device is zero sized and detached from
5055 +- * the devices list. All that's left is to zero out the old
5056 +- * supers and free the device.
5057 ++ * At this point, the device is zero sized and detached from the
5058 ++ * devices list. All that's left is to zero out the old supers and
5059 ++ * free the device.
5060 ++ *
5061 ++ * We cannot call btrfs_close_bdev() here because we're holding the sb
5062 ++ * write lock, and blkdev_put() will pull in the ->open_mutex on the
5063 ++ * block device and it's dependencies. Instead just flush the device
5064 ++ * and let the caller do the final blkdev_put.
5065 + */
5066 +- if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
5067 ++ if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
5068 + btrfs_scratch_superblocks(fs_info, device->bdev,
5069 + device->name->str);
5070 ++ if (device->bdev) {
5071 ++ sync_blockdev(device->bdev);
5072 ++ invalidate_bdev(device->bdev);
5073 ++ }
5074 ++ }
5075 +
5076 +- btrfs_close_bdev(device);
5077 ++ *bdev = device->bdev;
5078 ++ *mode = device->mode;
5079 + synchronize_rcu();
5080 + btrfs_free_device(device);
5081 +
5082 +@@ -2769,7 +2783,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
5083 + btrfs_forget_devices(device_path);
5084 +
5085 + /* Update ctime/mtime for blkid or udev */
5086 +- update_dev_time(device_path);
5087 ++ update_dev_time(bdev);
5088 +
5089 + return ret;
5090 +
5091 +diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
5092 +index 55a8ba244716b..f77f869dfd2cf 100644
5093 +--- a/fs/btrfs/volumes.h
5094 ++++ b/fs/btrfs/volumes.h
5095 +@@ -472,7 +472,8 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
5096 + const u8 *uuid);
5097 + void btrfs_free_device(struct btrfs_device *device);
5098 + int btrfs_rm_device(struct btrfs_fs_info *fs_info,
5099 +- const char *device_path, u64 devid);
5100 ++ const char *device_path, u64 devid,
5101 ++ struct block_device **bdev, fmode_t *mode);
5102 + void __exit btrfs_cleanup_fs_uuids(void);
5103 + int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len);
5104 + int btrfs_grow_device(struct btrfs_trans_handle *trans,
5105 +diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
5106 +index ba562efdf07b8..3296a93be907c 100644
5107 +--- a/fs/ceph/caps.c
5108 ++++ b/fs/ceph/caps.c
5109 +@@ -1859,6 +1859,8 @@ static u64 __mark_caps_flushing(struct inode *inode,
5110 + * try to invalidate mapping pages without blocking.
5111 + */
5112 + static int try_nonblocking_invalidate(struct inode *inode)
5113 ++ __releases(ci->i_ceph_lock)
5114 ++ __acquires(ci->i_ceph_lock)
5115 + {
5116 + struct ceph_inode_info *ci = ceph_inode(inode);
5117 + u32 invalidating_gen = ci->i_rdcache_gen;
5118 +@@ -3117,7 +3119,16 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
5119 + break;
5120 + }
5121 + }
5122 +- BUG_ON(!found);
5123 ++
5124 ++ if (!found) {
5125 ++ /*
5126 ++ * The capsnap should already be removed when removing
5127 ++ * auth cap in the case of a forced unmount.
5128 ++ */
5129 ++ WARN_ON_ONCE(ci->i_auth_cap);
5130 ++ goto unlock;
5131 ++ }
5132 ++
5133 + capsnap->dirty_pages -= nr;
5134 + if (capsnap->dirty_pages == 0) {
5135 + complete_capsnap = true;
5136 +@@ -3139,6 +3150,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
5137 + complete_capsnap ? " (complete capsnap)" : "");
5138 + }
5139 +
5140 ++unlock:
5141 + spin_unlock(&ci->i_ceph_lock);
5142 +
5143 + if (last) {
5144 +@@ -3609,6 +3621,43 @@ out:
5145 + iput(inode);
5146 + }
5147 +
5148 ++void __ceph_remove_capsnap(struct inode *inode, struct ceph_cap_snap *capsnap,
5149 ++ bool *wake_ci, bool *wake_mdsc)
5150 ++{
5151 ++ struct ceph_inode_info *ci = ceph_inode(inode);
5152 ++ struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
5153 ++ bool ret;
5154 ++
5155 ++ lockdep_assert_held(&ci->i_ceph_lock);
5156 ++
5157 ++ dout("removing capsnap %p, inode %p ci %p\n", capsnap, inode, ci);
5158 ++
5159 ++ list_del_init(&capsnap->ci_item);
5160 ++ ret = __detach_cap_flush_from_ci(ci, &capsnap->cap_flush);
5161 ++ if (wake_ci)
5162 ++ *wake_ci = ret;
5163 ++
5164 ++ spin_lock(&mdsc->cap_dirty_lock);
5165 ++ if (list_empty(&ci->i_cap_flush_list))
5166 ++ list_del_init(&ci->i_flushing_item);
5167 ++
5168 ++ ret = __detach_cap_flush_from_mdsc(mdsc, &capsnap->cap_flush);
5169 ++ if (wake_mdsc)
5170 ++ *wake_mdsc = ret;
5171 ++ spin_unlock(&mdsc->cap_dirty_lock);
5172 ++}
5173 ++
5174 ++void ceph_remove_capsnap(struct inode *inode, struct ceph_cap_snap *capsnap,
5175 ++ bool *wake_ci, bool *wake_mdsc)
5176 ++{
5177 ++ struct ceph_inode_info *ci = ceph_inode(inode);
5178 ++
5179 ++ lockdep_assert_held(&ci->i_ceph_lock);
5180 ++
5181 ++ WARN_ON_ONCE(capsnap->dirty_pages || capsnap->writing);
5182 ++ __ceph_remove_capsnap(inode, capsnap, wake_ci, wake_mdsc);
5183 ++}
5184 ++
5185 + /*
5186 + * Handle FLUSHSNAP_ACK. MDS has flushed snap data to disk and we can
5187 + * throw away our cap_snap.
5188 +@@ -3646,23 +3695,10 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
5189 + capsnap, capsnap->follows);
5190 + }
5191 + }
5192 +- if (flushed) {
5193 +- WARN_ON(capsnap->dirty_pages || capsnap->writing);
5194 +- dout(" removing %p cap_snap %p follows %lld\n",
5195 +- inode, capsnap, follows);
5196 +- list_del(&capsnap->ci_item);
5197 +- wake_ci |= __detach_cap_flush_from_ci(ci, &capsnap->cap_flush);
5198 +-
5199 +- spin_lock(&mdsc->cap_dirty_lock);
5200 +-
5201 +- if (list_empty(&ci->i_cap_flush_list))
5202 +- list_del_init(&ci->i_flushing_item);
5203 +-
5204 +- wake_mdsc |= __detach_cap_flush_from_mdsc(mdsc,
5205 +- &capsnap->cap_flush);
5206 +- spin_unlock(&mdsc->cap_dirty_lock);
5207 +- }
5208 ++ if (flushed)
5209 ++ ceph_remove_capsnap(inode, capsnap, &wake_ci, &wake_mdsc);
5210 + spin_unlock(&ci->i_ceph_lock);
5211 ++
5212 + if (flushed) {
5213 + ceph_put_snap_context(capsnap->context);
5214 + ceph_put_cap_snap(capsnap);
5215 +@@ -4137,8 +4173,9 @@ void ceph_handle_caps(struct ceph_mds_session *session,
5216 + done:
5217 + mutex_unlock(&session->s_mutex);
5218 + done_unlocked:
5219 +- ceph_put_string(extra_info.pool_ns);
5220 + iput(inode);
5221 ++out:
5222 ++ ceph_put_string(extra_info.pool_ns);
5223 + return;
5224 +
5225 + flush_cap_releases:
5226 +@@ -4153,7 +4190,7 @@ flush_cap_releases:
5227 + bad:
5228 + pr_err("ceph_handle_caps: corrupt message\n");
5229 + ceph_msg_dump(msg);
5230 +- return;
5231 ++ goto out;
5232 + }
5233 +
5234 + /*
5235 +diff --git a/fs/ceph/file.c b/fs/ceph/file.c
5236 +index d1755ac1d964a..3daebfaec8c6d 100644
5237 +--- a/fs/ceph/file.c
5238 ++++ b/fs/ceph/file.c
5239 +@@ -1722,32 +1722,26 @@ retry_snap:
5240 + goto out;
5241 + }
5242 +
5243 +- err = file_remove_privs(file);
5244 +- if (err)
5245 ++ down_read(&osdc->lock);
5246 ++ map_flags = osdc->osdmap->flags;
5247 ++ pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id);
5248 ++ up_read(&osdc->lock);
5249 ++ if ((map_flags & CEPH_OSDMAP_FULL) ||
5250 ++ (pool_flags & CEPH_POOL_FLAG_FULL)) {
5251 ++ err = -ENOSPC;
5252 + goto out;
5253 ++ }
5254 +
5255 +- err = file_update_time(file);
5256 ++ err = file_remove_privs(file);
5257 + if (err)
5258 + goto out;
5259 +
5260 +- inode_inc_iversion_raw(inode);
5261 +-
5262 + if (ci->i_inline_version != CEPH_INLINE_NONE) {
5263 + err = ceph_uninline_data(file, NULL);
5264 + if (err < 0)
5265 + goto out;
5266 + }
5267 +
5268 +- down_read(&osdc->lock);
5269 +- map_flags = osdc->osdmap->flags;
5270 +- pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id);
5271 +- up_read(&osdc->lock);
5272 +- if ((map_flags & CEPH_OSDMAP_FULL) ||
5273 +- (pool_flags & CEPH_POOL_FLAG_FULL)) {
5274 +- err = -ENOSPC;
5275 +- goto out;
5276 +- }
5277 +-
5278 + dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
5279 + inode, ceph_vinop(inode), pos, count, i_size_read(inode));
5280 + if (fi->fmode & CEPH_FILE_MODE_LAZY)
5281 +@@ -1759,6 +1753,12 @@ retry_snap:
5282 + if (err < 0)
5283 + goto out;
5284 +
5285 ++ err = file_update_time(file);
5286 ++ if (err)
5287 ++ goto out_caps;
5288 ++
5289 ++ inode_inc_iversion_raw(inode);
5290 ++
5291 + dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
5292 + inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
5293 +
5294 +@@ -1842,6 +1842,8 @@ retry_snap:
5295 + }
5296 +
5297 + goto out_unlocked;
5298 ++out_caps:
5299 ++ ceph_put_cap_refs(ci, got);
5300 + out:
5301 + if (direct_lock)
5302 + ceph_end_io_direct(inode);
5303 +diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
5304 +index 0b69aec23e5c4..52b3ddc5f1991 100644
5305 +--- a/fs/ceph/mds_client.c
5306 ++++ b/fs/ceph/mds_client.c
5307 +@@ -1583,14 +1583,39 @@ out:
5308 + return ret;
5309 + }
5310 +
5311 ++static int remove_capsnaps(struct ceph_mds_client *mdsc, struct inode *inode)
5312 ++{
5313 ++ struct ceph_inode_info *ci = ceph_inode(inode);
5314 ++ struct ceph_cap_snap *capsnap;
5315 ++ int capsnap_release = 0;
5316 ++
5317 ++ lockdep_assert_held(&ci->i_ceph_lock);
5318 ++
5319 ++ dout("removing capsnaps, ci is %p, inode is %p\n", ci, inode);
5320 ++
5321 ++ while (!list_empty(&ci->i_cap_snaps)) {
5322 ++ capsnap = list_first_entry(&ci->i_cap_snaps,
5323 ++ struct ceph_cap_snap, ci_item);
5324 ++ __ceph_remove_capsnap(inode, capsnap, NULL, NULL);
5325 ++ ceph_put_snap_context(capsnap->context);
5326 ++ ceph_put_cap_snap(capsnap);
5327 ++ capsnap_release++;
5328 ++ }
5329 ++ wake_up_all(&ci->i_cap_wq);
5330 ++ wake_up_all(&mdsc->cap_flushing_wq);
5331 ++ return capsnap_release;
5332 ++}
5333 ++
5334 + static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
5335 + void *arg)
5336 + {
5337 + struct ceph_fs_client *fsc = (struct ceph_fs_client *)arg;
5338 ++ struct ceph_mds_client *mdsc = fsc->mdsc;
5339 + struct ceph_inode_info *ci = ceph_inode(inode);
5340 + LIST_HEAD(to_remove);
5341 + bool dirty_dropped = false;
5342 + bool invalidate = false;
5343 ++ int capsnap_release = 0;
5344 +
5345 + dout("removing cap %p, ci is %p, inode is %p\n",
5346 + cap, ci, &ci->vfs_inode);
5347 +@@ -1598,7 +1623,6 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
5348 + __ceph_remove_cap(cap, false);
5349 + if (!ci->i_auth_cap) {
5350 + struct ceph_cap_flush *cf;
5351 +- struct ceph_mds_client *mdsc = fsc->mdsc;
5352 +
5353 + if (READ_ONCE(fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) {
5354 + if (inode->i_data.nrpages > 0)
5355 +@@ -1662,6 +1686,9 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
5356 + list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
5357 + ci->i_prealloc_cap_flush = NULL;
5358 + }
5359 ++
5360 ++ if (!list_empty(&ci->i_cap_snaps))
5361 ++ capsnap_release = remove_capsnaps(mdsc, inode);
5362 + }
5363 + spin_unlock(&ci->i_ceph_lock);
5364 + while (!list_empty(&to_remove)) {
5365 +@@ -1678,6 +1705,8 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
5366 + ceph_queue_invalidate(inode);
5367 + if (dirty_dropped)
5368 + iput(inode);
5369 ++ while (capsnap_release--)
5370 ++ iput(inode);
5371 + return 0;
5372 + }
5373 +
5374 +@@ -4912,7 +4941,6 @@ void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
5375 +
5376 + ceph_metric_destroy(&mdsc->metric);
5377 +
5378 +- flush_delayed_work(&mdsc->metric.delayed_work);
5379 + fsc->mdsc = NULL;
5380 + kfree(mdsc);
5381 + dout("mdsc_destroy %p done\n", mdsc);
5382 +diff --git a/fs/ceph/metric.c b/fs/ceph/metric.c
5383 +index 5ac151eb0d498..04d5df29bbbfb 100644
5384 +--- a/fs/ceph/metric.c
5385 ++++ b/fs/ceph/metric.c
5386 +@@ -302,6 +302,8 @@ void ceph_metric_destroy(struct ceph_client_metric *m)
5387 + if (!m)
5388 + return;
5389 +
5390 ++ cancel_delayed_work_sync(&m->delayed_work);
5391 ++
5392 + percpu_counter_destroy(&m->total_inodes);
5393 + percpu_counter_destroy(&m->opened_inodes);
5394 + percpu_counter_destroy(&m->i_caps_mis);
5395 +@@ -309,8 +311,6 @@ void ceph_metric_destroy(struct ceph_client_metric *m)
5396 + percpu_counter_destroy(&m->d_lease_mis);
5397 + percpu_counter_destroy(&m->d_lease_hit);
5398 +
5399 +- cancel_delayed_work_sync(&m->delayed_work);
5400 +-
5401 + ceph_put_mds_session(m->session);
5402 + }
5403 +
5404 +diff --git a/fs/ceph/super.h b/fs/ceph/super.h
5405 +index b1a363641beb6..2200ed76b1230 100644
5406 +--- a/fs/ceph/super.h
5407 ++++ b/fs/ceph/super.h
5408 +@@ -1163,6 +1163,12 @@ extern void ceph_put_cap_refs_no_check_caps(struct ceph_inode_info *ci,
5409 + int had);
5410 + extern void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
5411 + struct ceph_snap_context *snapc);
5412 ++extern void __ceph_remove_capsnap(struct inode *inode,
5413 ++ struct ceph_cap_snap *capsnap,
5414 ++ bool *wake_ci, bool *wake_mdsc);
5415 ++extern void ceph_remove_capsnap(struct inode *inode,
5416 ++ struct ceph_cap_snap *capsnap,
5417 ++ bool *wake_ci, bool *wake_mdsc);
5418 + extern void ceph_flush_snaps(struct ceph_inode_info *ci,
5419 + struct ceph_mds_session **psession);
5420 + extern bool __ceph_should_report_size(struct ceph_inode_info *ci);
5421 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
5422 +index 2dfd0d8297eb3..1b9de38a136aa 100644
5423 +--- a/fs/cifs/smb2ops.c
5424 ++++ b/fs/cifs/smb2ops.c
5425 +@@ -689,13 +689,19 @@ smb2_close_cached_fid(struct kref *ref)
5426 + cifs_dbg(FYI, "clear cached root file handle\n");
5427 + SMB2_close(0, cfid->tcon, cfid->fid->persistent_fid,
5428 + cfid->fid->volatile_fid);
5429 +- cfid->is_valid = false;
5430 +- cfid->file_all_info_is_valid = false;
5431 +- cfid->has_lease = false;
5432 +- if (cfid->dentry) {
5433 +- dput(cfid->dentry);
5434 +- cfid->dentry = NULL;
5435 +- }
5436 ++ }
5437 ++
5438 ++ /*
5439 ++ * We only check validity above to send SMB2_close,
5440 ++ * but we still need to invalidate these entries
5441 ++ * when this function is called
5442 ++ */
5443 ++ cfid->is_valid = false;
5444 ++ cfid->file_all_info_is_valid = false;
5445 ++ cfid->has_lease = false;
5446 ++ if (cfid->dentry) {
5447 ++ dput(cfid->dentry);
5448 ++ cfid->dentry = NULL;
5449 + }
5450 + }
5451 +
5452 +diff --git a/fs/coredump.c b/fs/coredump.c
5453 +index 07afb5ddb1c4e..19fe5312c10f3 100644
5454 +--- a/fs/coredump.c
5455 ++++ b/fs/coredump.c
5456 +@@ -1127,8 +1127,10 @@ int dump_vma_snapshot(struct coredump_params *cprm, int *vma_count,
5457 +
5458 + mmap_write_unlock(mm);
5459 +
5460 +- if (WARN_ON(i != *vma_count))
5461 ++ if (WARN_ON(i != *vma_count)) {
5462 ++ kvfree(*vma_meta);
5463 + return -EFAULT;
5464 ++ }
5465 +
5466 + *vma_data_size_ptr = vma_data_size;
5467 + return 0;
5468 +diff --git a/fs/io_uring.c b/fs/io_uring.c
5469 +index 43aaa35664315..754d59f734d84 100644
5470 +--- a/fs/io_uring.c
5471 ++++ b/fs/io_uring.c
5472 +@@ -10335,7 +10335,7 @@ static int __init io_uring_init(void)
5473 + BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));
5474 +
5475 + BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
5476 +- BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int));
5477 ++ BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(int));
5478 +
5479 + req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
5480 + SLAB_ACCOUNT);
5481 +diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c
5482 +index 68e8d61e28dd5..62f8a7ac19c85 100644
5483 +--- a/fs/nilfs2/sysfs.c
5484 ++++ b/fs/nilfs2/sysfs.c
5485 +@@ -51,11 +51,9 @@ static const struct sysfs_ops nilfs_##name##_attr_ops = { \
5486 + #define NILFS_DEV_INT_GROUP_TYPE(name, parent_name) \
5487 + static void nilfs_##name##_attr_release(struct kobject *kobj) \
5488 + { \
5489 +- struct nilfs_sysfs_##parent_name##_subgroups *subgroups; \
5490 +- struct the_nilfs *nilfs = container_of(kobj->parent, \
5491 +- struct the_nilfs, \
5492 +- ns_##parent_name##_kobj); \
5493 +- subgroups = nilfs->ns_##parent_name##_subgroups; \
5494 ++ struct nilfs_sysfs_##parent_name##_subgroups *subgroups = container_of(kobj, \
5495 ++ struct nilfs_sysfs_##parent_name##_subgroups, \
5496 ++ sg_##name##_kobj); \
5497 + complete(&subgroups->sg_##name##_kobj_unregister); \
5498 + } \
5499 + static struct kobj_type nilfs_##name##_ktype = { \
5500 +@@ -81,12 +79,12 @@ static int nilfs_sysfs_create_##name##_group(struct the_nilfs *nilfs) \
5501 + err = kobject_init_and_add(kobj, &nilfs_##name##_ktype, parent, \
5502 + #name); \
5503 + if (err) \
5504 +- return err; \
5505 +- return 0; \
5506 ++ kobject_put(kobj); \
5507 ++ return err; \
5508 + } \
5509 + static void nilfs_sysfs_delete_##name##_group(struct the_nilfs *nilfs) \
5510 + { \
5511 +- kobject_del(&nilfs->ns_##parent_name##_subgroups->sg_##name##_kobj); \
5512 ++ kobject_put(&nilfs->ns_##parent_name##_subgroups->sg_##name##_kobj); \
5513 + }
5514 +
5515 + /************************************************************************
5516 +@@ -197,14 +195,14 @@ int nilfs_sysfs_create_snapshot_group(struct nilfs_root *root)
5517 + }
5518 +
5519 + if (err)
5520 +- return err;
5521 ++ kobject_put(&root->snapshot_kobj);
5522 +
5523 +- return 0;
5524 ++ return err;
5525 + }
5526 +
5527 + void nilfs_sysfs_delete_snapshot_group(struct nilfs_root *root)
5528 + {
5529 +- kobject_del(&root->snapshot_kobj);
5530 ++ kobject_put(&root->snapshot_kobj);
5531 + }
5532 +
5533 + /************************************************************************
5534 +@@ -986,7 +984,7 @@ int nilfs_sysfs_create_device_group(struct super_block *sb)
5535 + err = kobject_init_and_add(&nilfs->ns_dev_kobj, &nilfs_dev_ktype, NULL,
5536 + "%s", sb->s_id);
5537 + if (err)
5538 +- goto free_dev_subgroups;
5539 ++ goto cleanup_dev_kobject;
5540 +
5541 + err = nilfs_sysfs_create_mounted_snapshots_group(nilfs);
5542 + if (err)
5543 +@@ -1023,9 +1021,7 @@ delete_mounted_snapshots_group:
5544 + nilfs_sysfs_delete_mounted_snapshots_group(nilfs);
5545 +
5546 + cleanup_dev_kobject:
5547 +- kobject_del(&nilfs->ns_dev_kobj);
5548 +-
5549 +-free_dev_subgroups:
5550 ++ kobject_put(&nilfs->ns_dev_kobj);
5551 + kfree(nilfs->ns_dev_subgroups);
5552 +
5553 + failed_create_device_group:
5554 +diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
5555 +index 8b7b01a380cea..c8bfc01da5d71 100644
5556 +--- a/fs/nilfs2/the_nilfs.c
5557 ++++ b/fs/nilfs2/the_nilfs.c
5558 +@@ -792,14 +792,13 @@ nilfs_find_or_create_root(struct the_nilfs *nilfs, __u64 cno)
5559 +
5560 + void nilfs_put_root(struct nilfs_root *root)
5561 + {
5562 +- if (refcount_dec_and_test(&root->count)) {
5563 +- struct the_nilfs *nilfs = root->nilfs;
5564 ++ struct the_nilfs *nilfs = root->nilfs;
5565 +
5566 +- nilfs_sysfs_delete_snapshot_group(root);
5567 +-
5568 +- spin_lock(&nilfs->ns_cptree_lock);
5569 ++ if (refcount_dec_and_lock(&root->count, &nilfs->ns_cptree_lock)) {
5570 + rb_erase(&root->rb_node, &nilfs->ns_cptree);
5571 + spin_unlock(&nilfs->ns_cptree_lock);
5572 ++
5573 ++ nilfs_sysfs_delete_snapshot_group(root);
5574 + iput(root->ifile);
5575 +
5576 + kfree(root);
5577 +diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
5578 +index 4f72b47973c30..2f909ed084c63 100644
5579 +--- a/include/linux/cacheinfo.h
5580 ++++ b/include/linux/cacheinfo.h
5581 +@@ -79,24 +79,6 @@ struct cpu_cacheinfo {
5582 + bool cpu_map_populated;
5583 + };
5584 +
5585 +-/*
5586 +- * Helpers to make sure "func" is executed on the cpu whose cache
5587 +- * attributes are being detected
5588 +- */
5589 +-#define DEFINE_SMP_CALL_CACHE_FUNCTION(func) \
5590 +-static inline void _##func(void *ret) \
5591 +-{ \
5592 +- int cpu = smp_processor_id(); \
5593 +- *(int *)ret = __##func(cpu); \
5594 +-} \
5595 +- \
5596 +-int func(unsigned int cpu) \
5597 +-{ \
5598 +- int ret; \
5599 +- smp_call_function_single(cpu, _##func, &ret, true); \
5600 +- return ret; \
5601 +-}
5602 +-
5603 + struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu);
5604 + int init_cache_level(unsigned int cpu);
5605 + int populate_cache_leaves(unsigned int cpu);
5606 +diff --git a/include/linux/thermal.h b/include/linux/thermal.h
5607 +index d296f3b88fb98..8050d929a5b45 100644
5608 +--- a/include/linux/thermal.h
5609 ++++ b/include/linux/thermal.h
5610 +@@ -404,12 +404,13 @@ static inline void thermal_zone_device_unregister(
5611 + struct thermal_zone_device *tz)
5612 + { }
5613 + static inline struct thermal_cooling_device *
5614 +-thermal_cooling_device_register(char *type, void *devdata,
5615 ++thermal_cooling_device_register(const char *type, void *devdata,
5616 + const struct thermal_cooling_device_ops *ops)
5617 + { return ERR_PTR(-ENODEV); }
5618 + static inline struct thermal_cooling_device *
5619 + thermal_of_cooling_device_register(struct device_node *np,
5620 +- char *type, void *devdata, const struct thermal_cooling_device_ops *ops)
5621 ++ const char *type, void *devdata,
5622 ++ const struct thermal_cooling_device_ops *ops)
5623 + { return ERR_PTR(-ENODEV); }
5624 + static inline struct thermal_cooling_device *
5625 + devm_thermal_of_cooling_device_register(struct device *dev,
5626 +diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h
5627 +index a47a731e45277..b4b681b81df81 100644
5628 +--- a/include/uapi/misc/habanalabs.h
5629 ++++ b/include/uapi/misc/habanalabs.h
5630 +@@ -276,7 +276,9 @@ enum hl_device_status {
5631 + HL_DEVICE_STATUS_OPERATIONAL,
5632 + HL_DEVICE_STATUS_IN_RESET,
5633 + HL_DEVICE_STATUS_MALFUNCTION,
5634 +- HL_DEVICE_STATUS_NEEDS_RESET
5635 ++ HL_DEVICE_STATUS_NEEDS_RESET,
5636 ++ HL_DEVICE_STATUS_IN_DEVICE_CREATION,
5637 ++ HL_DEVICE_STATUS_LAST = HL_DEVICE_STATUS_IN_DEVICE_CREATION
5638 + };
5639 +
5640 + /* Opcode for management ioctl
5641 +diff --git a/init/initramfs.c b/init/initramfs.c
5642 +index af27abc596436..a842c05447456 100644
5643 +--- a/init/initramfs.c
5644 ++++ b/init/initramfs.c
5645 +@@ -15,6 +15,7 @@
5646 + #include <linux/mm.h>
5647 + #include <linux/namei.h>
5648 + #include <linux/init_syscalls.h>
5649 ++#include <linux/umh.h>
5650 +
5651 + static ssize_t __init xwrite(struct file *file, const char *p, size_t count,
5652 + loff_t *pos)
5653 +@@ -727,6 +728,7 @@ static int __init populate_rootfs(void)
5654 + {
5655 + initramfs_cookie = async_schedule_domain(do_populate_rootfs, NULL,
5656 + &initramfs_domain);
5657 ++ usermodehelper_enable();
5658 + if (!initramfs_async)
5659 + wait_for_initramfs();
5660 + return 0;
5661 +diff --git a/init/main.c b/init/main.c
5662 +index 8d97aba78c3ad..90733a916791f 100644
5663 +--- a/init/main.c
5664 ++++ b/init/main.c
5665 +@@ -1392,7 +1392,6 @@ static void __init do_basic_setup(void)
5666 + driver_init();
5667 + init_irq_proc();
5668 + do_ctors();
5669 +- usermodehelper_enable();
5670 + do_initcalls();
5671 + }
5672 +
5673 +diff --git a/init/noinitramfs.c b/init/noinitramfs.c
5674 +index 3d62b07f3bb9c..d1d26b93d25cd 100644
5675 +--- a/init/noinitramfs.c
5676 ++++ b/init/noinitramfs.c
5677 +@@ -10,6 +10,7 @@
5678 + #include <linux/kdev_t.h>
5679 + #include <linux/syscalls.h>
5680 + #include <linux/init_syscalls.h>
5681 ++#include <linux/umh.h>
5682 +
5683 + /*
5684 + * Create a simple rootfs that is similar to the default initramfs
5685 +@@ -18,6 +19,7 @@ static int __init default_rootfs(void)
5686 + {
5687 + int err;
5688 +
5689 ++ usermodehelper_enable();
5690 + err = init_mkdir("/dev", 0755);
5691 + if (err < 0)
5692 + goto out;
5693 +diff --git a/kernel/profile.c b/kernel/profile.c
5694 +index c2ebddb5e9746..eb9c7f0f5ac52 100644
5695 +--- a/kernel/profile.c
5696 ++++ b/kernel/profile.c
5697 +@@ -41,7 +41,8 @@ struct profile_hit {
5698 + #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
5699 +
5700 + static atomic_t *prof_buffer;
5701 +-static unsigned long prof_len, prof_shift;
5702 ++static unsigned long prof_len;
5703 ++static unsigned short int prof_shift;
5704 +
5705 + int prof_on __read_mostly;
5706 + EXPORT_SYMBOL_GPL(prof_on);
5707 +@@ -67,8 +68,8 @@ int profile_setup(char *str)
5708 + if (str[strlen(sleepstr)] == ',')
5709 + str += strlen(sleepstr) + 1;
5710 + if (get_option(&str, &par))
5711 +- prof_shift = par;
5712 +- pr_info("kernel sleep profiling enabled (shift: %ld)\n",
5713 ++ prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
5714 ++ pr_info("kernel sleep profiling enabled (shift: %u)\n",
5715 + prof_shift);
5716 + #else
5717 + pr_warn("kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
5718 +@@ -78,21 +79,21 @@ int profile_setup(char *str)
5719 + if (str[strlen(schedstr)] == ',')
5720 + str += strlen(schedstr) + 1;
5721 + if (get_option(&str, &par))
5722 +- prof_shift = par;
5723 +- pr_info("kernel schedule profiling enabled (shift: %ld)\n",
5724 ++ prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
5725 ++ pr_info("kernel schedule profiling enabled (shift: %u)\n",
5726 + prof_shift);
5727 + } else if (!strncmp(str, kvmstr, strlen(kvmstr))) {
5728 + prof_on = KVM_PROFILING;
5729 + if (str[strlen(kvmstr)] == ',')
5730 + str += strlen(kvmstr) + 1;
5731 + if (get_option(&str, &par))
5732 +- prof_shift = par;
5733 +- pr_info("kernel KVM profiling enabled (shift: %ld)\n",
5734 ++ prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
5735 ++ pr_info("kernel KVM profiling enabled (shift: %u)\n",
5736 + prof_shift);
5737 + } else if (get_option(&str, &par)) {
5738 +- prof_shift = par;
5739 ++ prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
5740 + prof_on = CPU_PROFILING;
5741 +- pr_info("kernel profiling enabled (shift: %ld)\n",
5742 ++ pr_info("kernel profiling enabled (shift: %u)\n",
5743 + prof_shift);
5744 + }
5745 + return 1;
5746 +@@ -468,7 +469,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
5747 + unsigned long p = *ppos;
5748 + ssize_t read;
5749 + char *pnt;
5750 +- unsigned int sample_step = 1 << prof_shift;
5751 ++ unsigned long sample_step = 1UL << prof_shift;
5752 +
5753 + profile_flip_buffers();
5754 + if (p >= (prof_len+1)*sizeof(unsigned int))
5755 +diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
5756 +index 912b47aa99d82..d17b0a5ce6ac3 100644
5757 +--- a/kernel/sched/idle.c
5758 ++++ b/kernel/sched/idle.c
5759 +@@ -379,10 +379,10 @@ void play_idle_precise(u64 duration_ns, u64 latency_ns)
5760 + cpuidle_use_deepest_state(latency_ns);
5761 +
5762 + it.done = 0;
5763 +- hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
5764 ++ hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
5765 + it.timer.function = idle_inject_timer_fn;
5766 + hrtimer_start(&it.timer, ns_to_ktime(duration_ns),
5767 +- HRTIMER_MODE_REL_PINNED);
5768 ++ HRTIMER_MODE_REL_PINNED_HARD);
5769 +
5770 + while (!READ_ONCE(it.done))
5771 + do_idle();
5772 +diff --git a/kernel/sys.c b/kernel/sys.c
5773 +index ef1a78f5d71c7..6ec50924b5176 100644
5774 +--- a/kernel/sys.c
5775 ++++ b/kernel/sys.c
5776 +@@ -1959,13 +1959,6 @@ static int validate_prctl_map_addr(struct prctl_mm_map *prctl_map)
5777 +
5778 + error = -EINVAL;
5779 +
5780 +- /*
5781 +- * @brk should be after @end_data in traditional maps.
5782 +- */
5783 +- if (prctl_map->start_brk <= prctl_map->end_data ||
5784 +- prctl_map->brk <= prctl_map->end_data)
5785 +- goto out;
5786 +-
5787 + /*
5788 + * Neither we should allow to override limits if they set.
5789 + */
5790 +diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
5791 +index d713714cba67f..4bd8f94a56c63 100644
5792 +--- a/kernel/trace/trace_boot.c
5793 ++++ b/kernel/trace/trace_boot.c
5794 +@@ -235,14 +235,14 @@ trace_boot_init_events(struct trace_array *tr, struct xbc_node *node)
5795 + if (!node)
5796 + return;
5797 + /* per-event key starts with "event.GROUP.EVENT" */
5798 +- xbc_node_for_each_child(node, gnode) {
5799 ++ xbc_node_for_each_subkey(node, gnode) {
5800 + data = xbc_node_get_data(gnode);
5801 + if (!strcmp(data, "enable")) {
5802 + enable_all = true;
5803 + continue;
5804 + }
5805 + enable = false;
5806 +- xbc_node_for_each_child(gnode, enode) {
5807 ++ xbc_node_for_each_subkey(gnode, enode) {
5808 + data = xbc_node_get_data(enode);
5809 + if (!strcmp(data, "enable")) {
5810 + enable = true;
5811 +@@ -338,7 +338,7 @@ trace_boot_init_instances(struct xbc_node *node)
5812 + if (!node)
5813 + return;
5814 +
5815 +- xbc_node_for_each_child(node, inode) {
5816 ++ xbc_node_for_each_subkey(node, inode) {
5817 + p = xbc_node_get_data(inode);
5818 + if (!p || *p == '\0')
5819 + continue;
5820 +diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
5821 +index 5ddd575159fb8..ffd22e499997a 100644
5822 +--- a/lib/Kconfig.debug
5823 ++++ b/lib/Kconfig.debug
5824 +@@ -1062,7 +1062,6 @@ config HARDLOCKUP_DETECTOR
5825 + depends on HAVE_HARDLOCKUP_DETECTOR_PERF || HAVE_HARDLOCKUP_DETECTOR_ARCH
5826 + select LOCKUP_DETECTOR
5827 + select HARDLOCKUP_DETECTOR_PERF if HAVE_HARDLOCKUP_DETECTOR_PERF
5828 +- select HARDLOCKUP_DETECTOR_ARCH if HAVE_HARDLOCKUP_DETECTOR_ARCH
5829 + help
5830 + Say Y here to enable the kernel to act as a watchdog to detect
5831 + hard lockups.
5832 +@@ -2460,8 +2459,7 @@ config SLUB_KUNIT_TEST
5833 +
5834 + config RATIONAL_KUNIT_TEST
5835 + tristate "KUnit test for rational.c" if !KUNIT_ALL_TESTS
5836 +- depends on KUNIT
5837 +- select RATIONAL
5838 ++ depends on KUNIT && RATIONAL
5839 + default KUNIT_ALL_TESTS
5840 + help
5841 + This builds the rational math unit test.
5842 +diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
5843 +index 2bbd7dce0f1d3..490a4c9003395 100644
5844 +--- a/net/9p/trans_virtio.c
5845 ++++ b/net/9p/trans_virtio.c
5846 +@@ -610,7 +610,7 @@ static int p9_virtio_probe(struct virtio_device *vdev)
5847 + chan->vc_wq = kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
5848 + if (!chan->vc_wq) {
5849 + err = -ENOMEM;
5850 +- goto out_free_tag;
5851 ++ goto out_remove_file;
5852 + }
5853 + init_waitqueue_head(chan->vc_wq);
5854 + chan->ring_bufs_avail = 1;
5855 +@@ -628,6 +628,8 @@ static int p9_virtio_probe(struct virtio_device *vdev)
5856 +
5857 + return 0;
5858 +
5859 ++out_remove_file:
5860 ++ sysfs_remove_file(&vdev->dev.kobj, &dev_attr_mount_tag.attr);
5861 + out_free_tag:
5862 + kfree(tag);
5863 + out_free_vq:
5864 +diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
5865 +index dbb41821b1b85..cd5a2b186f0d0 100644
5866 +--- a/net/sunrpc/svc_xprt.c
5867 ++++ b/net/sunrpc/svc_xprt.c
5868 +@@ -662,7 +662,7 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
5869 + {
5870 + struct svc_serv *serv = rqstp->rq_server;
5871 + struct xdr_buf *arg = &rqstp->rq_arg;
5872 +- unsigned long pages, filled;
5873 ++ unsigned long pages, filled, ret;
5874 +
5875 + pages = (serv->sv_max_mesg + 2 * PAGE_SIZE) >> PAGE_SHIFT;
5876 + if (pages > RPCSVC_MAXPAGES) {
5877 +@@ -672,11 +672,12 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
5878 + pages = RPCSVC_MAXPAGES;
5879 + }
5880 +
5881 +- for (;;) {
5882 +- filled = alloc_pages_bulk_array(GFP_KERNEL, pages,
5883 +- rqstp->rq_pages);
5884 +- if (filled == pages)
5885 +- break;
5886 ++ for (filled = 0; filled < pages; filled = ret) {
5887 ++ ret = alloc_pages_bulk_array(GFP_KERNEL, pages,
5888 ++ rqstp->rq_pages);
5889 ++ if (ret > filled)
5890 ++ /* Made progress, don't sleep yet */
5891 ++ continue;
5892 +
5893 + set_current_state(TASK_INTERRUPTIBLE);
5894 + if (signalled() || kthread_should_stop()) {
5895 +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
5896 +index b0032c42333eb..572e564bf6cd9 100644
5897 +--- a/security/selinux/hooks.c
5898 ++++ b/security/selinux/hooks.c
5899 +@@ -2155,7 +2155,7 @@ static int selinux_ptrace_access_check(struct task_struct *child,
5900 + static int selinux_ptrace_traceme(struct task_struct *parent)
5901 + {
5902 + return avc_has_perm(&selinux_state,
5903 +- task_sid_subj(parent), task_sid_obj(current),
5904 ++ task_sid_obj(parent), task_sid_obj(current),
5905 + SECCLASS_PROCESS, PROCESS__PTRACE, NULL);
5906 + }
5907 +
5908 +@@ -6218,7 +6218,7 @@ static int selinux_msg_queue_msgrcv(struct kern_ipc_perm *msq, struct msg_msg *m
5909 + struct ipc_security_struct *isec;
5910 + struct msg_security_struct *msec;
5911 + struct common_audit_data ad;
5912 +- u32 sid = task_sid_subj(target);
5913 ++ u32 sid = task_sid_obj(target);
5914 + int rc;
5915 +
5916 + isec = selinux_ipc(msq);
5917 +diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
5918 +index 223a6da0e6dc5..b1694fa6f12b1 100644
5919 +--- a/security/smack/smack_lsm.c
5920 ++++ b/security/smack/smack_lsm.c
5921 +@@ -2016,7 +2016,7 @@ static int smk_curacc_on_task(struct task_struct *p, int access,
5922 + const char *caller)
5923 + {
5924 + struct smk_audit_info ad;
5925 +- struct smack_known *skp = smk_of_task_struct_subj(p);
5926 ++ struct smack_known *skp = smk_of_task_struct_obj(p);
5927 + int rc;
5928 +
5929 + smk_ad_init(&ad, caller, LSM_AUDIT_DATA_TASK);
5930 +@@ -3480,7 +3480,7 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
5931 + */
5932 + static int smack_getprocattr(struct task_struct *p, char *name, char **value)
5933 + {
5934 +- struct smack_known *skp = smk_of_task_struct_subj(p);
5935 ++ struct smack_known *skp = smk_of_task_struct_obj(p);
5936 + char *cp;
5937 + int slen;
5938 +
5939 +diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c
5940 +index 5e71382467e88..546f6fd0609e1 100644
5941 +--- a/sound/soc/generic/audio-graph-card.c
5942 ++++ b/sound/soc/generic/audio-graph-card.c
5943 +@@ -285,6 +285,7 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
5944 + if (li->cpu) {
5945 + struct snd_soc_card *card = simple_priv_to_card(priv);
5946 + struct snd_soc_dai_link_component *cpus = asoc_link_to_cpu(dai_link, 0);
5947 ++ struct snd_soc_dai_link_component *platforms = asoc_link_to_platform(dai_link, 0);
5948 + int is_single_links = 0;
5949 +
5950 + /* Codec is dummy */
5951 +@@ -313,6 +314,7 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
5952 + dai_link->no_pcm = 1;
5953 +
5954 + asoc_simple_canonicalize_cpu(cpus, is_single_links);
5955 ++ asoc_simple_canonicalize_platform(platforms, cpus);
5956 + } else {
5957 + struct snd_soc_codec_conf *cconf = simple_props_to_codec_conf(dai_props, 0);
5958 + struct snd_soc_dai_link_component *codecs = asoc_link_to_codec(dai_link, 0);
5959 +@@ -366,6 +368,7 @@ static int graph_dai_link_of(struct asoc_simple_priv *priv,
5960 + struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
5961 + struct snd_soc_dai_link_component *cpus = asoc_link_to_cpu(dai_link, 0);
5962 + struct snd_soc_dai_link_component *codecs = asoc_link_to_codec(dai_link, 0);
5963 ++ struct snd_soc_dai_link_component *platforms = asoc_link_to_platform(dai_link, 0);
5964 + char dai_name[64];
5965 + int ret, is_single_links = 0;
5966 +
5967 +@@ -383,6 +386,7 @@ static int graph_dai_link_of(struct asoc_simple_priv *priv,
5968 + "%s-%s", cpus->dai_name, codecs->dai_name);
5969 +
5970 + asoc_simple_canonicalize_cpu(cpus, is_single_links);
5971 ++ asoc_simple_canonicalize_platform(platforms, cpus);
5972 +
5973 + ret = graph_link_init(priv, cpu_ep, codec_ep, li, dai_name);
5974 + if (ret < 0)
5975 +@@ -608,6 +612,7 @@ static int graph_count_noml(struct asoc_simple_priv *priv,
5976 +
5977 + li->num[li->link].cpus = 1;
5978 + li->num[li->link].codecs = 1;
5979 ++ li->num[li->link].platforms = 1;
5980 +
5981 + li->link += 1; /* 1xCPU-Codec */
5982 +
5983 +@@ -630,6 +635,7 @@ static int graph_count_dpcm(struct asoc_simple_priv *priv,
5984 +
5985 + if (li->cpu) {
5986 + li->num[li->link].cpus = 1;
5987 ++ li->num[li->link].platforms = 1;
5988 +
5989 + li->link++; /* 1xCPU-dummy */
5990 + } else {
5991 +diff --git a/tools/bootconfig/scripts/ftrace2bconf.sh b/tools/bootconfig/scripts/ftrace2bconf.sh
5992 +index a0c3bcc6da4f3..fb201d5afe2c1 100755
5993 +--- a/tools/bootconfig/scripts/ftrace2bconf.sh
5994 ++++ b/tools/bootconfig/scripts/ftrace2bconf.sh
5995 +@@ -222,8 +222,8 @@ instance_options() { # [instance-name]
5996 + emit_kv $PREFIX.cpumask = $val
5997 + fi
5998 + val=`cat $INSTANCE/tracing_on`
5999 +- if [ `echo $val | sed -e s/f//g`x != x ]; then
6000 +- emit_kv $PREFIX.tracing_on = $val
6001 ++ if [ "$val" = "0" ]; then
6002 ++ emit_kv $PREFIX.tracing_on = 0
6003 + fi
6004 +
6005 + val=
6006 +diff --git a/tools/perf/tests/bpf.c b/tools/perf/tests/bpf.c
6007 +index dbf5f5215abee..fa03ff0dc0831 100644
6008 +--- a/tools/perf/tests/bpf.c
6009 ++++ b/tools/perf/tests/bpf.c
6010 +@@ -192,7 +192,7 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
6011 + }
6012 +
6013 + if (count != expect * evlist->core.nr_entries) {
6014 +- pr_debug("BPF filter result incorrect, expected %d, got %d samples\n", expect, count);
6015 ++ pr_debug("BPF filter result incorrect, expected %d, got %d samples\n", expect * evlist->core.nr_entries, count);
6016 + goto out_delete_evlist;
6017 + }
6018 +
6019 +diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
6020 +index ee15db2be2f43..9ed9a5676d352 100644
6021 +--- a/tools/perf/util/dso.c
6022 ++++ b/tools/perf/util/dso.c
6023 +@@ -1349,6 +1349,16 @@ void dso__set_build_id(struct dso *dso, struct build_id *bid)
6024 +
6025 + bool dso__build_id_equal(const struct dso *dso, struct build_id *bid)
6026 + {
6027 ++ if (dso->bid.size > bid->size && dso->bid.size == BUILD_ID_SIZE) {
6028 ++ /*
6029 ++ * For the backward compatibility, it allows a build-id has
6030 ++ * trailing zeros.
6031 ++ */
6032 ++ return !memcmp(dso->bid.data, bid->data, bid->size) &&
6033 ++ !memchr_inv(&dso->bid.data[bid->size], 0,
6034 ++ dso->bid.size - bid->size);
6035 ++ }
6036 ++
6037 + return dso->bid.size == bid->size &&
6038 + memcmp(dso->bid.data, bid->data, dso->bid.size) == 0;
6039 + }
6040 +diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
6041 +index 77fc46ca07c07..0fc9a54107399 100644
6042 +--- a/tools/perf/util/symbol.c
6043 ++++ b/tools/perf/util/symbol.c
6044 +@@ -1581,10 +1581,6 @@ int dso__load_bfd_symbols(struct dso *dso, const char *debugfile)
6045 + if (bfd_get_flavour(abfd) == bfd_target_elf_flavour)
6046 + goto out_close;
6047 +
6048 +- section = bfd_get_section_by_name(abfd, ".text");
6049 +- if (section)
6050 +- dso->text_offset = section->vma - section->filepos;
6051 +-
6052 + symbols_size = bfd_get_symtab_upper_bound(abfd);
6053 + if (symbols_size == 0) {
6054 + bfd_close(abfd);
6055 +@@ -1602,6 +1598,22 @@ int dso__load_bfd_symbols(struct dso *dso, const char *debugfile)
6056 + if (symbols_count < 0)
6057 + goto out_free;
6058 +
6059 ++ section = bfd_get_section_by_name(abfd, ".text");
6060 ++ if (section) {
6061 ++ for (i = 0; i < symbols_count; ++i) {
6062 ++ if (!strcmp(bfd_asymbol_name(symbols[i]), "__ImageBase") ||
6063 ++ !strcmp(bfd_asymbol_name(symbols[i]), "__image_base__"))
6064 ++ break;
6065 ++ }
6066 ++ if (i < symbols_count) {
6067 ++ /* PE symbols can only have 4 bytes, so use .text high bits */
6068 ++ dso->text_offset = section->vma - (u32)section->vma;
6069 ++ dso->text_offset += (u32)bfd_asymbol_value(symbols[i]);
6070 ++ } else {
6071 ++ dso->text_offset = section->vma - section->filepos;
6072 ++ }
6073 ++ }
6074 ++
6075 + qsort(symbols, symbols_count, sizeof(asymbol *), bfd_symbols__cmpvalue);
6076 +
6077 + #ifdef bfd_get_section