commit: f5bcc74302a1750aa15305f7b61cd012d8162138
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Sep 15 10:12:46 2018 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Nov 14 11:36:24 2018 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=f5bcc743

Linux patch 4.18.8

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1007_linux-4.18.8.patch | 6654 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6658 insertions(+)

diff --git a/0000_README b/0000_README
index f3682ca..597262e 100644
--- a/0000_README
+++ b/0000_README
@@ -71,6 +71,10 @@ Patch: 1006_linux-4.18.7.patch
From: http://www.kernel.org
Desc: Linux 4.18.7

+Patch: 1007_linux-4.18.8.patch
+From: http://www.kernel.org
+Desc: Linux 4.18.8
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1007_linux-4.18.8.patch b/1007_linux-4.18.8.patch
new file mode 100644
index 0000000..8a888c7
--- /dev/null
+++ b/1007_linux-4.18.8.patch
@@ -0,0 +1,6654 @@
+diff --git a/Makefile b/Makefile
+index 711b04d00e49..0d73431f66cd 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 18
+-SUBLEVEL = 7
++SUBLEVEL = 8
+ EXTRAVERSION =
+ NAME = Merciless Moray
+
+diff --git a/arch/arm/mach-rockchip/Kconfig b/arch/arm/mach-rockchip/Kconfig
+index fafd3d7f9f8c..8ca926522026 100644
+--- a/arch/arm/mach-rockchip/Kconfig
++++ b/arch/arm/mach-rockchip/Kconfig
+@@ -17,6 +17,7 @@ config ARCH_ROCKCHIP
+ select ARM_GLOBAL_TIMER
+ select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
+ select ZONE_DMA if ARM_LPAE
++ select PM
+ help
+ Support for Rockchip's Cortex-A9 Single-to-Quad-Core-SoCs
+ containing the RK2928, RK30xx and RK31xx series.
+diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
+index d5aeac351fc3..21a715ad8222 100644
+--- a/arch/arm64/Kconfig.platforms
++++ b/arch/arm64/Kconfig.platforms
+@@ -151,6 +151,7 @@ config ARCH_ROCKCHIP
+ select GPIOLIB
+ select PINCTRL
+ select PINCTRL_ROCKCHIP
++ select PM
+ select ROCKCHIP_TIMER
+ help
+ This enables support for the ARMv8 based Rockchip chipsets,
+diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
+index 16b077801a5f..a4a718dbfec6 100644
+--- a/arch/powerpc/include/asm/topology.h
++++ b/arch/powerpc/include/asm/topology.h
+@@ -92,6 +92,7 @@ extern int stop_topology_update(void);
+ extern int prrn_is_enabled(void);
+ extern int find_and_online_cpu_nid(int cpu);
+ extern int timed_topology_update(int nsecs);
++extern void __init shared_proc_topology_init(void);
+ #else
+ static inline int start_topology_update(void)
+ {
+@@ -113,6 +114,10 @@ static inline int timed_topology_update(int nsecs)
+ {
+ return 0;
+ }
++
++#ifdef CONFIG_SMP
++static inline void shared_proc_topology_init(void) {}
++#endif
+ #endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */
+
+ #include <asm-generic/topology.h>
+diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
+index 468653ce844c..327f6112fe8e 100644
+--- a/arch/powerpc/include/asm/uaccess.h
++++ b/arch/powerpc/include/asm/uaccess.h
+@@ -250,10 +250,17 @@ do { \
+ } \
+ } while (0)
+
++/*
++ * This is a type: either unsigned long, if the argument fits into
++ * that type, or otherwise unsigned long long.
++ */
++#define __long_type(x) \
++ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
++
+ #define __get_user_nocheck(x, ptr, size) \
+ ({ \
+ long __gu_err; \
+- unsigned long __gu_val; \
++ __long_type(*(ptr)) __gu_val; \
+ const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+ __chk_user_ptr(ptr); \
+ if (!is_kernel_addr((unsigned long)__gu_addr)) \
+@@ -267,7 +274,7 @@ do { \
+ #define __get_user_check(x, ptr, size) \
+ ({ \
+ long __gu_err = -EFAULT; \
+- unsigned long __gu_val = 0; \
++ __long_type(*(ptr)) __gu_val = 0; \
+ const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+ might_fault(); \
+ if (access_ok(VERIFY_READ, __gu_addr, (size))) { \
+@@ -281,7 +288,7 @@ do { \
+ #define __get_user_nosleep(x, ptr, size) \
+ ({ \
+ long __gu_err; \
+- unsigned long __gu_val; \
++ __long_type(*(ptr)) __gu_val; \
+ const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+ __chk_user_ptr(ptr); \
+ barrier_nospec(); \
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index 285c6465324a..f817342aab8f 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -1526,6 +1526,8 @@ TRAMP_REAL_BEGIN(stf_barrier_fallback)
+ TRAMP_REAL_BEGIN(rfi_flush_fallback)
+ SET_SCRATCH0(r13);
+ GET_PACA(r13);
++ std r1,PACA_EXRFI+EX_R12(r13)
++ ld r1,PACAKSAVE(r13)
+ std r9,PACA_EXRFI+EX_R9(r13)
+ std r10,PACA_EXRFI+EX_R10(r13)
+ std r11,PACA_EXRFI+EX_R11(r13)
+@@ -1560,12 +1562,15 @@ TRAMP_REAL_BEGIN(rfi_flush_fallback)
+ ld r9,PACA_EXRFI+EX_R9(r13)
+ ld r10,PACA_EXRFI+EX_R10(r13)
+ ld r11,PACA_EXRFI+EX_R11(r13)
++ ld r1,PACA_EXRFI+EX_R12(r13)
+ GET_SCRATCH0(r13);
+ rfid
+
+ TRAMP_REAL_BEGIN(hrfi_flush_fallback)
+ SET_SCRATCH0(r13);
+ GET_PACA(r13);
++ std r1,PACA_EXRFI+EX_R12(r13)
++ ld r1,PACAKSAVE(r13)
+ std r9,PACA_EXRFI+EX_R9(r13)
+ std r10,PACA_EXRFI+EX_R10(r13)
+ std r11,PACA_EXRFI+EX_R11(r13)
+@@ -1600,6 +1605,7 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback)
+ ld r9,PACA_EXRFI+EX_R9(r13)
+ ld r10,PACA_EXRFI+EX_R10(r13)
+ ld r11,PACA_EXRFI+EX_R11(r13)
++ ld r1,PACA_EXRFI+EX_R12(r13)
+ GET_SCRATCH0(r13);
+ hrfid
+
+diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
+index 4794d6b4f4d2..b3142c7b9c31 100644
+--- a/arch/powerpc/kernel/smp.c
++++ b/arch/powerpc/kernel/smp.c
+@@ -1156,6 +1156,11 @@ void __init smp_cpus_done(unsigned int max_cpus)
+ if (smp_ops && smp_ops->bringup_done)
+ smp_ops->bringup_done();
+
++ /*
++ * On a shared LPAR, associativity needs to be requested.
++ * Hence, get numa topology before dumping cpu topology
++ */
++ shared_proc_topology_init();
+ dump_numa_cpu_topology();
+
+ /*
+diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
+index 0c7e05d89244..35ac5422903a 100644
+--- a/arch/powerpc/mm/numa.c
++++ b/arch/powerpc/mm/numa.c
+@@ -1078,7 +1078,6 @@ static int prrn_enabled;
+ static void reset_topology_timer(void);
+ static int topology_timer_secs = 1;
+ static int topology_inited;
+-static int topology_update_needed;
+
+ /*
+ * Change polling interval for associativity changes.
+@@ -1306,11 +1305,8 @@ int numa_update_cpu_topology(bool cpus_locked)
+ struct device *dev;
+ int weight, new_nid, i = 0;
+
+- if (!prrn_enabled && !vphn_enabled) {
+- if (!topology_inited)
+- topology_update_needed = 1;
++ if (!prrn_enabled && !vphn_enabled && topology_inited)
+ return 0;
+- }
+
+ weight = cpumask_weight(&cpu_associativity_changes_mask);
+ if (!weight)
+@@ -1423,7 +1419,6 @@ int numa_update_cpu_topology(bool cpus_locked)
+
+ out:
+ kfree(updates);
+- topology_update_needed = 0;
+ return changed;
+ }
+
+@@ -1551,6 +1546,15 @@ int prrn_is_enabled(void)
+ return prrn_enabled;
+ }
+
++void __init shared_proc_topology_init(void)
++{
++ if (lppaca_shared_proc(get_lppaca())) {
++ bitmap_fill(cpumask_bits(&cpu_associativity_changes_mask),
++ nr_cpumask_bits);
++ numa_update_cpu_topology(false);
++ }
++}
++
+ static int topology_read(struct seq_file *file, void *v)
+ {
+ if (vphn_enabled || prrn_enabled)
+@@ -1608,10 +1612,6 @@ static int topology_update_init(void)
+ return -ENOMEM;
+
+ topology_inited = 1;
+- if (topology_update_needed)
+- bitmap_fill(cpumask_bits(&cpu_associativity_changes_mask),
+- nr_cpumask_bits);
+-
+ return 0;
+ }
+ device_initcall(topology_update_init);
+diff --git a/arch/powerpc/platforms/85xx/t1042rdb_diu.c b/arch/powerpc/platforms/85xx/t1042rdb_diu.c
+index 58fa3d319f1c..dac36ba82fea 100644
+--- a/arch/powerpc/platforms/85xx/t1042rdb_diu.c
++++ b/arch/powerpc/platforms/85xx/t1042rdb_diu.c
+@@ -9,8 +9,10 @@
+ * option) any later version.
+ */
+
++#include <linux/init.h>
+ #include <linux/io.h>
+ #include <linux/kernel.h>
++#include <linux/module.h>
+ #include <linux/of.h>
+ #include <linux/of_address.h>
+
+@@ -150,3 +152,5 @@ static int __init t1042rdb_diu_init(void)
+ }
+
+ early_initcall(t1042rdb_diu_init);
++
++MODULE_LICENSE("GPL");
+diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
+index 2edc673be137..99d1152ae224 100644
+--- a/arch/powerpc/platforms/pseries/ras.c
++++ b/arch/powerpc/platforms/pseries/ras.c
+@@ -371,7 +371,7 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
+ int len, error_log_length;
+
+ error_log_length = 8 + rtas_error_extended_log_length(h);
+- len = max_t(int, error_log_length, RTAS_ERROR_LOG_MAX);
++ len = min_t(int, error_log_length, RTAS_ERROR_LOG_MAX);
+ memset(global_mce_data_buf, 0, RTAS_ERROR_LOG_MAX);
+ memcpy(global_mce_data_buf, h, len);
+ errhdr = (struct rtas_error_log *)global_mce_data_buf;
+diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c
+index eb69a5186243..280e964e1aa8 100644
+--- a/arch/powerpc/sysdev/mpic_msgr.c
++++ b/arch/powerpc/sysdev/mpic_msgr.c
+@@ -196,7 +196,7 @@ static int mpic_msgr_probe(struct platform_device *dev)
+
+ /* IO map the message register block. */
+ of_address_to_resource(np, 0, &rsrc);
+- msgr_block_addr = ioremap(rsrc.start, rsrc.end - rsrc.start);
++ msgr_block_addr = ioremap(rsrc.start, resource_size(&rsrc));
+ if (!msgr_block_addr) {
+ dev_err(&dev->dev, "Failed to iomap MPIC message registers");
+ return -EFAULT;
+diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
+index f6561b783b61..eed1c137f618 100644
+--- a/arch/riscv/kernel/vdso/Makefile
++++ b/arch/riscv/kernel/vdso/Makefile
+@@ -52,8 +52,8 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
+ # Add -lgcc so rv32 gets static muldi3 and lshrdi3 definitions.
+ # Make sure only to export the intended __vdso_xxx symbol offsets.
+ quiet_cmd_vdsold = VDSOLD $@
+- cmd_vdsold = $(CC) $(KCFLAGS) $(call cc-option, -no-pie) -nostdlib $(SYSCFLAGS_$(@F)) \
+- -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp -lgcc && \
++ cmd_vdsold = $(CC) $(KBUILD_CFLAGS) $(call cc-option, -no-pie) -nostdlib -nostartfiles $(SYSCFLAGS_$(@F)) \
++ -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp && \
+ $(CROSS_COMPILE)objcopy \
+ $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@
+
+diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
+index 9f5ea9d87069..9b0216d571ad 100644
+--- a/arch/s390/kernel/crash_dump.c
++++ b/arch/s390/kernel/crash_dump.c
+@@ -404,11 +404,13 @@ static void *get_vmcoreinfo_old(unsigned long *size)
+ if (copy_oldmem_kernel(nt_name, addr + sizeof(note),
+ sizeof(nt_name) - 1))
+ return NULL;
+- if (strcmp(nt_name, "VMCOREINFO") != 0)
++ if (strcmp(nt_name, VMCOREINFO_NOTE_NAME) != 0)
+ return NULL;
+ vmcoreinfo = kzalloc_panic(note.n_descsz);
+- if (copy_oldmem_kernel(vmcoreinfo, addr + 24, note.n_descsz))
++ if (copy_oldmem_kernel(vmcoreinfo, addr + 24, note.n_descsz)) {
++ kfree(vmcoreinfo);
+ return NULL;
++ }
+ *size = note.n_descsz;
+ return vmcoreinfo;
+ }
+@@ -418,15 +420,20 @@ static void *get_vmcoreinfo_old(unsigned long *size)
+ */
+ static void *nt_vmcoreinfo(void *ptr)
+ {
++ const char *name = VMCOREINFO_NOTE_NAME;
+ unsigned long size;
+ void *vmcoreinfo;
+
+ vmcoreinfo = os_info_old_entry(OS_INFO_VMCOREINFO, &size);
+- if (!vmcoreinfo)
+- vmcoreinfo = get_vmcoreinfo_old(&size);
++ if (vmcoreinfo)
++ return nt_init_name(ptr, 0, vmcoreinfo, size, name);
++
++ vmcoreinfo = get_vmcoreinfo_old(&size);
+ if (!vmcoreinfo)
+ return ptr;
+- return nt_init_name(ptr, 0, vmcoreinfo, size, "VMCOREINFO");
++ ptr = nt_init_name(ptr, 0, vmcoreinfo, size, name);
++ kfree(vmcoreinfo);
++ return ptr;
+ }
+
+ /*
+diff --git a/arch/um/Makefile b/arch/um/Makefile
+index e54dda8a0363..de340e41f3b2 100644
+--- a/arch/um/Makefile
++++ b/arch/um/Makefile
+@@ -122,8 +122,7 @@ archheaders:
+ $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.asm-generic \
+ kbuild-file=$(HOST_DIR)/include/uapi/asm/Kbuild \
+ obj=$(HOST_DIR)/include/generated/uapi/asm
+- $(Q)$(MAKE) KBUILD_SRC= ARCH=$(HEADER_ARCH) archheaders
+-
++ $(Q)$(MAKE) -f $(srctree)/Makefile ARCH=$(HEADER_ARCH) archheaders
+
+ archprepare: include/generated/user_constants.h
+
+diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
+index 8c7b3e5a2d01..3a17107594c8 100644
+--- a/arch/x86/include/asm/mce.h
++++ b/arch/x86/include/asm/mce.h
+@@ -148,6 +148,7 @@ enum mce_notifier_prios {
+ MCE_PRIO_LOWEST = 0,
+ };
+
++struct notifier_block;
+ extern void mce_register_decode_chain(struct notifier_block *nb);
+ extern void mce_unregister_decode_chain(struct notifier_block *nb);
+
+diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
+index bb035a4cbc8c..9eeb1359ec75 100644
+--- a/arch/x86/include/asm/pgtable-3level.h
++++ b/arch/x86/include/asm/pgtable-3level.h
+@@ -2,6 +2,8 @@
+ #ifndef _ASM_X86_PGTABLE_3LEVEL_H
+ #define _ASM_X86_PGTABLE_3LEVEL_H
+
++#include <asm/atomic64_32.h>
++
+ /*
+ * Intel Physical Address Extension (PAE) Mode - three-level page
+ * tables on PPro+ CPUs.
+@@ -147,10 +149,7 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
+ {
+ pte_t res;
+
+- /* xchg acts as a barrier before the setting of the high bits */
+- res.pte_low = xchg(&ptep->pte_low, 0);
+- res.pte_high = ptep->pte_high;
+- ptep->pte_high = 0;
++ res.pte = (pteval_t)arch_atomic64_xchg((atomic64_t *)ptep, 0);
+
+ return res;
+ }
+diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
+index 74392d9d51e0..a10481656d82 100644
+--- a/arch/x86/kernel/tsc.c
++++ b/arch/x86/kernel/tsc.c
+@@ -1343,7 +1343,7 @@ device_initcall(init_tsc_clocksource);
+
+ void __init tsc_early_delay_calibrate(void)
+ {
+- unsigned long lpj;
++ u64 lpj;
+
+ if (!boot_cpu_has(X86_FEATURE_TSC))
+ return;
+@@ -1355,7 +1355,7 @@ void __init tsc_early_delay_calibrate(void)
+ if (!tsc_khz)
+ return;
+
+- lpj = tsc_khz * 1000;
++ lpj = (u64)tsc_khz * 1000;
+ do_div(lpj, HZ);
+ loops_per_jiffy = lpj;
+ }
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index a44e568363a4..42f1ba92622a 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -221,6 +221,17 @@ static const u64 shadow_acc_track_saved_bits_mask = PT64_EPT_READABLE_MASK |
+ PT64_EPT_EXECUTABLE_MASK;
+ static const u64 shadow_acc_track_saved_bits_shift = PT64_SECOND_AVAIL_BITS_SHIFT;
+
++/*
++ * This mask must be set on all non-zero Non-Present or Reserved SPTEs in order
++ * to guard against L1TF attacks.
++ */
++static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
++
++/*
++ * The number of high-order 1 bits to use in the mask above.
++ */
++static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;
++
+ static void mmu_spte_set(u64 *sptep, u64 spte);
+
+ void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value)
+@@ -308,9 +319,13 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
+ {
+ unsigned int gen = kvm_current_mmio_generation(vcpu);
+ u64 mask = generation_mmio_spte_mask(gen);
++ u64 gpa = gfn << PAGE_SHIFT;
+
+ access &= ACC_WRITE_MASK | ACC_USER_MASK;
+- mask |= shadow_mmio_value | access | gfn << PAGE_SHIFT;
++ mask |= shadow_mmio_value | access;
++ mask |= gpa | shadow_nonpresent_or_rsvd_mask;
++ mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
++ << shadow_nonpresent_or_rsvd_mask_len;
+
+ trace_mark_mmio_spte(sptep, gfn, access, gen);
+ mmu_spte_set(sptep, mask);
+@@ -323,8 +338,14 @@ static bool is_mmio_spte(u64 spte)
+
+ static gfn_t get_mmio_spte_gfn(u64 spte)
+ {
+- u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask;
+- return (spte & ~mask) >> PAGE_SHIFT;
++ u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask |
++ shadow_nonpresent_or_rsvd_mask;
++ u64 gpa = spte & ~mask;
++
++ gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
++ & shadow_nonpresent_or_rsvd_mask;
++
++ return gpa >> PAGE_SHIFT;
+ }
+
+ static unsigned get_mmio_spte_access(u64 spte)
+@@ -381,7 +402,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
+ }
+ EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
+
+-static void kvm_mmu_clear_all_pte_masks(void)
++static void kvm_mmu_reset_all_pte_masks(void)
+ {
+ shadow_user_mask = 0;
+ shadow_accessed_mask = 0;
+@@ -391,6 +412,18 @@ static void kvm_mmu_clear_all_pte_masks(void)
+ shadow_mmio_mask = 0;
+ shadow_present_mask = 0;
+ shadow_acc_track_mask = 0;
++
++ /*
++ * If the CPU has 46 or less physical address bits, then set an
++ * appropriate mask to guard against L1TF attacks. Otherwise, it is
++ * assumed that the CPU is not vulnerable to L1TF.
++ */
++ if (boot_cpu_data.x86_phys_bits <
++ 52 - shadow_nonpresent_or_rsvd_mask_len)
++ shadow_nonpresent_or_rsvd_mask =
++ rsvd_bits(boot_cpu_data.x86_phys_bits -
++ shadow_nonpresent_or_rsvd_mask_len,
++ boot_cpu_data.x86_phys_bits - 1);
+ }
+
+ static int is_cpuid_PSE36(void)
+@@ -5500,7 +5533,7 @@ int kvm_mmu_module_init(void)
+ {
+ int ret = -ENOMEM;
+
+- kvm_mmu_clear_all_pte_masks();
++ kvm_mmu_reset_all_pte_masks();
+
+ pte_list_desc_cache = kmem_cache_create("pte_list_desc",
+ sizeof(struct pte_list_desc),
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index bedabcf33a3e..9869bfd0c601 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -939,17 +939,21 @@ struct vcpu_vmx {
+ /*
+ * loaded_vmcs points to the VMCS currently used in this vcpu. For a
+ * non-nested (L1) guest, it always points to vmcs01. For a nested
+- * guest (L2), it points to a different VMCS.
++ * guest (L2), it points to a different VMCS. loaded_cpu_state points
++ * to the VMCS whose state is loaded into the CPU registers that only
++ * need to be switched when transitioning to/from the kernel; a NULL
++ * value indicates that host state is loaded.
+ */
+ struct loaded_vmcs vmcs01;
+ struct loaded_vmcs *loaded_vmcs;
++ struct loaded_vmcs *loaded_cpu_state;
+ bool __launched; /* temporary, used in vmx_vcpu_run */
+ struct msr_autoload {
+ struct vmx_msrs guest;
+ struct vmx_msrs host;
+ } msr_autoload;
++
+ struct {
+- int loaded;
+ u16 fs_sel, gs_sel, ldt_sel;
+ #ifdef CONFIG_X86_64
+ u16 ds_sel, es_sel;
+@@ -2750,10 +2754,11 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
+ #endif
+ int i;
+
+- if (vmx->host_state.loaded)
++ if (vmx->loaded_cpu_state)
+ return;
+
+- vmx->host_state.loaded = 1;
++ vmx->loaded_cpu_state = vmx->loaded_vmcs;
++
+ /*
+ * Set host fs and gs selectors. Unfortunately, 22.2.3 does not
+ * allow segment selectors with cpl > 0 or ti == 1.
+@@ -2815,11 +2820,14 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
+
+ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
+ {
+- if (!vmx->host_state.loaded)
++ if (!vmx->loaded_cpu_state)
+ return;
+
++ WARN_ON_ONCE(vmx->loaded_cpu_state != vmx->loaded_vmcs);
++
+ ++vmx->vcpu.stat.host_state_reload;
+- vmx->host_state.loaded = 0;
++ vmx->loaded_cpu_state = NULL;
++
+ #ifdef CONFIG_X86_64
+ if (is_long_mode(&vmx->vcpu))
+ rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+@@ -8115,7 +8123,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
+
+ /* CPL=0 must be checked manually. */
+ if (vmx_get_cpl(vcpu)) {
+- kvm_queue_exception(vcpu, UD_VECTOR);
++ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+
+@@ -8179,7 +8187,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
+ static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
+ {
+ if (vmx_get_cpl(vcpu)) {
+- kvm_queue_exception(vcpu, UD_VECTOR);
++ kvm_inject_gp(vcpu, 0);
+ return 0;
+ }
+
+@@ -10517,8 +10525,8 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
+ return;
+
+ cpu = get_cpu();
+- vmx->loaded_vmcs = vmcs;
+ vmx_vcpu_put(vcpu);
++ vmx->loaded_vmcs = vmcs;
+ vmx_vcpu_load(vcpu, cpu);
+ put_cpu();
+ }
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 24c84aa87049..94cd63081471 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -6506,20 +6506,22 @@ static void kvm_set_mmio_spte_mask(void)
+ * Set the reserved bits and the present bit of an paging-structure
+ * entry to generate page fault with PFER.RSV = 1.
+ */
+- /* Mask the reserved physical address bits. */
+- mask = rsvd_bits(maxphyaddr, 51);
++
++ /*
++ * Mask the uppermost physical address bit, which would be reserved as
++ * long as the supported physical address width is less than 52.
++ */
++ mask = 1ull << 51;
+
+ /* Set the present bit. */
+ mask |= 1ull;
+
+-#ifdef CONFIG_X86_64
+ /*
+ * If reserved bit is not supported, clear the present bit to disable
+ * mmio page fault.
+ */
+- if (maxphyaddr == 52)
++ if (IS_ENABLED(CONFIG_X86_64) && maxphyaddr == 52)
+ mask &= ~1ull;
+-#endif
+
+ kvm_mmu_set_mmio_spte_mask(mask, mask);
+ }
+diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
+index 2c30cabfda90..071d82ec9abb 100644
+--- a/arch/x86/xen/mmu_pv.c
++++ b/arch/x86/xen/mmu_pv.c
+@@ -434,14 +434,13 @@ static void xen_set_pud(pud_t *ptr, pud_t val)
+ static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
+ {
+ trace_xen_mmu_set_pte_atomic(ptep, pte);
+- set_64bit((u64 *)ptep, native_pte_val(pte));
++ __xen_set_pte(ptep, pte);
+ }
+
+ static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+ {
+ trace_xen_mmu_pte_clear(mm, addr, ptep);
+- if (!xen_batched_set_pte(ptep, native_make_pte(0)))
+- native_pte_clear(mm, addr, ptep);
++ __xen_set_pte(ptep, native_make_pte(0));
+ }
+
+ static void xen_pmd_clear(pmd_t *pmdp)
+@@ -1571,7 +1570,7 @@ static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
+ pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
+ pte_val_ma(pte));
+ #endif
+- native_set_pte(ptep, pte);
++ __xen_set_pte(ptep, pte);
+ }
+
+ /* Early in boot, while setting up the initial pagetable, assume
+diff --git a/block/bio.c b/block/bio.c
+index 047c5dca6d90..ff94640bc734 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -156,7 +156,7 @@ out:
+
+ unsigned int bvec_nr_vecs(unsigned short idx)
+ {
+- return bvec_slabs[idx].nr_vecs;
++ return bvec_slabs[--idx].nr_vecs;
+ }
+
+ void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 1646ea85dade..746a5eac4541 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -2159,7 +2159,9 @@ static inline bool should_fail_request(struct hd_struct *part,
+
+ static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
+ {
+- if (part->policy && op_is_write(bio_op(bio))) {
++ const int op = bio_op(bio);
++
++ if (part->policy && (op_is_write(op) && !op_is_flush(op))) {
+ char b[BDEVNAME_SIZE];
+
+ WARN_ONCE(1,
+diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
+index 3de0836163c2..d5f2c21d8531 100644
+--- a/block/blk-mq-tag.c
++++ b/block/blk-mq-tag.c
+@@ -23,6 +23,9 @@ bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
+
+ /*
+ * If a previously inactive queue goes active, bump the active user count.
++ * We need to do this before try to allocate driver tag, then even if fail
++ * to get tag when first time, the other shared-tag users could reserve
++ * budget for it.
+ */
+ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
+ {
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 654b0dc7e001..2f9e14361673 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -285,7 +285,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
+ rq->tag = -1;
+ rq->internal_tag = tag;
+ } else {
+- if (blk_mq_tag_busy(data->hctx)) {
++ if (data->hctx->flags & BLK_MQ_F_TAG_SHARED) {
+ rq_flags = RQF_MQ_INFLIGHT;
+ atomic_inc(&data->hctx->nr_active);
+ }
+@@ -367,6 +367,8 @@ static struct request *blk_mq_get_request(struct request_queue *q,
+ if (!op_is_flush(op) && e->type->ops.mq.limit_depth &&
+ !(data->flags & BLK_MQ_REQ_RESERVED))
+ e->type->ops.mq.limit_depth(op, data);
++ } else {
++ blk_mq_tag_busy(data->hctx);
+ }
+
+ tag = blk_mq_get_tag(data);
+@@ -970,6 +972,7 @@ bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
+ .hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
+ .flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
+ };
++ bool shared;
+
+ might_sleep_if(wait);
+
+@@ -979,9 +982,10 @@ bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
+ if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
+ data.flags |= BLK_MQ_REQ_RESERVED;
+
++ shared = blk_mq_tag_busy(data.hctx);
+ rq->tag = blk_mq_get_tag(&data);
+ if (rq->tag >= 0) {
+- if (blk_mq_tag_busy(data.hctx)) {
++ if (shared) {
+ rq->rq_flags |= RQF_MQ_INFLIGHT;
+ atomic_inc(&data.hctx->nr_active);
+ }
+diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
+index 82b6c27b3245..f6f180f3aa1c 100644
+--- a/block/cfq-iosched.c
++++ b/block/cfq-iosched.c
+@@ -4735,12 +4735,13 @@ USEC_SHOW_FUNCTION(cfq_target_latency_us_show, cfqd->cfq_target_latency);
+ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
+ { \
+ struct cfq_data *cfqd = e->elevator_data; \
+- unsigned int __data; \
++ unsigned int __data, __min = (MIN), __max = (MAX); \
++ \
+ cfq_var_store(&__data, (page)); \
+- if (__data < (MIN)) \
+- __data = (MIN); \
+- else if (__data > (MAX)) \
+- __data = (MAX); \
++ if (__data < __min) \
++ __data = __min; \
++ else if (__data > __max) \
++ __data = __max; \
+ if (__CONV) \
+ *(__PTR) = (u64)__data * NSEC_PER_MSEC; \
+ else \
+@@ -4769,12 +4770,13 @@ STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX,
+ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
+ { \
+ struct cfq_data *cfqd = e->elevator_data; \
+- unsigned int __data; \
++ unsigned int __data, __min = (MIN), __max = (MAX); \
++ \
+ cfq_var_store(&__data, (page)); \
+- if (__data < (MIN)) \
+- __data = (MIN); \
+- else if (__data > (MAX)) \
+- __data = (MAX); \
++ if (__data < __min) \
++ __data = __min; \
++ else if (__data > __max) \
++ __data = __max; \
+ *(__PTR) = (u64)__data * NSEC_PER_USEC; \
+ return count; \
+ }
+diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
+index 3de794bcf8fa..69603ba52a3a 100644
+--- a/drivers/acpi/acpica/hwregs.c
++++ b/drivers/acpi/acpica/hwregs.c
+@@ -528,13 +528,18 @@ acpi_status acpi_hw_register_read(u32 register_id, u32 *return_value)
+
+ status =
+ acpi_hw_read(&value64, &acpi_gbl_FADT.xpm2_control_block);
+- value = (u32)value64;
++ if (ACPI_SUCCESS(status)) {
++ value = (u32)value64;
++ }
+ break;
+
+ case ACPI_REGISTER_PM_TIMER: /* 32-bit access */
+
+ status = acpi_hw_read(&value64, &acpi_gbl_FADT.xpm_timer_block);
+- value = (u32)value64;
++ if (ACPI_SUCCESS(status)) {
++ value = (u32)value64;
++ }
++
+ break;
+
+ case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index 970dd87d347c..6799d00dd790 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -1612,7 +1612,8 @@ static int acpi_add_single_object(struct acpi_device **child,
+ * Note this must be done before the get power-/wakeup_dev-flags calls.
+ */
+ if (type == ACPI_BUS_TYPE_DEVICE)
+- acpi_bus_get_status(device);
++ if (acpi_bus_get_status(device) < 0)
++ acpi_set_device_status(device, 0);
+
+ acpi_bus_get_power_flags(device);
+ acpi_bus_get_wakeup_device_flags(device);
+@@ -1690,7 +1691,7 @@ static int acpi_bus_type_and_status(acpi_handle handle, int *type,
+ * acpi_add_single_object updates this once we've an acpi_device
+ * so that acpi_bus_get_status' quirk handling can be used.
+ */
+- *sta = 0;
++ *sta = ACPI_STA_DEFAULT;
+ break;
+ case ACPI_TYPE_PROCESSOR:
+ *type = ACPI_BUS_TYPE_PROCESSOR;
+diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
+index 2a8634a52856..5a628148f3f0 100644
+--- a/drivers/clk/rockchip/clk-rk3399.c
++++ b/drivers/clk/rockchip/clk-rk3399.c
+@@ -1523,6 +1523,7 @@ static const char *const rk3399_pmucru_critical_clocks[] __initconst = {
+ "pclk_pmu_src",
+ "fclk_cm0s_src_pmu",
+ "clk_timer_src_pmu",
++ "pclk_rkpwm_pmu",
+ };
+
+ static void __init rk3399_clk_init(struct device_node *np)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 7dcbac8af9a7..b60aa7d43cb7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1579,9 +1579,9 @@ struct amdgpu_device {
+ DECLARE_HASHTABLE(mn_hash, 7);
+
+ /* tracking pinned memory */
+- u64 vram_pin_size;
+- u64 invisible_pin_size;
+- u64 gart_pin_size;
++ atomic64_t vram_pin_size;
++ atomic64_t visible_pin_size;
++ atomic64_t gart_pin_size;
+
+ /* amdkfd interface */
+ struct kfd_dev *kfd;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 9c85a90be293..5a196ec49be8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -257,7 +257,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
+ return;
+ }
+
+- total_vram = adev->gmc.real_vram_size - adev->vram_pin_size;
++ total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
+ used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+ free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index 91517b166a3b..063f9aa96946 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -494,13 +494,13 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ case AMDGPU_INFO_VRAM_GTT: {
+ struct drm_amdgpu_info_vram_gtt vram_gtt;
+
+- vram_gtt.vram_size = adev->gmc.real_vram_size;
+- vram_gtt.vram_size -= adev->vram_pin_size;
+- vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size;
+- vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
++ vram_gtt.vram_size = adev->gmc.real_vram_size -
++ atomic64_read(&adev->vram_pin_size);
++ vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size -
++ atomic64_read(&adev->visible_pin_size);
+ vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
+ vram_gtt.gtt_size *= PAGE_SIZE;
+- vram_gtt.gtt_size -= adev->gart_pin_size;
++ vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
+ return copy_to_user(out, &vram_gtt,
+ min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
+ }
+@@ -509,17 +509,16 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+
+ memset(&mem, 0, sizeof(mem));
+ mem.vram.total_heap_size = adev->gmc.real_vram_size;
+- mem.vram.usable_heap_size =
+- adev->gmc.real_vram_size - adev->vram_pin_size;
++ mem.vram.usable_heap_size = adev->gmc.real_vram_size -
++ atomic64_read(&adev->vram_pin_size);
+ mem.vram.heap_usage =
+ amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+ mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
+
+ mem.cpu_accessible_vram.total_heap_size =
+ adev->gmc.visible_vram_size;
+- mem.cpu_accessible_vram.usable_heap_size =
+- adev->gmc.visible_vram_size -
+- (adev->vram_pin_size - adev->invisible_pin_size);
++ mem.cpu_accessible_vram.usable_heap_size = adev->gmc.visible_vram_size -
++ atomic64_read(&adev->visible_pin_size);
+ mem.cpu_accessible_vram.heap_usage =
+ amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+ mem.cpu_accessible_vram.max_allocation =
+@@ -527,8 +526,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+
+ mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
+ mem.gtt.total_heap_size *= PAGE_SIZE;
+- mem.gtt.usable_heap_size = mem.gtt.total_heap_size
+- - adev->gart_pin_size;
++ mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
++ atomic64_read(&adev->gart_pin_size);
+ mem.gtt.heap_usage =
+ amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
+ mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index 3526efa8960e..3873c3353020 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -50,11 +50,35 @@ static bool amdgpu_need_backup(struct amdgpu_device *adev)
+ return true;
+ }
+
++/**
++ * amdgpu_bo_subtract_pin_size - Remove BO from pin_size accounting
++ *
++ * @bo: &amdgpu_bo buffer object
++ *
++ * This function is called when a BO stops being pinned, and updates the
++ * &amdgpu_device pin_size values accordingly.
++ */
++static void amdgpu_bo_subtract_pin_size(struct amdgpu_bo *bo)
++{
++ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
++
++ if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
++ atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
++ atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
++ &adev->visible_pin_size);
++ } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
++ atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
++ }
++}
++
+ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+ {
+ struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
+ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
+
++ if (bo->pin_count > 0)
++ amdgpu_bo_subtract_pin_size(bo);
++
+ if (bo->kfd_bo)
+ amdgpu_amdkfd_unreserve_system_memory_limit(bo);
+
+@@ -761,10 +785,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
+
+ domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+ if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+- adev->vram_pin_size += amdgpu_bo_size(bo);
+- adev->invisible_pin_size += amdgpu_vram_mgr_bo_invisible_size(bo);
++ atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
++ atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
++ &adev->visible_pin_size);
+ } else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
+- adev->gart_pin_size += amdgpu_bo_size(bo);
++ atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
+ }
+
+ error:
+@@ -790,12 +815,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
+ if (bo->pin_count)
+ return 0;
+
+- if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+- adev->vram_pin_size -= amdgpu_bo_size(bo);
+- adev->invisible_pin_size -= amdgpu_vram_mgr_bo_invisible_size(bo);
+- } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+- adev->gart_pin_size -= amdgpu_bo_size(bo);
+- }
++ amdgpu_bo_subtract_pin_size(bo);
+
+ for (i = 0; i < bo->placement.num_placement; i++) {
+ bo->placements[i].lpfn = 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index a44c3d58fef4..2ec20348b983 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -1157,7 +1157,7 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
+ int r, size = sizeof(vddnb);
+
+ /* only APUs have vddnb */
+- if (adev->flags & AMD_IS_APU)
++ if (!(adev->flags & AMD_IS_APU))
+ return -EINVAL;
+
+ /* Can't get voltage when the card is off */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index 9f1a5bd39ae8..5b39d1399630 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -131,6 +131,11 @@ psp_cmd_submit_buf(struct psp_context *psp,
+ msleep(1);
+ }
+
++ if (ucode) {
++ ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
++ ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
++ }
++
+ return ret;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+index 86a0715d9431..1cafe8d83a4d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+@@ -53,9 +53,8 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
+ int fd,
+ enum drm_sched_priority priority)
+ {
+- struct file *filp = fcheck(fd);
++ struct file *filp = fget(fd);
+ struct drm_file *file;
+- struct pid *pid;
+ struct amdgpu_fpriv *fpriv;
+ struct amdgpu_ctx *ctx;
+ uint32_t id;
+@@ -63,20 +62,12 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
+ if (!filp)
+ return -EINVAL;
+
+- pid = get_pid(((struct drm_file *)filp->private_data)->pid);
++ file = filp->private_data;
++ fpriv = file->driver_priv;
++ idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
++ amdgpu_ctx_priority_override(ctx, priority);
+
+- mutex_lock(&adev->ddev->filelist_mutex);
+- list_for_each_entry(file, &adev->ddev->filelist, lhead) {
+- if (file->pid != pid)
+- continue;
+-
+- fpriv = file->driver_priv;
+- idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
+- amdgpu_ctx_priority_override(ctx, priority);
+- }
+- mutex_unlock(&adev->ddev->filelist_mutex);
+-
+- put_pid(pid);
++ fput(filp);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+index e5da4654b630..8b3cc6687769 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+@@ -73,7 +73,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
+ uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
+ int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
+
+-u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo);
++u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
+ uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
+ uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+index 08e38579af24..bdc472b6e641 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+@@ -194,6 +194,7 @@ enum AMDGPU_UCODE_ID {
+ AMDGPU_UCODE_ID_SMC,
+ AMDGPU_UCODE_ID_UVD,
+ AMDGPU_UCODE_ID_VCE,
++ AMDGPU_UCODE_ID_VCN,
+ AMDGPU_UCODE_ID_MAXIMUM,
+ };
+
+@@ -226,6 +227,9 @@ struct amdgpu_firmware_info {
+ void *kaddr;
+ /* ucode_size_bytes */
+ uint32_t ucode_size;
++ /* starting tmr mc address */
++ uint32_t tmr_mc_addr_lo;
++ uint32_t tmr_mc_addr_hi;
+ };
+
+ void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index 1b4ad9b2a755..bee49991c1ff 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -111,9 +111,10 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
+ version_major, version_minor, family_id);
+ }
+
+- bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
+- + AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
++ bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
+ + AMDGPU_VCN_SESSION_SIZE * 40;
++ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
++ bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
+ r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
+ &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
+@@ -187,11 +188,13 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
+ unsigned offset;
+
+ hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+- offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
+- memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
+- le32_to_cpu(hdr->ucode_size_bytes));
+- size -= le32_to_cpu(hdr->ucode_size_bytes);
+- ptr += le32_to_cpu(hdr->ucode_size_bytes);
++ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
++ offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
++ memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
++ le32_to_cpu(hdr->ucode_size_bytes));
++ size -= le32_to_cpu(hdr->ucode_size_bytes);
++ ptr += le32_to_cpu(hdr->ucode_size_bytes);
++ }
+ memset_io(ptr, 0, size);
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+index b6333f92ba45..ef4784458800 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+@@ -97,33 +97,29 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
+ }
+
+ /**
+- * amdgpu_vram_mgr_bo_invisible_size - CPU invisible BO size
++ * amdgpu_vram_mgr_bo_visible_size - CPU visible BO size
+ *
+ * @bo: &amdgpu_bo buffer object (must be in VRAM)
+ *
+ * Returns:
+- * How much of the given &amdgpu_bo buffer object lies in CPU invisible VRAM.
++ * How much of the given &amdgpu_bo buffer object lies in CPU visible VRAM.
+ */
+-u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo)
++u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
+ {
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct ttm_mem_reg *mem = &bo->tbo.mem;
+ struct drm_mm_node *nodes = mem->mm_node;
+ unsigned pages = mem->num_pages;
+- u64 usage = 0;
++ u64 usage;
+
+ if (adev->gmc.visible_vram_size == adev->gmc.real_vram_size)
+- return 0;
++ return amdgpu_bo_size(bo);
+
+ if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
+- return amdgpu_bo_size(bo);
++ return 0;
+
+- while (nodes && pages) {
+- usage += nodes->size << PAGE_SHIFT;
+- usage -= amdgpu_vram_mgr_vis_size(adev, nodes);
+- pages -= nodes->size;
+- ++nodes;
+- }
++ for (usage = 0; nodes && pages; pages -= nodes->size, nodes++)
++ usage += amdgpu_vram_mgr_vis_size(adev, nodes);
+
+ return usage;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index a69153435ea7..8f0ac805ecd2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -3433,7 +3433,7 @@ static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
+
+ /* wait for RLC_SAFE_MODE */
+ for (i = 0; i < adev->usec_timeout; i++) {
+- if (!REG_GET_FIELD(SOC15_REG_OFFSET(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
++ if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
+ break;
+ udelay(1);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+index 0ff136d02d9b..02be34e72ed9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+@@ -88,6 +88,9 @@ psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *
+ case AMDGPU_UCODE_ID_VCE:
+ *type = GFX_FW_TYPE_VCE;
+ break;
++ case AMDGPU_UCODE_ID_VCN:
++ *type = GFX_FW_TYPE_VCN;
++ break;
+ case AMDGPU_UCODE_ID_MAXIMUM:
+ default:
+ return -EINVAL;
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+index bfddf97dd13e..a16eebc05d12 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+@@ -1569,7 +1569,6 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
+ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_UVD,
+ .align_mask = 0xf,
+- .nop = PACKET0(mmUVD_NO_OP, 0),
+ .support_64bit_ptrs = false,
+ .get_rptr = uvd_v6_0_ring_get_rptr,
+ .get_wptr = uvd_v6_0_ring_get_wptr,
+@@ -1587,7 +1586,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
+ .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
+ .test_ring = uvd_v6_0_ring_test_ring,
+ .test_ib = amdgpu_uvd_ring_test_ib,
+- .insert_nop = amdgpu_ring_insert_nop,
++ .insert_nop = uvd_v6_0_ring_insert_nop,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_uvd_ring_begin_use,
+ .end_use = amdgpu_uvd_ring_end_use,
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 29684c3ea4ef..700119168067 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -90,6 +90,16 @@ static int vcn_v1_0_sw_init(void *handle)
+ if (r)
+ return r;
+
++ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
++ const struct common_firmware_header *hdr;
++ hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
++ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
++ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
++ adev->firmware.fw_size +=
++ ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
++ DRM_INFO("PSP loading VCN firmware\n");
++ }
++
+ r = amdgpu_vcn_resume(adev);
+ if (r)
+ return r;
+@@ -241,26 +251,38 @@ static int vcn_v1_0_resume(void *handle)
+ static void vcn_v1_0_mc_resume(struct amdgpu_device *adev)
+ {
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+-
+- WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
++ uint32_t offset;
++
++ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
++ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
++ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
++ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
++ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
++ WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
++ offset = 0;
++ } else {
++ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.gpu_addr));
+- WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
++ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.gpu_addr));
+- WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
+- AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
++ offset = size;
++ WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
++ AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
++ }
++
+ WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);
+
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
+- lower_32_bits(adev->vcn.gpu_addr + size));
++ lower_32_bits(adev->vcn.gpu_addr + offset));
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
+- upper_32_bits(adev->vcn.gpu_addr + size));
++ upper_32_bits(adev->vcn.gpu_addr + offset));
+ WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
+ WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_HEAP_SIZE);
+
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
+- lower_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
++ lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE));
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
+- upper_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
++ upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE));
+ WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
+ WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2,
+ AMDGPU_VCN_STACK_SIZE + (AMDGPU_VCN_SESSION_SIZE * 40));
1316 |
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |
1317 |
+index 770c6b24be0b..e484d0a94bdc 100644 |
1318 |
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |
1319 |
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |
1320 |
+@@ -1334,6 +1334,7 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm) |
1321 |
+ struct backlight_properties props = { 0 }; |
1322 |
+ |
1323 |
+ props.max_brightness = AMDGPU_MAX_BL_LEVEL; |
1324 |
++ props.brightness = AMDGPU_MAX_BL_LEVEL; |
1325 |
+ props.type = BACKLIGHT_RAW; |
1326 |
+ |
1327 |
+ snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d", |
1328 |
+@@ -2123,13 +2124,8 @@ convert_color_depth_from_display_info(const struct drm_connector *connector) |
1329 |
+ static enum dc_aspect_ratio |
1330 |
+ get_aspect_ratio(const struct drm_display_mode *mode_in) |
1331 |
+ { |
1332 |
+- int32_t width = mode_in->crtc_hdisplay * 9; |
1333 |
+- int32_t height = mode_in->crtc_vdisplay * 16; |
1334 |
+- |
1335 |
+- if ((width - height) < 10 && (width - height) > -10) |
1336 |
+- return ASPECT_RATIO_16_9; |
1337 |
+- else |
1338 |
+- return ASPECT_RATIO_4_3; |
1339 |
++ /* 1-1 mapping, since both enums follow the HDMI spec. */ |
1340 |
++ return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio; |
1341 |
+ } |
1342 |
+ |
1343 |
+ static enum dc_color_space |
1344 |
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c |
1345 |
+index 52f2c01349e3..9bfb040352e9 100644 |
1346 |
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c |
1347 |
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c |
1348 |
+@@ -98,10 +98,16 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name, |
1349 |
+ */ |
1350 |
+ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc) |
1351 |
+ { |
1352 |
+- struct dm_crtc_state *crtc_state = to_dm_crtc_state(crtc->state); |
1353 |
+- struct dc_stream_state *stream_state = crtc_state->stream; |
1354 |
++ struct dm_crtc_state *crtc_state; |
1355 |
++ struct dc_stream_state *stream_state; |
1356 |
+ uint32_t crcs[3]; |
1357 |
+ |
1358 |
++ if (crtc == NULL) |
1359 |
++ return; |
1360 |
++ |
1361 |
++ crtc_state = to_dm_crtc_state(crtc->state); |
1362 |
++ stream_state = crtc_state->stream; |
1363 |
++ |
1364 |
+ /* Early return if CRC capture is not enabled. */ |
1365 |
+ if (!crtc_state->crc_enabled) |
1366 |
+ return; |
1367 |
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c |
1368 |
+index 651e1fd4622f..a558bfaa0c46 100644 |
1369 |
+--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c |
1370 |
++++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c |
1371 |
+@@ -808,6 +808,24 @@ static enum bp_result transmitter_control_v1_5( |
1372 |
+ * (=1: 8bpp, =1.25: 10bpp, =1.5:12bpp, =2: 16bpp) |
1373 |
+ * LVDS mode: usPixelClock = pixel clock |
1374 |
+ */ |
1375 |
++ if (cntl->signal == SIGNAL_TYPE_HDMI_TYPE_A) { |
1376 |
++ switch (cntl->color_depth) { |
1377 |
++ case COLOR_DEPTH_101010: |
1378 |
++ params.usSymClock = |
1379 |
++ cpu_to_le16((le16_to_cpu(params.usSymClock) * 30) / 24); |
1380 |
++ break; |
1381 |
++ case COLOR_DEPTH_121212: |
1382 |
++ params.usSymClock = |
1383 |
++ cpu_to_le16((le16_to_cpu(params.usSymClock) * 36) / 24); |
1384 |
++ break; |
1385 |
++ case COLOR_DEPTH_161616: |
1386 |
++ params.usSymClock = |
1387 |
++ cpu_to_le16((le16_to_cpu(params.usSymClock) * 48) / 24); |
1388 |
++ break; |
1389 |
++ default: |
1390 |
++ break; |
1391 |
++ } |
1392 |
++ } |
1393 |
+ |
1394 |
+ if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params)) |
1395 |
+ result = BP_RESULT_OK; |
1396 |
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c |
1397 |
+index 2fa521812d23..8a7890b03d97 100644 |
1398 |
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c |
1399 |
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c |
1400 |
+@@ -728,6 +728,17 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) |
1401 |
+ break; |
1402 |
+ case EDID_NO_RESPONSE: |
1403 |
+ DC_LOG_ERROR("No EDID read.\n"); |
1404 |
++ |
1405 |
++ /* |
1406 |
++ * Abort detection for non-DP connectors if we have |
1407 |
++ * no EDID |
1408 |
++ * |
1409 |
++ * DP needs to report as connected if HDP is high |
1410 |
++ * even if we have no EDID in order to go to |
1411 |
++ * fail-safe mode |
1412 |
++ */ |
1413 |
++ if (!dc_is_dp_signal(link->connector_signal)) |
1414 |
++ return false; |
1415 |
+ default: |
1416 |
+ break; |
1417 |
+ } |
1418 |
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c |
1419 |
+index 751f3ac9d921..754b4c2fc90a 100644 |
1420 |
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c |
1421 |
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c |
1422 |
+@@ -268,24 +268,30 @@ bool resource_construct( |
1423 |
+ |
1424 |
+ return true; |
1425 |
+ } |
1426 |
++static int find_matching_clock_source( |
1427 |
++ const struct resource_pool *pool, |
1428 |
++ struct clock_source *clock_source) |
1429 |
++{ |
1430 |
+ |
1431 |
++ int i; |
1432 |
++ |
1433 |
++ for (i = 0; i < pool->clk_src_count; i++) { |
1434 |
++ if (pool->clock_sources[i] == clock_source) |
1435 |
++ return i; |
1436 |
++ } |
1437 |
++ return -1; |
1438 |
++} |
1439 |
+ |
1440 |
+ void resource_unreference_clock_source( |
1441 |
+ struct resource_context *res_ctx, |
1442 |
+ const struct resource_pool *pool, |
1443 |
+ struct clock_source *clock_source) |
1444 |
+ { |
1445 |
+- int i; |
1446 |
+- |
1447 |
+- for (i = 0; i < pool->clk_src_count; i++) { |
1448 |
+- if (pool->clock_sources[i] != clock_source) |
1449 |
+- continue; |
1450 |
++ int i = find_matching_clock_source(pool, clock_source); |
1451 |
+ |
1452 |
++ if (i > -1) |
1453 |
+ res_ctx->clock_source_ref_count[i]--; |
1454 |
+ |
1455 |
+- break; |
1456 |
+- } |
1457 |
+- |
1458 |
+ if (pool->dp_clock_source == clock_source) |
1459 |
+ res_ctx->dp_clock_source_ref_count--; |
1460 |
+ } |
1461 |
+@@ -295,19 +301,31 @@ void resource_reference_clock_source( |
1462 |
+ const struct resource_pool *pool, |
1463 |
+ struct clock_source *clock_source) |
1464 |
+ { |
1465 |
+- int i; |
1466 |
+- for (i = 0; i < pool->clk_src_count; i++) { |
1467 |
+- if (pool->clock_sources[i] != clock_source) |
1468 |
+- continue; |
1469 |
++ int i = find_matching_clock_source(pool, clock_source); |
1470 |
+ |
1471 |
++ if (i > -1) |
1472 |
+ res_ctx->clock_source_ref_count[i]++; |
1473 |
+- break; |
1474 |
+- } |
1475 |
+ |
1476 |
+ if (pool->dp_clock_source == clock_source) |
1477 |
+ res_ctx->dp_clock_source_ref_count++; |
1478 |
+ } |
1479 |
+ |
1480 |
++int resource_get_clock_source_reference( |
1481 |
++ struct resource_context *res_ctx, |
1482 |
++ const struct resource_pool *pool, |
1483 |
++ struct clock_source *clock_source) |
1484 |
++{ |
1485 |
++ int i = find_matching_clock_source(pool, clock_source); |
1486 |
++ |
1487 |
++ if (i > -1) |
1488 |
++ return res_ctx->clock_source_ref_count[i]; |
1489 |
++ |
1490 |
++ if (pool->dp_clock_source == clock_source) |
1491 |
++ return res_ctx->dp_clock_source_ref_count; |
1492 |
++ |
1493 |
++ return -1; |
1494 |
++} |
1495 |
++ |
1496 |
+ bool resource_are_streams_timing_synchronizable( |
1497 |
+ struct dc_stream_state *stream1, |
1498 |
+ struct dc_stream_state *stream2) |
1499 |
+@@ -330,6 +348,9 @@ bool resource_are_streams_timing_synchronizable( |
1500 |
+ != stream2->timing.pix_clk_khz) |
1501 |
+ return false; |
1502 |
+ |
1503 |
++ if (stream1->clamping.c_depth != stream2->clamping.c_depth) |
1504 |
++ return false; |
1505 |
++ |
1506 |
+ if (stream1->phy_pix_clk != stream2->phy_pix_clk |
1507 |
+ && (!dc_is_dp_signal(stream1->signal) |
1508 |
+ || !dc_is_dp_signal(stream2->signal))) |
1509 |
+@@ -337,6 +358,20 @@ bool resource_are_streams_timing_synchronizable( |
1510 |
+ |
1511 |
+ return true; |
1512 |
+ } |
1513 |
++static bool is_dp_and_hdmi_sharable( |
1514 |
++ struct dc_stream_state *stream1, |
1515 |
++ struct dc_stream_state *stream2) |
1516 |
++{ |
1517 |
++ if (stream1->ctx->dc->caps.disable_dp_clk_share) |
1518 |
++ return false; |
1519 |
++ |
1520 |
++ if (stream1->clamping.c_depth != COLOR_DEPTH_888 || |
1521 |
++ stream2->clamping.c_depth != COLOR_DEPTH_888) |
1522 |
++ return false; |
1523 |
++ |
1524 |
++ return true; |
1525 |
++ |
1526 |
++} |
1527 |
+ |
1528 |
+ static bool is_sharable_clk_src( |
1529 |
+ const struct pipe_ctx *pipe_with_clk_src, |
1530 |
+@@ -348,7 +383,10 @@ static bool is_sharable_clk_src( |
1531 |
+ if (pipe_with_clk_src->stream->signal == SIGNAL_TYPE_VIRTUAL) |
1532 |
+ return false; |
1533 |
+ |
1534 |
+- if (dc_is_dp_signal(pipe_with_clk_src->stream->signal)) |
1535 |
++ if (dc_is_dp_signal(pipe_with_clk_src->stream->signal) || |
1536 |
++ (dc_is_dp_signal(pipe->stream->signal) && |
1537 |
++ !is_dp_and_hdmi_sharable(pipe_with_clk_src->stream, |
1538 |
++ pipe->stream))) |
1539 |
+ return false; |
1540 |
+ |
1541 |
+ if (dc_is_hdmi_signal(pipe_with_clk_src->stream->signal) |
1542 |
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h |
1543 |
+index 53c71296f3dd..efe155d50668 100644 |
1544 |
+--- a/drivers/gpu/drm/amd/display/dc/dc.h |
1545 |
++++ b/drivers/gpu/drm/amd/display/dc/dc.h |
1546 |
+@@ -77,6 +77,7 @@ struct dc_caps { |
1547 |
+ bool dual_link_dvi; |
1548 |
+ bool post_blend_color_processing; |
1549 |
+ bool force_dp_tps4_for_cp2520; |
1550 |
++ bool disable_dp_clk_share; |
1551 |
+ }; |
1552 |
+ |
1553 |
+ struct dc_dcc_surface_param { |
1554 |
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c |
1555 |
+index dbe3b26b6d9e..f6ec1d3dfd0c 100644 |
1556 |
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c |
1557 |
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c |
1558 |
+@@ -919,7 +919,7 @@ void dce110_link_encoder_enable_tmds_output( |
1559 |
+ enum bp_result result; |
1560 |
+ |
1561 |
+ /* Enable the PHY */ |
1562 |
+- |
1563 |
++ cntl.connector_obj_id = enc110->base.connector; |
1564 |
+ cntl.action = TRANSMITTER_CONTROL_ENABLE; |
1565 |
+ cntl.engine_id = enc->preferred_engine; |
1566 |
+ cntl.transmitter = enc110->base.transmitter; |
1567 |
+@@ -961,7 +961,7 @@ void dce110_link_encoder_enable_dp_output( |
1568 |
+ * We need to set number of lanes manually. |
1569 |
+ */ |
1570 |
+ configure_encoder(enc110, link_settings); |
1571 |
+- |
1572 |
++ cntl.connector_obj_id = enc110->base.connector; |
1573 |
+ cntl.action = TRANSMITTER_CONTROL_ENABLE; |
1574 |
+ cntl.engine_id = enc->preferred_engine; |
1575 |
+ cntl.transmitter = enc110->base.transmitter; |
1576 |
+diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c |
1577 |
+index 344dd2e69e7c..aa2f03eb46fe 100644 |
1578 |
+--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c |
1579 |
++++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c |
1580 |
+@@ -884,7 +884,7 @@ static bool construct( |
1581 |
+ dc->caps.i2c_speed_in_khz = 40; |
1582 |
+ dc->caps.max_cursor_size = 128; |
1583 |
+ dc->caps.dual_link_dvi = true; |
1584 |
+- |
1585 |
++ dc->caps.disable_dp_clk_share = true; |
1586 |
+ for (i = 0; i < pool->base.pipe_count; i++) { |
1587 |
+ pool->base.timing_generators[i] = |
1588 |
+ dce100_timing_generator_create( |
1589 |
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c |
1590 |
+index e2994d337044..111c4921987f 100644 |
1591 |
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c |
1592 |
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c |
1593 |
+@@ -143,7 +143,7 @@ static void wait_for_fbc_state_changed( |
1594 |
+ struct dce110_compressor *cp110, |
1595 |
+ bool enabled) |
1596 |
+ { |
1597 |
+- uint8_t counter = 0; |
1598 |
++ uint16_t counter = 0; |
1599 |
+ uint32_t addr = mmFBC_STATUS; |
1600 |
+ uint32_t value; |
1601 |
+ |
1602 |
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c |
1603 |
+index c29052b6da5a..7c0b1d7aa9b8 100644 |
1604 |
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c |
1605 |
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c |
1606 |
+@@ -1939,7 +1939,9 @@ static void dce110_reset_hw_ctx_wrap( |
1607 |
+ pipe_ctx_old->plane_res.mi->funcs->free_mem_input( |
1608 |
+ pipe_ctx_old->plane_res.mi, dc->current_state->stream_count); |
1609 |
+ |
1610 |
+- if (old_clk) |
1611 |
++ if (old_clk && 0 == resource_get_clock_source_reference(&context->res_ctx, |
1612 |
++ dc->res_pool, |
1613 |
++ old_clk)) |
1614 |
+ old_clk->funcs->cs_power_down(old_clk); |
1615 |
+ |
1616 |
+ dc->hwss.disable_plane(dc, pipe_ctx_old); |
1617 |
+diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c |
1618 |
+index 48a068964722..6f4992bdc9ce 100644 |
1619 |
+--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c |
1620 |
++++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c |
1621 |
+@@ -902,6 +902,7 @@ static bool dce80_construct( |
1622 |
+ } |
1623 |
+ |
1624 |
+ dc->caps.max_planes = pool->base.pipe_count; |
1625 |
++ dc->caps.disable_dp_clk_share = true; |
1626 |
+ |
1627 |
+ if (!resource_construct(num_virtual_links, dc, &pool->base, |
1628 |
+ &res_create_funcs)) |
1629 |
+@@ -1087,6 +1088,7 @@ static bool dce81_construct( |
1630 |
+ } |
1631 |
+ |
1632 |
+ dc->caps.max_planes = pool->base.pipe_count; |
1633 |
++ dc->caps.disable_dp_clk_share = true; |
1634 |
+ |
1635 |
+ if (!resource_construct(num_virtual_links, dc, &pool->base, |
1636 |
+ &res_create_funcs)) |
1637 |
+@@ -1268,6 +1270,7 @@ static bool dce83_construct( |
1638 |
+ } |
1639 |
+ |
1640 |
+ dc->caps.max_planes = pool->base.pipe_count; |
1641 |
++ dc->caps.disable_dp_clk_share = true; |
1642 |
+ |
1643 |
+ if (!resource_construct(num_virtual_links, dc, &pool->base, |
1644 |
+ &res_create_funcs)) |
1645 |
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h |
1646 |
+index 640a647f4611..abf42a7d0859 100644 |
1647 |
+--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h |
1648 |
++++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h |
1649 |
+@@ -102,6 +102,11 @@ void resource_reference_clock_source( |
1650 |
+ const struct resource_pool *pool, |
1651 |
+ struct clock_source *clock_source); |
1652 |
+ |
1653 |
++int resource_get_clock_source_reference( |
1654 |
++ struct resource_context *res_ctx, |
1655 |
++ const struct resource_pool *pool, |
1656 |
++ struct clock_source *clock_source); |
1657 |
++ |
1658 |
+ bool resource_are_streams_timing_synchronizable( |
1659 |
+ struct dc_stream_state *stream1, |
1660 |
+ struct dc_stream_state *stream2); |
1661 |
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c |
1662 |
+index c952845833d7..5e19f5977eb1 100644 |
1663 |
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c |
1664 |
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c |
1665 |
+@@ -403,6 +403,49 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris12[] = { |
1666 |
+ { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, |
1667 |
+ |
1668 |
+ { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, |
1669 |
++ { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, |
1670 |
++ |
1671 |
++ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND }, |
1672 |
++ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, |
1673 |
++ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND }, |
1674 |
++ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, |
1675 |
++ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, |
1676 |
++ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, |
1677 |
++ |
1678 |
++ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, |
1679 |
++ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, |
1680 |
++ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, |
1681 |
++ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, GPU_CONFIGREG_DIDT_IND }, |
1682 |
++ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, |
1683 |
++ |
1684 |
++ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, |
1685 |
++ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND }, |
1686 |
++ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, GPU_CONFIGREG_DIDT_IND }, |
1687 |
++ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, |
1688 |
++ |
1689 |
++ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, |
1690 |
++ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, |
1691 |
++ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, |
1692 |
++ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, |
1693 |
++ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, |
1694 |
++ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, |
1695 |
++ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, |
1696 |
++ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, |
1697 |
++ |
1698 |
++ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, GPU_CONFIGREG_DIDT_IND }, |
1699 |
++ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, |
1700 |
++ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, GPU_CONFIGREG_DIDT_IND }, |
1701 |
++ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, GPU_CONFIGREG_DIDT_IND }, |
1702 |
++ |
1703 |
++ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND }, |
1704 |
++ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, GPU_CONFIGREG_DIDT_IND }, |
1705 |
++ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, |
1706 |
++ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, |
1707 |
++ |
1708 |
++ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, |
1709 |
++ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, |
1710 |
++ |
1711 |
++ { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, |
1712 |
+ { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND }, |
1713 |
+ |
1714 |
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND }, |
1715 |
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c |
1716 |
+index 50690c72b2ea..617557bd8c24 100644 |
1717 |
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c |
1718 |
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c |
1719 |
+@@ -244,6 +244,7 @@ static int smu8_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) |
1720 |
+ return 0; |
1721 |
+ } |
1722 |
+ |
1723 |
++/* convert form 8bit vid to real voltage in mV*4 */ |
1724 |
+ static uint32_t smu8_convert_8Bit_index_to_voltage( |
1725 |
+ struct pp_hwmgr *hwmgr, uint16_t voltage) |
1726 |
+ { |
1727 |
+@@ -1702,13 +1703,13 @@ static int smu8_read_sensor(struct pp_hwmgr *hwmgr, int idx, |
1728 |
+ case AMDGPU_PP_SENSOR_VDDNB: |
1729 |
+ tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) & |
1730 |
+ CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT; |
1731 |
+- vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp); |
1732 |
++ vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp) / 4; |
1733 |
+ *((uint32_t *)value) = vddnb; |
1734 |
+ return 0; |
1735 |
+ case AMDGPU_PP_SENSOR_VDDGFX: |
1736 |
+ tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) & |
1737 |
+ CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT; |
1738 |
+- vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp); |
1739 |
++ vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp) / 4; |
1740 |
+ *((uint32_t *)value) = vddgfx; |
1741 |
+ return 0; |
1742 |
+ case AMDGPU_PP_SENSOR_UVD_VCLK: |
1743 |
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c |
1744 |
+index c98e5de777cd..fcd2808874bf 100644 |
1745 |
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c |
1746 |
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c |
1747 |
+@@ -490,7 +490,7 @@ static int vega12_get_number_dpm_level(struct pp_hwmgr *hwmgr, |
1748 |
+ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr, |
1749 |
+ PPCLK_e clkID, uint32_t index, uint32_t *clock) |
1750 |
+ { |
1751 |
+- int result; |
1752 |
++ int result = 0; |
1753 |
+ |
1754 |
+ /* |
1755 |
+ *SMU expects the Clock ID to be in the top 16 bits. |
1756 |
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c |
1757 |
+index a5808382bdf0..c7b4481c90d7 100644 |
1758 |
+--- a/drivers/gpu/drm/drm_edid.c |
1759 |
++++ b/drivers/gpu/drm/drm_edid.c |
1760 |
+@@ -116,6 +116,9 @@ static const struct edid_quirk { |
1761 |
+ /* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */ |
1762 |
+ { "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC }, |
1763 |
+ |
1764 |
++ /* SDC panel of Lenovo B50-80 reports 8 bpc, but is a 6 bpc panel */ |
1765 |
++ { "SDC", 0x3652, EDID_QUIRK_FORCE_6BPC }, |
1766 |
++ |
1767 |
+ /* Belinea 10 15 55 */ |
1768 |
+ { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 }, |
1769 |
+ { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 }, |
1770 |
+@@ -163,8 +166,9 @@ static const struct edid_quirk { |
1771 |
+ /* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/ |
1772 |
+ { "ETR", 13896, EDID_QUIRK_FORCE_8BPC }, |
1773 |
+ |
1774 |
+- /* HTC Vive VR Headset */ |
1775 |
++ /* HTC Vive and Vive Pro VR Headsets */ |
1776 |
+ { "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP }, |
1777 |
++ { "HVR", 0xaa02, EDID_QUIRK_NON_DESKTOP }, |
1778 |
+ |
1779 |
+ /* Oculus Rift DK1, DK2, and CV1 VR Headsets */ |
1780 |
+ { "OVR", 0x0001, EDID_QUIRK_NON_DESKTOP }, |
1781 |
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c |
1782 |
+index 686f6552db48..3ef440b235e5 100644 |
1783 |
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c |
1784 |
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c |
1785 |
+@@ -799,6 +799,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) |
1786 |
+ |
1787 |
+ free_buffer: |
1788 |
+ etnaviv_cmdbuf_free(&gpu->buffer); |
1789 |
++ gpu->buffer.suballoc = NULL; |
1790 |
+ destroy_iommu: |
1791 |
+ etnaviv_iommu_destroy(gpu->mmu); |
1792 |
+ gpu->mmu = NULL; |
1793 |
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c |
1794 |
+index 9c449b8d8eab..015f9e93419d 100644 |
1795 |
+--- a/drivers/gpu/drm/i915/i915_drv.c |
1796 |
++++ b/drivers/gpu/drm/i915/i915_drv.c |
1797 |
+@@ -919,7 +919,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, |
1798 |
+ spin_lock_init(&dev_priv->uncore.lock); |
1799 |
+ |
1800 |
+ mutex_init(&dev_priv->sb_lock); |
1801 |
+- mutex_init(&dev_priv->modeset_restore_lock); |
1802 |
+ mutex_init(&dev_priv->av_mutex); |
1803 |
+ mutex_init(&dev_priv->wm.wm_mutex); |
1804 |
+ mutex_init(&dev_priv->pps_mutex); |
1805 |
+@@ -1560,11 +1559,6 @@ static int i915_drm_suspend(struct drm_device *dev) |
1806 |
+ pci_power_t opregion_target_state; |
1807 |
+ int error; |
1808 |
+ |
1809 |
+- /* ignore lid events during suspend */ |
1810 |
+- mutex_lock(&dev_priv->modeset_restore_lock); |
1811 |
+- dev_priv->modeset_restore = MODESET_SUSPENDED; |
1812 |
+- mutex_unlock(&dev_priv->modeset_restore_lock); |
1813 |
+- |
1814 |
+ disable_rpm_wakeref_asserts(dev_priv); |
1815 |
+ |
1816 |
+ /* We do a lot of poking in a lot of registers, make sure they work |
1817 |
+@@ -1764,10 +1758,6 @@ static int i915_drm_resume(struct drm_device *dev) |
1818 |
+ |
1819 |
+ intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false); |
1820 |
+ |
1821 |
+- mutex_lock(&dev_priv->modeset_restore_lock); |
1822 |
+- dev_priv->modeset_restore = MODESET_DONE; |
1823 |
+- mutex_unlock(&dev_priv->modeset_restore_lock); |
1824 |
+- |
1825 |
+ intel_opregion_notify_adapter(dev_priv, PCI_D0); |
1826 |
+ |
1827 |
+ enable_rpm_wakeref_asserts(dev_priv); |
1828 |
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h |
1829 |
+index 71e1aa54f774..7c22fac3aa04 100644 |
1830 |
+--- a/drivers/gpu/drm/i915/i915_drv.h |
1831 |
++++ b/drivers/gpu/drm/i915/i915_drv.h |
1832 |
+@@ -1003,12 +1003,6 @@ struct i915_gem_mm { |
1833 |
+ #define I915_ENGINE_DEAD_TIMEOUT (4 * HZ) /* Seqno, head and subunits dead */ |
1834 |
+ #define I915_SEQNO_DEAD_TIMEOUT (12 * HZ) /* Seqno dead with active head */ |
1835 |
+ |
1836 |
+-enum modeset_restore { |
1837 |
+- MODESET_ON_LID_OPEN, |
1838 |
+- MODESET_DONE, |
1839 |
+- MODESET_SUSPENDED, |
1840 |
+-}; |
1841 |
+- |
1842 |
+ #define DP_AUX_A 0x40 |
1843 |
+ #define DP_AUX_B 0x10 |
1844 |
+ #define DP_AUX_C 0x20 |
1845 |
+@@ -1740,8 +1734,6 @@ struct drm_i915_private { |
1846 |
+ |
1847 |
+ unsigned long quirks; |
1848 |
+ |
1849 |
+- enum modeset_restore modeset_restore; |
1850 |
+- struct mutex modeset_restore_lock; |
1851 |
+ struct drm_atomic_state *modeset_restore_state; |
1852 |
+ struct drm_modeset_acquire_ctx reset_ctx; |
1853 |
+ |
1854 |
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h |
1855 |
+index 7720569f2024..6e048ee88e3f 100644 |
1856 |
+--- a/drivers/gpu/drm/i915/i915_reg.h |
1857 |
++++ b/drivers/gpu/drm/i915/i915_reg.h |
1858 |
+@@ -8825,6 +8825,7 @@ enum skl_power_gate { |
1859 |
+ #define TRANS_MSA_10_BPC (2<<5) |
1860 |
+ #define TRANS_MSA_12_BPC (3<<5) |
1861 |
+ #define TRANS_MSA_16_BPC (4<<5) |
1862 |
++#define TRANS_MSA_CEA_RANGE (1<<3) |
1863 |
+ |
1864 |
+ /* LCPLL Control */ |
1865 |
+ #define LCPLL_CTL _MMIO(0x130040) |
1866 |
+diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c |
1867 |
+index fed26d6e4e27..e195c287c263 100644 |
1868 |
+--- a/drivers/gpu/drm/i915/intel_ddi.c |
1869 |
++++ b/drivers/gpu/drm/i915/intel_ddi.c |
1870 |
+@@ -1659,6 +1659,10 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state) |
1871 |
+ WARN_ON(transcoder_is_dsi(cpu_transcoder)); |
1872 |
+ |
1873 |
+ temp = TRANS_MSA_SYNC_CLK; |
1874 |
++ |
1875 |
++ if (crtc_state->limited_color_range) |
1876 |
++ temp |= TRANS_MSA_CEA_RANGE; |
1877 |
++ |
1878 |
+ switch (crtc_state->pipe_bpp) { |
1879 |
+ case 18: |
1880 |
+ temp |= TRANS_MSA_6_BPC; |
1881 |
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c |
1882 |
+index 16faea30114a..8e465095fe06 100644 |
1883 |
+--- a/drivers/gpu/drm/i915/intel_dp.c |
1884 |
++++ b/drivers/gpu/drm/i915/intel_dp.c |
1885 |
+@@ -4293,18 +4293,6 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp) |
1886 |
+ return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count); |
1887 |
+ } |
1888 |
+ |
1889 |
+-/* |
1890 |
+- * If display is now connected check links status, |
1891 |
+- * there has been known issues of link loss triggering |
1892 |
+- * long pulse. |
1893 |
+- * |
1894 |
+- * Some sinks (eg. ASUS PB287Q) seem to perform some |
1895 |
+- * weird HPD ping pong during modesets. So we can apparently |
1896 |
+- * end up with HPD going low during a modeset, and then |
1897 |
+- * going back up soon after. And once that happens we must |
1898 |
+- * retrain the link to get a picture. That's in case no |
1899 |
+- * userspace component reacted to intermittent HPD dip. |
1900 |
+- */ |
1901 |
+ int intel_dp_retrain_link(struct intel_encoder *encoder, |
1902 |
+ struct drm_modeset_acquire_ctx *ctx) |
1903 |
+ { |
1904 |
+@@ -4794,7 +4782,8 @@ intel_dp_unset_edid(struct intel_dp *intel_dp) |
1905 |
+ } |
1906 |
+ |
1907 |
+ static int |
1908 |
+-intel_dp_long_pulse(struct intel_connector *connector) |
1909 |
++intel_dp_long_pulse(struct intel_connector *connector, |
1910 |
++ struct drm_modeset_acquire_ctx *ctx) |
1911 |
+ { |
1912 |
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
1913 |
+ struct intel_dp *intel_dp = intel_attached_dp(&connector->base); |
1914 |
+@@ -4853,6 +4842,22 @@ intel_dp_long_pulse(struct intel_connector *connector) |
1915 |
+ */ |
1916 |
+ status = connector_status_disconnected; |
1917 |
+ goto out; |
1918 |
++ } else { |
1919 |
++ /* |
1920 |
++ * If display is now connected check links status, |
1921 |
++ * there has been known issues of link loss triggering |
1922 |
++ * long pulse. |
1923 |
++ * |
1924 |
++ * Some sinks (eg. ASUS PB287Q) seem to perform some |
1925 |
++ * weird HPD ping pong during modesets. So we can apparently |
1926 |
++ * end up with HPD going low during a modeset, and then |
1927 |
++ * going back up soon after. And once that happens we must |
1928 |
++ * retrain the link to get a picture. That's in case no |
1929 |
++ * userspace component reacted to intermittent HPD dip. |
1930 |
++ */ |
1931 |
++ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; |
1932 |
++ |
1933 |
++ intel_dp_retrain_link(encoder, ctx); |
1934 |
+ } |
1935 |
+ |
1936 |
+ /* |
1937 |
+@@ -4914,7 +4919,7 @@ intel_dp_detect(struct drm_connector *connector, |
1938 |
+ return ret; |
1939 |
+ } |
1940 |
+ |
1941 |
+- status = intel_dp_long_pulse(intel_dp->attached_connector); |
1942 |
++ status = intel_dp_long_pulse(intel_dp->attached_connector, ctx); |
1943 |
+ } |
1944 |
+ |
1945 |
+ intel_dp->detect_done = false; |
1946 |
+diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c |
1947 |
+index d8cb53ef4351..c8640959a7fc 100644 |
1948 |
+--- a/drivers/gpu/drm/i915/intel_hdmi.c |
1949 |
++++ b/drivers/gpu/drm/i915/intel_hdmi.c |
1950 |
+@@ -933,8 +933,12 @@ static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port, |
1951 |
+ |
1952 |
+ ret = i2c_transfer(adapter, &msg, 1); |
1953 |
+ if (ret == 1) |
1954 |
+- return 0; |
1955 |
+- return ret >= 0 ? -EIO : ret; |
1956 |
++ ret = 0; |
1957 |
++ else if (ret >= 0) |
1958 |
++ ret = -EIO; |
1959 |
++ |
1960 |
++ kfree(write_buf); |
1961 |
++ return ret; |
1962 |
+ } |
1963 |
+ |
1964 |
+ static |
1965 |
+diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c |
1966 |
+index b4941101f21a..cdf19553ffac 100644 |
1967 |
+--- a/drivers/gpu/drm/i915/intel_lpe_audio.c |
1968 |
++++ b/drivers/gpu/drm/i915/intel_lpe_audio.c |
1969 |
+@@ -127,9 +127,7 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv) |
1970 |
+ return platdev; |
1971 |
+ } |
1972 |
+ |
1973 |
+- pm_runtime_forbid(&platdev->dev); |
1974 |
+- pm_runtime_set_active(&platdev->dev); |
1975 |
+- pm_runtime_enable(&platdev->dev); |
1976 |
++ pm_runtime_no_callbacks(&platdev->dev); |
1977 |
+ |
1978 |
+ return platdev; |
1979 |
+ } |
1980 |
+diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c |
1981 |
+index 8ae8f42f430a..6b6758419fb3 100644 |
1982 |
+--- a/drivers/gpu/drm/i915/intel_lspcon.c |
1983 |
++++ b/drivers/gpu/drm/i915/intel_lspcon.c |
1984 |
+@@ -74,7 +74,7 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon, |
1985 |
+ DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n", |
1986 |
+ lspcon_mode_name(mode)); |
1987 |
+ |
1988 |
+- wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 100); |
1989 |
++ wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 400); |
1990 |
+ if (current_mode != mode) |
1991 |
+ DRM_ERROR("LSPCON mode hasn't settled\n"); |
1992 |
+ |
1993 |
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c |
1994 |
+index 48f618dc9abb..63d7faa99946 100644 |
1995 |
+--- a/drivers/gpu/drm/i915/intel_lvds.c |
1996 |
++++ b/drivers/gpu/drm/i915/intel_lvds.c |
1997 |
+@@ -44,8 +44,6 @@ |
1998 |
+ /* Private structure for the integrated LVDS support */ |
1999 |
+ struct intel_lvds_connector { |
2000 |
+ struct intel_connector base; |
2001 |
+- |
2002 |
+- struct notifier_block lid_notifier; |
2003 |
+ }; |
2004 |
+ |
2005 |
+ struct intel_lvds_pps { |
2006 |
+@@ -454,26 +452,9 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder, |
2007 |
+ return true; |
2008 |
+ } |
2009 |
+ |
2010 |
+-/* |
2011 |
+- * Detect the LVDS connection. |
2012 |
+- * |
2013 |
+- * Since LVDS doesn't have hotlug, we use the lid as a proxy. Open means |
2014 |
+- * connected and closed means disconnected. We also send hotplug events as |
2015 |
+- * needed, using lid status notification from the input layer. |
2016 |
+- */ |
2017 |
+ static enum drm_connector_status |
2018 |
+ intel_lvds_detect(struct drm_connector *connector, bool force) |
2019 |
+ { |
2020 |
+- struct drm_i915_private *dev_priv = to_i915(connector->dev); |
2021 |
+- enum drm_connector_status status; |
2022 |
+- |
2023 |
+- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", |
2024 |
+- connector->base.id, connector->name); |
2025 |
+- |
2026 |
+- status = intel_panel_detect(dev_priv); |
2027 |
+- if (status != connector_status_unknown) |
2028 |
+- return status; |
2029 |
+- |
2030 |
+ return connector_status_connected; |
2031 |
+ } |
2032 |
+ |
2033 |
+@@ -498,117 +479,6 @@ static int intel_lvds_get_modes(struct drm_connector *connector) |
2034 |
+ return 1; |
2035 |
+ } |
2036 |
+ |
2037 |
+-static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id) |
2038 |
+-{ |
2039 |
+- DRM_INFO("Skipping forced modeset for %s\n", id->ident); |
2040 |
+- return 1; |
2041 |
+-} |
2042 |
+- |
2043 |
+-/* The GPU hangs up on these systems if modeset is performed on LID open */ |
2044 |
+-static const struct dmi_system_id intel_no_modeset_on_lid[] = { |
2045 |
+- { |
2046 |
+- .callback = intel_no_modeset_on_lid_dmi_callback, |
2047 |
+- .ident = "Toshiba Tecra A11", |
2048 |
+- .matches = { |
2049 |
+- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), |
2050 |
+- DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A11"), |
2051 |
+- }, |
2052 |
+- }, |
2053 |
+- |
2054 |
+- { } /* terminating entry */ |
2055 |
+-}; |
2056 |
+- |
2057 |
+-/* |
2058 |
+- * Lid events. Note the use of 'modeset': |
2059 |
+- * - we set it to MODESET_ON_LID_OPEN on lid close, |
2060 |
+- * and set it to MODESET_DONE on open |
2061 |
+- * - we use it as a "only once" bit (ie we ignore |
2062 |
+- * duplicate events where it was already properly set) |
2063 |
+- * - the suspend/resume paths will set it to |
2064 |
+- * MODESET_SUSPENDED and ignore the lid open event, |
2065 |
+- * because they restore the mode ("lid open"). |
2066 |
+- */ |
2067 |
+-static int intel_lid_notify(struct notifier_block *nb, unsigned long val, |
2068 |
+- void *unused) |
2069 |
+-{ |
2070 |
+- struct intel_lvds_connector *lvds_connector = |
2071 |
+- container_of(nb, struct intel_lvds_connector, lid_notifier); |
2072 |
+- struct drm_connector *connector = &lvds_connector->base.base; |
2073 |
+- struct drm_device *dev = connector->dev; |
2074 |
+- struct drm_i915_private *dev_priv = to_i915(dev); |
2075 |
+- |
2076 |
+- if (dev->switch_power_state != DRM_SWITCH_POWER_ON) |
2077 |
+- return NOTIFY_OK; |
2078 |
+- |
2079 |
+- mutex_lock(&dev_priv->modeset_restore_lock); |
2080 |
+- if (dev_priv->modeset_restore == MODESET_SUSPENDED) |
2081 |
+- goto exit; |
2082 |
+- /* |
2083 |
+- * check and update the status of LVDS connector after receiving |
2084 |
+- * the LID nofication event. |
2085 |
+- */ |
2086 |
+- connector->status = connector->funcs->detect(connector, false); |
2087 |
+- |
2088 |
+- /* Don't force modeset on machines where it causes a GPU lockup */ |
2089 |
+- if (dmi_check_system(intel_no_modeset_on_lid)) |
2090 |
+- goto exit; |
2091 |
+- if (!acpi_lid_open()) { |
2092 |
+- /* do modeset on next lid open event */ |
2093 |
+- dev_priv->modeset_restore = MODESET_ON_LID_OPEN; |
2094 |
+- goto exit; |
2095 |
+- } |
2096 |
+- |
2097 |
+- if (dev_priv->modeset_restore == MODESET_DONE) |
2098 |
+- goto exit; |
2099 |
+- |
2100 |
+- /* |
2101 |
+- * Some old platform's BIOS love to wreak havoc while the lid is closed. |
2102 |
+- * We try to detect this here and undo any damage. The split for PCH |
2103 |
+- * platforms is rather conservative and a bit arbitrary expect that on |
2104 |
+- * those platforms VGA disabling requires actual legacy VGA I/O access, |
2105 |
+- * and as part of the cleanup in the hw state restore we also redisable |
2106 |
+- * the vga plane. |
2107 |
+- */ |
2108 |
+- if (!HAS_PCH_SPLIT(dev_priv)) |
2109 |
+- intel_display_resume(dev); |
2110 |
+- |
2111 |
+- dev_priv->modeset_restore = MODESET_DONE; |
2112 |
+- |
2113 |
+-exit: |
2114 |
+- mutex_unlock(&dev_priv->modeset_restore_lock); |
2115 |
+- return NOTIFY_OK; |
2116 |
+-} |
2117 |
+- |
2118 |
+-static int |
2119 |
+-intel_lvds_connector_register(struct drm_connector *connector) |
2120 |
+-{ |
2121 |
+- struct intel_lvds_connector *lvds = to_lvds_connector(connector); |
2122 |
+- int ret; |
2123 |
+- |
2124 |
+- ret = intel_connector_register(connector); |
2125 |
+- if (ret) |
2126 |
+- return ret; |
2127 |
+- |
2128 |
+- lvds->lid_notifier.notifier_call = intel_lid_notify; |
2129 |
+- if (acpi_lid_notifier_register(&lvds->lid_notifier)) { |
2130 |
+- DRM_DEBUG_KMS("lid notifier registration failed\n"); |
2131 |
+- lvds->lid_notifier.notifier_call = NULL; |
2132 |
+- } |
2133 |
+- |
2134 |
+- return 0; |
2135 |
+-} |
2136 |
+- |
2137 |
+-static void |
2138 |
+-intel_lvds_connector_unregister(struct drm_connector *connector) |
2139 |
+-{ |
2140 |
+- struct intel_lvds_connector *lvds = to_lvds_connector(connector); |
2141 |
+- |
2142 |
+- if (lvds->lid_notifier.notifier_call) |
2143 |
+- acpi_lid_notifier_unregister(&lvds->lid_notifier); |
2144 |
+- |
2145 |
+- intel_connector_unregister(connector); |
2146 |
+-} |
2147 |
+- |
2148 |
+ /** |
2149 |
+ * intel_lvds_destroy - unregister and free LVDS structures |
2150 |
+ * @connector: connector to free |
2151 |
+@@ -641,8 +511,8 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = { |
2152 |
+ .fill_modes = drm_helper_probe_single_connector_modes, |
2153 |
+ .atomic_get_property = intel_digital_connector_atomic_get_property, |
2154 |
+ .atomic_set_property = intel_digital_connector_atomic_set_property, |
2155 |
+- .late_register = intel_lvds_connector_register, |
2156 |
+- .early_unregister = intel_lvds_connector_unregister, |
2157 |
++ .late_register = intel_connector_register, |
2158 |
++ .early_unregister = intel_connector_unregister, |
2159 |
+ .destroy = intel_lvds_destroy, |
2160 |
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, |
2161 |
+ .atomic_duplicate_state = intel_digital_connector_duplicate_state, |
2162 |
+@@ -1108,8 +978,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) |
2163 |
+ * 2) check for VBT data |
2164 |
+ * 3) check to see if LVDS is already on |
2165 |
+ * if none of the above, no panel |
2166 |
+- * 4) make sure lid is open |
2167 |
+- * if closed, act like it's not there for now |
2168 |
+ */ |
2169 |
+ |
2170 |
+ /* |
2171 |
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c |
2172 |
+index 2121345a61af..78ce3d232c4d 100644 |
2173 |
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c |
2174 |
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c |
2175 |
+@@ -486,6 +486,31 @@ static void vop_line_flag_irq_disable(struct vop *vop) |
2176 |
+ spin_unlock_irqrestore(&vop->irq_lock, flags); |
2177 |
+ } |
2178 |
+ |
2179 |
++static int vop_core_clks_enable(struct vop *vop) |
2180 |
++{ |
2181 |
++ int ret; |
2182 |
++ |
2183 |
++ ret = clk_enable(vop->hclk); |
2184 |
++ if (ret < 0) |
2185 |
++ return ret; |
2186 |
++ |
2187 |
++ ret = clk_enable(vop->aclk); |
2188 |
++ if (ret < 0) |
2189 |
++ goto err_disable_hclk; |
2190 |
++ |
2191 |
++ return 0; |
2192 |
++ |
2193 |
++err_disable_hclk: |
2194 |
++ clk_disable(vop->hclk); |
2195 |
++ return ret; |
2196 |
++} |
2197 |
++ |
2198 |
++static void vop_core_clks_disable(struct vop *vop) |
2199 |
++{ |
2200 |
++ clk_disable(vop->aclk); |
2201 |
++ clk_disable(vop->hclk); |
2202 |
++} |
2203 |
++ |
2204 |
+ static int vop_enable(struct drm_crtc *crtc) |
2205 |
+ { |
2206 |
+ struct vop *vop = to_vop(crtc); |
2207 |
+@@ -497,17 +522,13 @@ static int vop_enable(struct drm_crtc *crtc) |
2208 |
+ return ret; |
2209 |
+ } |
2210 |
+ |
2211 |
+- ret = clk_enable(vop->hclk); |
2212 |
++ ret = vop_core_clks_enable(vop); |
2213 |
+ if (WARN_ON(ret < 0)) |
2214 |
+ goto err_put_pm_runtime; |
2215 |
+ |
2216 |
+ ret = clk_enable(vop->dclk); |
2217 |
+ if (WARN_ON(ret < 0)) |
2218 |
+- goto err_disable_hclk; |
2219 |
+- |
2220 |
+- ret = clk_enable(vop->aclk); |
2221 |
+- if (WARN_ON(ret < 0)) |
2222 |
+- goto err_disable_dclk; |
2223 |
++ goto err_disable_core; |
2224 |
+ |
2225 |
+ /* |
2226 |
+ * Slave iommu shares power, irq and clock with vop. It was associated |
2227 |
+@@ -519,7 +540,7 @@ static int vop_enable(struct drm_crtc *crtc) |
2228 |
+ if (ret) { |
2229 |
+ DRM_DEV_ERROR(vop->dev, |
2230 |
+ "failed to attach dma mapping, %d\n", ret); |
2231 |
+- goto err_disable_aclk; |
2232 |
++ goto err_disable_dclk; |
2233 |
+ } |
2234 |
+ |
2235 |
+ spin_lock(&vop->reg_lock); |
2236 |
+@@ -552,18 +573,14 @@ static int vop_enable(struct drm_crtc *crtc) |
2237 |
+ |
2238 |
+ spin_unlock(&vop->reg_lock); |
2239 |
+ |
2240 |
+- enable_irq(vop->irq); |
2241 |
+- |
2242 |
+ drm_crtc_vblank_on(crtc); |
2243 |
+ |
2244 |
+ return 0; |
2245 |
+ |
2246 |
+-err_disable_aclk: |
2247 |
+- clk_disable(vop->aclk); |
2248 |
+ err_disable_dclk: |
2249 |
+ clk_disable(vop->dclk); |
2250 |
+-err_disable_hclk: |
2251 |
+- clk_disable(vop->hclk); |
2252 |
++err_disable_core: |
2253 |
++ vop_core_clks_disable(vop); |
2254 |
+ err_put_pm_runtime: |
2255 |
+ pm_runtime_put_sync(vop->dev); |
2256 |
+ return ret; |
2257 |
+@@ -599,8 +616,6 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc, |
2258 |
+ |
2259 |
+ vop_dsp_hold_valid_irq_disable(vop); |
2260 |
+ |
2261 |
+- disable_irq(vop->irq); |
2262 |
+- |
2263 |
+ vop->is_enabled = false; |
2264 |
+ |
2265 |
+ /* |
2266 |
+@@ -609,8 +624,7 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc, |
2267 |
+ rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev); |
2268 |
+ |
2269 |
+ clk_disable(vop->dclk); |
2270 |
+- clk_disable(vop->aclk); |
2271 |
+- clk_disable(vop->hclk); |
2272 |
++ vop_core_clks_disable(vop); |
2273 |
+ pm_runtime_put(vop->dev); |
2274 |
+ mutex_unlock(&vop->vop_lock); |
2275 |
+ |
2276 |
+@@ -1177,6 +1191,18 @@ static irqreturn_t vop_isr(int irq, void *data) |
2277 |
+ uint32_t active_irqs; |
2278 |
+ int ret = IRQ_NONE; |
2279 |
+ |
2280 |
++ /* |
2281 |
++ * The irq is shared with the iommu. If the runtime-pm state of the |
2282 |
++ * vop-device is disabled the irq has to be targeted at the iommu. |
2283 |
++ */ |
2284 |
++ if (!pm_runtime_get_if_in_use(vop->dev)) |
2285 |
++ return IRQ_NONE; |
2286 |
++ |
2287 |
++ if (vop_core_clks_enable(vop)) { |
2288 |
++ DRM_DEV_ERROR_RATELIMITED(vop->dev, "couldn't enable clocks\n"); |
2289 |
++ goto out; |
2290 |
++ } |
2291 |
++ |
2292 |
+ /* |
2293 |
+ * interrupt register has interrupt status, enable and clear bits, we |
2294 |
+ * must hold irq_lock to avoid a race with enable/disable_vblank(). |
2295 |
+@@ -1192,7 +1218,7 @@ static irqreturn_t vop_isr(int irq, void *data) |
2296 |
+ |
2297 |
+ /* This is expected for vop iommu irqs, since the irq is shared */ |
2298 |
+ if (!active_irqs) |
2299 |
+- return IRQ_NONE; |
2300 |
++ goto out_disable; |
2301 |
+ |
2302 |
+ if (active_irqs & DSP_HOLD_VALID_INTR) { |
2303 |
+ complete(&vop->dsp_hold_completion); |
2304 |
+@@ -1218,6 +1244,10 @@ static irqreturn_t vop_isr(int irq, void *data) |
2305 |
+ DRM_DEV_ERROR(vop->dev, "Unknown VOP IRQs: %#02x\n", |
2306 |
+ active_irqs); |
2307 |
+ |
2308 |
++out_disable: |
2309 |
++ vop_core_clks_disable(vop); |
2310 |
++out: |
2311 |
++ pm_runtime_put(vop->dev); |
2312 |
+ return ret; |
2313 |
+ } |
2314 |
+ |
2315 |
+@@ -1596,9 +1626,6 @@ static int vop_bind(struct device *dev, struct device *master, void *data) |
2316 |
+ if (ret) |
2317 |
+ goto err_disable_pm_runtime; |
2318 |
+ |
2319 |
+- /* IRQ is initially disabled; it gets enabled in power_on */ |
2320 |
+- disable_irq(vop->irq); |
2321 |
+- |
2322 |
+ return 0; |
2323 |
+ |
2324 |
+ err_disable_pm_runtime: |
2325 |
+diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c |
2326 |
+index e67f4ea28c0e..051b8be3dc0f 100644 |
2327 |
+--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c |
2328 |
++++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c |
2329 |
+@@ -363,8 +363,10 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master, |
2330 |
+ of_property_read_u32(endpoint, "reg", &endpoint_id); |
2331 |
+ ret = drm_of_find_panel_or_bridge(dev->of_node, 1, endpoint_id, |
2332 |
+ &lvds->panel, &lvds->bridge); |
2333 |
+- if (!ret) |
2334 |
++ if (!ret) { |
2335 |
++ of_node_put(endpoint); |
2336 |
+ break; |
2337 |
++ } |
2338 |
+ } |
2339 |
+ if (!child_count) { |
2340 |
+ DRM_DEV_ERROR(dev, "lvds port does not have any children\n"); |
2341 |
+diff --git a/drivers/hid/hid-redragon.c b/drivers/hid/hid-redragon.c |
2342 |
+index daf59578bf93..73c9d4c4fa34 100644 |
2343 |
+--- a/drivers/hid/hid-redragon.c |
2344 |
++++ b/drivers/hid/hid-redragon.c |
2345 |
+@@ -44,29 +44,6 @@ static __u8 *redragon_report_fixup(struct hid_device *hdev, __u8 *rdesc, |
2346 |
+ return rdesc; |
2347 |
+ } |
2348 |
+ |
2349 |
+-static int redragon_probe(struct hid_device *dev, |
2350 |
+- const struct hid_device_id *id) |
2351 |
+-{ |
2352 |
+- int ret; |
2353 |
+- |
2354 |
+- ret = hid_parse(dev); |
2355 |
+- if (ret) { |
2356 |
+- hid_err(dev, "parse failed\n"); |
2357 |
+- return ret; |
2358 |
+- } |
2359 |
+- |
2360 |
+- /* do not register unused input device */ |
2361 |
+- if (dev->maxapplication == 1) |
2362 |
+- return 0; |
2363 |
+- |
2364 |
+- ret = hid_hw_start(dev, HID_CONNECT_DEFAULT); |
2365 |
+- if (ret) { |
2366 |
+- hid_err(dev, "hw start failed\n"); |
2367 |
+- return ret; |
2368 |
+- } |
2369 |
+- |
2370 |
+- return 0; |
2371 |
+-} |
2372 |
+ static const struct hid_device_id redragon_devices[] = { |
2373 |
+ {HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_REDRAGON_ASURA)}, |
2374 |
+ {} |
2375 |
+@@ -77,8 +54,7 @@ MODULE_DEVICE_TABLE(hid, redragon_devices); |
2376 |
+ static struct hid_driver redragon_driver = { |
2377 |
+ .name = "redragon", |
2378 |
+ .id_table = redragon_devices, |
2379 |
+- .report_fixup = redragon_report_fixup, |
2380 |
+- .probe = redragon_probe |
2381 |
++ .report_fixup = redragon_report_fixup |
2382 |
+ }; |
2383 |
+ |
2384 |
+ module_hid_driver(redragon_driver); |
2385 |
+diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c |
2386 |
+index b8f303dea305..32affd3fa8bd 100644 |
2387 |
+--- a/drivers/i2c/i2c-core-acpi.c |
2388 |
++++ b/drivers/i2c/i2c-core-acpi.c |
2389 |
+@@ -453,8 +453,12 @@ static int acpi_gsb_i2c_read_bytes(struct i2c_client *client, |
2390 |
+ else |
2391 |
+ dev_err(&client->adapter->dev, "i2c read %d bytes from client@%#x starting at reg %#x failed, error: %d\n", |
2392 |
+ data_len, client->addr, cmd, ret); |
2393 |
+- } else { |
2394 |
++ /* 2 transfers must have completed successfully */ |
2395 |
++ } else if (ret == 2) { |
2396 |
+ memcpy(data, buffer, data_len); |
2397 |
++ ret = 0; |
2398 |
++ } else { |
2399 |
++ ret = -EIO; |
2400 |
+ } |
2401 |
+ |
2402 |
+ kfree(buffer); |
2403 |
+@@ -595,8 +599,6 @@ i2c_acpi_space_handler(u32 function, acpi_physical_address command, |
2404 |
+ if (action == ACPI_READ) { |
2405 |
+ status = acpi_gsb_i2c_read_bytes(client, command, |
2406 |
+ gsb->data, info->access_length); |
2407 |
+- if (status > 0) |
2408 |
+- status = 0; |
2409 |
+ } else { |
2410 |
+ status = acpi_gsb_i2c_write_bytes(client, command, |
2411 |
+ gsb->data, info->access_length); |
2412 |
+diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c |
2413 |
+index fbe7198a715a..bedd5fba33b0 100644 |
2414 |
+--- a/drivers/infiniband/hw/hfi1/affinity.c |
2415 |
++++ b/drivers/infiniband/hw/hfi1/affinity.c |
2416 |
+@@ -198,7 +198,7 @@ int node_affinity_init(void) |
2417 |
+ while ((dev = pci_get_device(ids->vendor, ids->device, dev))) { |
2418 |
+ node = pcibus_to_node(dev->bus); |
2419 |
+ if (node < 0) |
2420 |
+- node = numa_node_id(); |
2421 |
++ goto out; |
2422 |
+ |
2423 |
+ hfi1_per_node_cntr[node]++; |
2424 |
+ } |
2425 |
+@@ -206,6 +206,18 @@ int node_affinity_init(void) |
2426 |
+ } |
2427 |
+ |
2428 |
+ return 0; |
2429 |
++ |
2430 |
++out: |
2431 |
++ /* |
2432 |
++ * Invalid PCI NUMA node information found, note it, and populate |
2433 |
++ * our database 1:1. |
2434 |
++ */ |
2435 |
++ pr_err("HFI: Invalid PCI NUMA node. Performance may be affected\n"); |
2436 |
++ pr_err("HFI: System BIOS may need to be upgraded\n"); |
2437 |
++ for (node = 0; node < node_affinity.num_possible_nodes; node++) |
2438 |
++ hfi1_per_node_cntr[node] = 1; |
2439 |
++ |
2440 |
++ return 0; |
2441 |
+ } |
2442 |
+ |
2443 |
+ static void node_affinity_destroy(struct hfi1_affinity_node *entry) |
2444 |
+@@ -622,8 +634,14 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd) |
2445 |
+ int curr_cpu, possible, i, ret; |
2446 |
+ bool new_entry = false; |
2447 |
+ |
2448 |
+- if (node < 0) |
2449 |
+- node = numa_node_id(); |
2450 |
++ /* |
2451 |
++ * If the BIOS does not have the NUMA node information set, select |
2452 |
++ * NUMA 0 so we get consistent performance. |
2453 |
++ */ |
2454 |
++ if (node < 0) { |
2455 |
++ dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n"); |
2456 |
++ node = 0; |
2457 |
++ } |
2458 |
+ dd->node = node; |
2459 |
+ |
2460 |
+ local_mask = cpumask_of_node(dd->node); |
2461 |
+diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c |
2462 |
+index b9f2c871ff9a..e11c149da04d 100644 |
2463 |
+--- a/drivers/infiniband/hw/hns/hns_roce_pd.c |
2464 |
++++ b/drivers/infiniband/hw/hns/hns_roce_pd.c |
2465 |
+@@ -37,7 +37,7 @@ |
2466 |
+ |
2467 |
+ static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn) |
2468 |
+ { |
2469 |
+- return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn); |
2470 |
++ return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn) ? -ENOMEM : 0; |
2471 |
+ } |
2472 |
+ |
2473 |
+ static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn) |
2474 |
+diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c |
2475 |
+index baaf906f7c2e..97664570c5ac 100644 |
2476 |
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c |
2477 |
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c |
2478 |
+@@ -115,7 +115,10 @@ static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt, |
2479 |
+ { |
2480 |
+ struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; |
2481 |
+ |
2482 |
+- return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, base); |
2483 |
++ return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, |
2484 |
++ base) ? |
2485 |
++ -ENOMEM : |
2486 |
++ 0; |
2487 |
+ } |
2488 |
+ |
2489 |
+ enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state) |
2490 |
+diff --git a/drivers/input/input.c b/drivers/input/input.c |
2491 |
+index 6365c1958264..3304aaaffe87 100644 |
2492 |
+--- a/drivers/input/input.c |
2493 |
++++ b/drivers/input/input.c |
2494 |
+@@ -480,11 +480,19 @@ EXPORT_SYMBOL(input_inject_event); |
2495 |
+ */ |
2496 |
+ void input_alloc_absinfo(struct input_dev *dev) |
2497 |
+ { |
2498 |
+- if (!dev->absinfo) |
2499 |
+- dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo), |
2500 |
+- GFP_KERNEL); |
2501 |
++ if (dev->absinfo) |
2502 |
++ return; |
2503 |
+ |
2504 |
+- WARN(!dev->absinfo, "%s(): kcalloc() failed?\n", __func__); |
2505 |
++ dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo), GFP_KERNEL); |
2506 |
++ if (!dev->absinfo) { |
2507 |
++ dev_err(dev->dev.parent ?: &dev->dev, |
2508 |
++ "%s: unable to allocate memory\n", __func__); |
2509 |
++ /* |
2510 |
++ * We will handle this allocation failure in |
2511 |
++ * input_register_device() when we refuse to register input |
2512 |
++ * device with ABS bits but without absinfo. |
2513 |
++ */ |
2514 |
++ } |
2515 |
+ } |
2516 |
+ EXPORT_SYMBOL(input_alloc_absinfo); |
2517 |
+ |
2518 |
+diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c |
2519 |
+index af4a8e7fcd27..3b05117118c3 100644 |
2520 |
+--- a/drivers/iommu/omap-iommu.c |
2521 |
++++ b/drivers/iommu/omap-iommu.c |
2522 |
+@@ -550,7 +550,7 @@ static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, |
2523 |
+ |
2524 |
+ pte_ready: |
2525 |
+ iopte = iopte_offset(iopgd, da); |
2526 |
+- *pt_dma = virt_to_phys(iopte); |
2527 |
++ *pt_dma = iopgd_page_paddr(iopgd); |
2528 |
+ dev_vdbg(obj->dev, |
2529 |
+ "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n", |
2530 |
+ __func__, da, iopgd, *iopgd, iopte, *iopte); |
2531 |
+@@ -738,7 +738,7 @@ static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da) |
2532 |
+ } |
2533 |
+ bytes *= nent; |
2534 |
+ memset(iopte, 0, nent * sizeof(*iopte)); |
2535 |
+- pt_dma = virt_to_phys(iopte); |
2536 |
++ pt_dma = iopgd_page_paddr(iopgd); |
2537 |
+ flush_iopte_range(obj->dev, pt_dma, pt_offset, nent); |
2538 |
+ |
2539 |
+ /* |
2540 |
+diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
+index 054cd2c8e9c8..2b1724e8d307 100644
+--- a/drivers/iommu/rockchip-iommu.c
++++ b/drivers/iommu/rockchip-iommu.c
+@@ -521,10 +521,11 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
+ u32 int_status;
+ dma_addr_t iova;
+ irqreturn_t ret = IRQ_NONE;
+- int i;
++ int i, err;
+
+- if (WARN_ON(!pm_runtime_get_if_in_use(iommu->dev)))
+- return 0;
++ err = pm_runtime_get_if_in_use(iommu->dev);
++ if (WARN_ON_ONCE(err <= 0))
++ return ret;
+
+ if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
+ goto out;
+@@ -620,11 +621,15 @@ static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
+ spin_lock_irqsave(&rk_domain->iommus_lock, flags);
+ list_for_each(pos, &rk_domain->iommus) {
+ struct rk_iommu *iommu;
++ int ret;
+
+ iommu = list_entry(pos, struct rk_iommu, node);
+
+ /* Only zap TLBs of IOMMUs that are powered on. */
+- if (pm_runtime_get_if_in_use(iommu->dev)) {
++ ret = pm_runtime_get_if_in_use(iommu->dev);
++ if (WARN_ON_ONCE(ret < 0))
++ continue;
++ if (ret) {
+ WARN_ON(clk_bulk_enable(iommu->num_clocks,
+ iommu->clocks));
+ rk_iommu_zap_lines(iommu, iova, size);
+@@ -891,6 +896,7 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
+ struct rk_iommu *iommu;
+ struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
+ unsigned long flags;
++ int ret;
+
+ /* Allow 'virtual devices' (eg drm) to detach from domain */
+ iommu = rk_iommu_from_dev(dev);
+@@ -909,7 +915,9 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
+ list_del_init(&iommu->node);
+ spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
+
+- if (pm_runtime_get_if_in_use(iommu->dev)) {
++ ret = pm_runtime_get_if_in_use(iommu->dev);
++ WARN_ON_ONCE(ret < 0);
++ if (ret > 0) {
+ rk_iommu_disable(iommu);
+ pm_runtime_put(iommu->dev);
+ }
+@@ -946,7 +954,8 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
+ list_add_tail(&iommu->node, &rk_domain->iommus);
+ spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
+
+- if (!pm_runtime_get_if_in_use(iommu->dev))
++ ret = pm_runtime_get_if_in_use(iommu->dev);
++ if (!ret || WARN_ON_ONCE(ret < 0))
+ return 0;
+
+ ret = rk_iommu_enable(iommu);
+@@ -1152,17 +1161,6 @@ static int rk_iommu_probe(struct platform_device *pdev)
+ if (iommu->num_mmu == 0)
+ return PTR_ERR(iommu->bases[0]);
+
+- i = 0;
+- while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) {
+- if (irq < 0)
+- return irq;
+-
+- err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
+- IRQF_SHARED, dev_name(dev), iommu);
+- if (err)
+- return err;
+- }
+-
+ iommu->reset_disabled = device_property_read_bool(dev,
+ "rockchip,disable-mmu-reset");
+
+@@ -1219,6 +1217,19 @@ static int rk_iommu_probe(struct platform_device *pdev)
+
+ pm_runtime_enable(dev);
+
++ i = 0;
++ while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) {
++ if (irq < 0)
++ return irq;
++
++ err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
++ IRQF_SHARED, dev_name(dev), iommu);
++ if (err) {
++ pm_runtime_disable(dev);
++ goto err_remove_sysfs;
++ }
++ }
++
+ return 0;
+ err_remove_sysfs:
+ iommu_device_sysfs_remove(&iommu->iommu);
+diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
+index faf734ff4cf3..0f6e30e9009d 100644
+--- a/drivers/irqchip/irq-bcm7038-l1.c
++++ b/drivers/irqchip/irq-bcm7038-l1.c
+@@ -217,6 +217,7 @@ static int bcm7038_l1_set_affinity(struct irq_data *d,
+ return 0;
+ }
+
++#ifdef CONFIG_SMP
+ static void bcm7038_l1_cpu_offline(struct irq_data *d)
+ {
+ struct cpumask *mask = irq_data_get_affinity_mask(d);
+@@ -241,6 +242,7 @@ static void bcm7038_l1_cpu_offline(struct irq_data *d)
+ }
+ irq_set_affinity_locked(d, &new_affinity, false);
+ }
++#endif
+
+ static int __init bcm7038_l1_init_one(struct device_node *dn,
+ unsigned int idx,
+@@ -293,7 +295,9 @@ static struct irq_chip bcm7038_l1_irq_chip = {
+ .irq_mask = bcm7038_l1_mask,
+ .irq_unmask = bcm7038_l1_unmask,
+ .irq_set_affinity = bcm7038_l1_set_affinity,
++#ifdef CONFIG_SMP
+ .irq_cpu_offline = bcm7038_l1_cpu_offline,
++#endif
+ };
+
+ static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,
+diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
+index 3a7e8905a97e..880e48947576 100644
+--- a/drivers/irqchip/irq-stm32-exti.c
++++ b/drivers/irqchip/irq-stm32-exti.c
+@@ -602,17 +602,24 @@ stm32_exti_host_data *stm32_exti_host_init(const struct stm32_exti_drv_data *dd,
+ sizeof(struct stm32_exti_chip_data),
+ GFP_KERNEL);
+ if (!host_data->chips_data)
+- return NULL;
++ goto free_host_data;
+
+ host_data->base = of_iomap(node, 0);
+ if (!host_data->base) {
+ pr_err("%pOF: Unable to map registers\n", node);
+- return NULL;
++ goto free_chips_data;
+ }
+
+ stm32_host_data = host_data;
+
+ return host_data;
++
++free_chips_data:
++ kfree(host_data->chips_data);
++free_host_data:
++ kfree(host_data);
++
++ return NULL;
+ }
+
+ static struct
+@@ -664,10 +671,8 @@ static int __init stm32_exti_init(const struct stm32_exti_drv_data *drv_data,
+ struct irq_domain *domain;
+
+ host_data = stm32_exti_host_init(drv_data, node);
+- if (!host_data) {
+- ret = -ENOMEM;
+- goto out_free_mem;
+- }
++ if (!host_data)
++ return -ENOMEM;
+
+ domain = irq_domain_add_linear(node, drv_data->bank_nr * IRQS_PER_BANK,
+ &irq_exti_domain_ops, NULL);
+@@ -724,7 +729,6 @@ out_free_domain:
+ irq_domain_remove(domain);
+ out_unmap:
+ iounmap(host_data->base);
+-out_free_mem:
+ kfree(host_data->chips_data);
+ kfree(host_data);
+ return ret;
+@@ -751,10 +755,8 @@ __init stm32_exti_hierarchy_init(const struct stm32_exti_drv_data *drv_data,
+ }
+
+ host_data = stm32_exti_host_init(drv_data, node);
+- if (!host_data) {
+- ret = -ENOMEM;
+- goto out_free_mem;
+- }
++ if (!host_data)
++ return -ENOMEM;
+
+ for (i = 0; i < drv_data->bank_nr; i++)
+ stm32_exti_chip_init(host_data, i, node);
+@@ -776,7 +778,6 @@ __init stm32_exti_hierarchy_init(const struct stm32_exti_drv_data *drv_data,
+
+ out_unmap:
+ iounmap(host_data->base);
+-out_free_mem:
+ kfree(host_data->chips_data);
+ kfree(host_data);
+ return ret;
+diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
+index 3c7547a3c371..d7b9cdafd1c3 100644
+--- a/drivers/md/dm-kcopyd.c
++++ b/drivers/md/dm-kcopyd.c
+@@ -487,6 +487,8 @@ static int run_complete_job(struct kcopyd_job *job)
+ if (atomic_dec_and_test(&kc->nr_jobs))
+ wake_up(&kc->destroyq);
+
++ cond_resched();
++
+ return 0;
+ }
+
+diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
+index 2a87b0d2f21f..a530972c5a7e 100644
+--- a/drivers/mfd/sm501.c
++++ b/drivers/mfd/sm501.c
+@@ -715,6 +715,7 @@ sm501_create_subdev(struct sm501_devdata *sm, char *name,
+ smdev->pdev.name = name;
+ smdev->pdev.id = sm->pdev_id;
+ smdev->pdev.dev.parent = sm->dev;
++ smdev->pdev.dev.coherent_dma_mask = 0xffffffff;
+
+ if (res_count) {
+ smdev->pdev.resource = (struct resource *)(smdev+1);
+diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
+index 94d7a865b135..7504f430c011 100644
+--- a/drivers/mtd/ubi/vtbl.c
++++ b/drivers/mtd/ubi/vtbl.c
+@@ -578,6 +578,16 @@ static int init_volumes(struct ubi_device *ubi,
+ vol->ubi = ubi;
+ reserved_pebs += vol->reserved_pebs;
+
++ /*
++ * We use ubi->peb_count and not vol->reserved_pebs because
++ * we want to keep the code simple. Otherwise we'd have to
++ * resize/check the bitmap upon volume resize too.
++ * Allocating a few bytes more does not hurt.
++ */
++ err = ubi_fastmap_init_checkmap(vol, ubi->peb_count);
++ if (err)
++ return err;
++
+ /*
+ * In case of dynamic volume UBI knows nothing about how many
+ * data is stored there. So assume the whole volume is used.
+@@ -620,16 +630,6 @@ static int init_volumes(struct ubi_device *ubi,
+ (long long)(vol->used_ebs - 1) * vol->usable_leb_size;
+ vol->used_bytes += av->last_data_size;
+ vol->last_eb_bytes = av->last_data_size;
+-
+- /*
+- * We use ubi->peb_count and not vol->reserved_pebs because
+- * we want to keep the code simple. Otherwise we'd have to
+- * resize/check the bitmap upon volume resize too.
+- * Allocating a few bytes more does not hurt.
+- */
+- err = ubi_fastmap_init_checkmap(vol, ubi->peb_count);
+- if (err)
+- return err;
+ }
+
+ /* And add the layout volume */
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 4394c1162be4..4fdf3d33aa59 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -5907,12 +5907,12 @@ unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
+ return bp->hw_resc.max_cp_rings;
+ }
+
+-void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
++unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
+ {
+- bp->hw_resc.max_cp_rings = max;
++ return bp->hw_resc.max_cp_rings - bnxt_get_ulp_msix_num(bp);
+ }
+
+-unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
++static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
+ {
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+
+@@ -8492,7 +8492,8 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
+
+ *max_tx = hw_resc->max_tx_rings;
+ *max_rx = hw_resc->max_rx_rings;
+- *max_cp = min_t(int, hw_resc->max_irqs, hw_resc->max_cp_rings);
++ *max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp),
++ hw_resc->max_irqs);
+ *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs);
+ max_ring_grps = hw_resc->max_hw_ring_grps;
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+index 91575ef97c8c..ea1246a94b38 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -1468,8 +1468,7 @@ int bnxt_hwrm_set_coal(struct bnxt *);
+ unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
+ void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max);
+ unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
+-void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max);
+-unsigned int bnxt_get_max_func_irqs(struct bnxt *bp);
++unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp);
+ int bnxt_get_avail_msix(struct bnxt *bp, int num);
+ int bnxt_reserve_rings(struct bnxt *bp);
+ void bnxt_tx_disable(struct bnxt *bp);
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+index a64910892c25..2c77004a022b 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+@@ -451,7 +451,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
+
+- vf_cp_rings = hw_resc->max_cp_rings - bp->cp_nr_rings;
++ vf_cp_rings = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings;
+ vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
+@@ -544,7 +544,8 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
+ max_stat_ctxs = hw_resc->max_stat_ctxs;
+
+ /* Remaining rings are distributed equally amongs VF's for now */
+- vf_cp_rings = (hw_resc->max_cp_rings - bp->cp_nr_rings) / num_vfs;
++ vf_cp_rings = (bnxt_get_max_func_cp_rings_for_en(bp) -
++ bp->cp_nr_rings) / num_vfs;
+ vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
+@@ -638,7 +639,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
+ */
+ vfs_supported = *num_vfs;
+
+- avail_cp = hw_resc->max_cp_rings - bp->cp_nr_rings;
++ avail_cp = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings;
+ avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
+ avail_cp = min_t(int, avail_cp, avail_stat);
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+index 840f6e505f73..4209cfd73971 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+@@ -169,7 +169,6 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
+ edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
+ }
+ bnxt_fill_msix_vecs(bp, ent);
+- bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix);
+ edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
+ return avail_msix;
+ }
+@@ -178,7 +177,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
+ {
+ struct net_device *dev = edev->net;
+ struct bnxt *bp = netdev_priv(dev);
+- int max_cp_rings, msix_requested;
+
+ ASSERT_RTNL();
+ if (ulp_id != BNXT_ROCE_ULP)
+@@ -187,9 +185,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
+ if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
+ return 0;
+
+- max_cp_rings = bnxt_get_max_func_cp_rings(bp);
+- msix_requested = edev->ulp_tbl[ulp_id].msix_requested;
+- bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested);
+ edev->ulp_tbl[ulp_id].msix_requested = 0;
+ edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
+ if (netif_running(dev)) {
+@@ -220,21 +215,6 @@ int bnxt_get_ulp_msix_base(struct bnxt *bp)
+ return 0;
+ }
+
+-void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id)
+-{
+- ASSERT_RTNL();
+- if (bnxt_ulp_registered(bp->edev, ulp_id)) {
+- struct bnxt_en_dev *edev = bp->edev;
+- unsigned int msix_req, max;
+-
+- msix_req = edev->ulp_tbl[ulp_id].msix_requested;
+- max = bnxt_get_max_func_cp_rings(bp);
+- bnxt_set_max_func_cp_rings(bp, max - msix_req);
+- max = bnxt_get_max_func_stat_ctxs(bp);
+- bnxt_set_max_func_stat_ctxs(bp, max - 1);
+- }
+-}
+-
+ static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
+ struct bnxt_fw_msg *fw_msg)
+ {
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+index df48ac71729f..d9bea37cd211 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+@@ -90,7 +90,6 @@ static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev, int ulp_id)
+
+ int bnxt_get_ulp_msix_num(struct bnxt *bp);
+ int bnxt_get_ulp_msix_base(struct bnxt *bp);
+-void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id);
+ void bnxt_ulp_stop(struct bnxt *bp);
+ void bnxt_ulp_start(struct bnxt *bp);
+ void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+index b773bc07edf7..14b49612aa86 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+@@ -186,6 +186,9 @@ struct bcmgenet_mib_counters {
+ #define UMAC_MAC1 0x010
+ #define UMAC_MAX_FRAME_LEN 0x014
+
++#define UMAC_MODE 0x44
++#define MODE_LINK_STATUS (1 << 5)
++
+ #define UMAC_EEE_CTRL 0x064
+ #define EN_LPI_RX_PAUSE (1 << 0)
+ #define EN_LPI_TX_PFC (1 << 1)
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+index 5333274a283c..4241ae928d4a 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+@@ -115,8 +115,14 @@ void bcmgenet_mii_setup(struct net_device *dev)
+ static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
+ struct fixed_phy_status *status)
+ {
+- if (dev && dev->phydev && status)
+- status->link = dev->phydev->link;
++ struct bcmgenet_priv *priv;
++ u32 reg;
++
++ if (dev && dev->phydev && status) {
++ priv = netdev_priv(dev);
++ reg = bcmgenet_umac_readl(priv, UMAC_MODE);
++ status->link = !!(reg & MODE_LINK_STATUS);
++ }
+
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index a6c911bb5ce2..515d96e32143 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -481,11 +481,6 @@ static int macb_mii_probe(struct net_device *dev)
+
+ if (np) {
+ if (of_phy_is_fixed_link(np)) {
+- if (of_phy_register_fixed_link(np) < 0) {
+- dev_err(&bp->pdev->dev,
+- "broken fixed-link specification\n");
+- return -ENODEV;
+- }
+ bp->phy_node = of_node_get(np);
+ } else {
+ bp->phy_node = of_parse_phandle(np, "phy-handle", 0);
+@@ -568,7 +563,7 @@ static int macb_mii_init(struct macb *bp)
+ {
+ struct macb_platform_data *pdata;
+ struct device_node *np;
+- int err;
++ int err = -ENXIO;
+
+ /* Enable management port */
+ macb_writel(bp, NCR, MACB_BIT(MPE));
+@@ -591,12 +586,23 @@ static int macb_mii_init(struct macb *bp)
+ dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
+
+ np = bp->pdev->dev.of_node;
+- if (pdata)
+- bp->mii_bus->phy_mask = pdata->phy_mask;
++ if (np && of_phy_is_fixed_link(np)) {
++ if (of_phy_register_fixed_link(np) < 0) {
++ dev_err(&bp->pdev->dev,
++ "broken fixed-link specification %pOF\n", np);
++ goto err_out_free_mdiobus;
++ }
++
++ err = mdiobus_register(bp->mii_bus);
++ } else {
++ if (pdata)
++ bp->mii_bus->phy_mask = pdata->phy_mask;
++
++ err = of_mdiobus_register(bp->mii_bus, np);
++ }
+
+- err = of_mdiobus_register(bp->mii_bus, np);
+ if (err)
+- goto err_out_free_mdiobus;
++ goto err_out_free_fixed_link;
+
+ err = macb_mii_probe(bp->dev);
+ if (err)
+@@ -606,6 +612,7 @@ static int macb_mii_init(struct macb *bp)
+
+ err_out_unregister_bus:
+ mdiobus_unregister(bp->mii_bus);
++err_out_free_fixed_link:
+ if (np && of_phy_is_fixed_link(np))
+ of_phy_deregister_fixed_link(np);
+ err_out_free_mdiobus:
+@@ -1957,14 +1964,17 @@ static void macb_reset_hw(struct macb *bp)
+ {
+ struct macb_queue *queue;
+ unsigned int q;
++ u32 ctrl = macb_readl(bp, NCR);
+
+ /* Disable RX and TX (XXX: Should we halt the transmission
+ * more gracefully?)
+ */
+- macb_writel(bp, NCR, 0);
++ ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
+
+ /* Clear the stats registers (XXX: Update stats first?) */
+- macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
++ ctrl |= MACB_BIT(CLRSTAT);
++
++ macb_writel(bp, NCR, ctrl);
+
+ /* Clear all status flags */
+ macb_writel(bp, TSR, -1);
+@@ -2152,7 +2162,7 @@ static void macb_init_hw(struct macb *bp)
+ }
+
+ /* Enable TX and RX */
+- macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
++ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
+ }
+
+ /* The hash address register is 64 bits long and takes up two
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index d318d35e598f..6fd7ea8074b0 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -3911,7 +3911,7 @@ static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
+ #define HCLGE_FUNC_NUMBER_PER_DESC 6
+ int i, j;
+
+- for (i = 0; i < HCLGE_DESC_NUMBER; i++)
++ for (i = 1; i < HCLGE_DESC_NUMBER; i++)
+ for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
+ if (desc[i].data[j])
+ return false;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+index 9f7932e423b5..6315e8ad8467 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+@@ -208,6 +208,8 @@ int hclge_mac_start_phy(struct hclge_dev *hdev)
+ if (!phydev)
+ return 0;
+
++ phydev->supported &= ~SUPPORTED_FIBRE;
++
+ ret = phy_connect_direct(netdev, phydev,
+ hclge_mac_adjust_link,
+ PHY_INTERFACE_MODE_SGMII);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+index 86478a6b99c5..c8c315eb5128 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+@@ -139,14 +139,15 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ struct mlx5_wq_ctrl *wq_ctrl)
+ {
+ u32 sq_strides_offset;
++ u32 rq_pg_remainder;
+ int err;
+
+ mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4,
+ MLX5_GET(qpc, qpc, log_rq_size),
+ &wq->rq.fbc);
+
+- sq_strides_offset =
+- ((wq->rq.fbc.frag_sz_m1 + 1) % PAGE_SIZE) / MLX5_SEND_WQE_BB;
++ rq_pg_remainder = mlx5_wq_cyc_get_byte_size(&wq->rq) % PAGE_SIZE;
++ sq_strides_offset = rq_pg_remainder / MLX5_SEND_WQE_BB;
+
+ mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB),
+ MLX5_GET(qpc, qpc, log_sq_size),
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+index 4a519d8edec8..3500c79e29cd 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+@@ -433,6 +433,8 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
+ void
+ mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
+ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
++void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
++ struct net_device *dev);
+
+ /* spectrum_kvdl.c */
+ int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp);
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+index 77b2adb29341..cb43d17097fa 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+@@ -6228,6 +6228,17 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
+ }
+
++void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
++ struct net_device *dev)
++{
++ struct mlxsw_sp_rif *rif;
++
++ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
++ if (!rif)
++ return;
++ mlxsw_sp_rif_destroy(rif);
++}
++
+ static void
+ mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+index eea5666a86b2..6cb43dda8232 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+@@ -160,6 +160,24 @@ bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
+ return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+ }
+
++static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
++ void *data)
++{
++ struct mlxsw_sp *mlxsw_sp = data;
++
++ mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
++ return 0;
++}
++
++static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
++ struct net_device *dev)
++{
++ mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
++ netdev_walk_all_upper_dev_rcu(dev,
++ mlxsw_sp_bridge_device_upper_rif_destroy,
++ mlxsw_sp);
++}
++
+ static struct mlxsw_sp_bridge_device *
+ mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
+ struct net_device *br_dev)
+@@ -198,6 +216,8 @@ static void
+ mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
+ struct mlxsw_sp_bridge_device *bridge_device)
+ {
++ mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
++ bridge_device->dev);
+ list_del(&bridge_device->list);
+ if (bridge_device->vlan_enabled)
+ bridge->vlan_enabled_exists = false;
+diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+index d4c27f849f9b..c2a9e64bc57b 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+@@ -227,29 +227,16 @@ done:
+ spin_unlock_bh(&nn->reconfig_lock);
+ }
+
+-/**
+- * nfp_net_reconfig() - Reconfigure the firmware
+- * @nn: NFP Net device to reconfigure
+- * @update: The value for the update field in the BAR config
+- *
+- * Write the update word to the BAR and ping the reconfig queue. The
+- * poll until the firmware has acknowledged the update by zeroing the
+- * update word.
+- *
+- * Return: Negative errno on error, 0 on success
+- */
+-int nfp_net_reconfig(struct nfp_net *nn, u32 update)
++static void nfp_net_reconfig_sync_enter(struct nfp_net *nn)
+ {
+ bool cancelled_timer = false;
+ u32 pre_posted_requests;
+- int ret;
+
+ spin_lock_bh(&nn->reconfig_lock);
+
+ nn->reconfig_sync_present = true;
+
+ if (nn->reconfig_timer_active) {
+- del_timer(&nn->reconfig_timer);
+ nn->reconfig_timer_active = false;
+ cancelled_timer = true;
+ }
+@@ -258,14 +245,43 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update)
+
+ spin_unlock_bh(&nn->reconfig_lock);
+
+- if (cancelled_timer)
++ if (cancelled_timer) {
++ del_timer_sync(&nn->reconfig_timer);
+ nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
++ }
+
+ /* Run the posted reconfigs which were issued before we started */
+ if (pre_posted_requests) {
+ nfp_net_reconfig_start(nn, pre_posted_requests);
+ nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
+ }
++}
++
++static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
++{
++ nfp_net_reconfig_sync_enter(nn);
++
++ spin_lock_bh(&nn->reconfig_lock);
++ nn->reconfig_sync_present = false;
++ spin_unlock_bh(&nn->reconfig_lock);
++}
++
++/**
++ * nfp_net_reconfig() - Reconfigure the firmware
++ * @nn: NFP Net device to reconfigure
++ * @update: The value for the update field in the BAR config
++ *
++ * Write the update word to the BAR and ping the reconfig queue. The
++ * poll until the firmware has acknowledged the update by zeroing the
++ * update word.
++ *
++ * Return: Negative errno on error, 0 on success
++ */
++int nfp_net_reconfig(struct nfp_net *nn, u32 update)
++{
++ int ret;
++
++ nfp_net_reconfig_sync_enter(nn);
+
+ nfp_net_reconfig_start(nn, update);
+ ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
+@@ -3609,6 +3625,7 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
+ */
+ void nfp_net_free(struct nfp_net *nn)
+ {
++ WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
+ if (nn->dp.netdev)
+ free_netdev(nn->dp.netdev);
+ else
+@@ -3893,4 +3910,5 @@ void nfp_net_clean(struct nfp_net *nn)
+ return;
+
+ unregister_netdev(nn->dp.netdev);
++ nfp_net_reconfig_wait_posted(nn);
+ }
+diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+index 353f1c129af1..059ba9429e51 100644
+--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
++++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+@@ -2384,26 +2384,20 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev,
+ return status;
+ }
+
+-static netdev_features_t qlge_fix_features(struct net_device *ndev,
+- netdev_features_t features)
+-{
+- int err;
+-
+- /* Update the behavior of vlan accel in the adapter */
+- err = qlge_update_hw_vlan_features(ndev, features);
+- if (err)
+- return err;
+-
+- return features;
+-}
+-
+ static int qlge_set_features(struct net_device *ndev,
+ netdev_features_t features)
+ {
+ netdev_features_t changed = ndev->features ^ features;
++ int err;
++
++ if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
++ /* Update the behavior of vlan accel in the adapter */
++ err = qlge_update_hw_vlan_features(ndev, features);
++ if (err)
++ return err;
+
+- if (changed & NETIF_F_HW_VLAN_CTAG_RX)
+ qlge_vlan_mode(ndev, features);
++ }
+
+ return 0;
+ }
+@@ -4719,7 +4713,6 @@ static const struct net_device_ops qlge_netdev_ops = {
+ .ndo_set_mac_address = qlge_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_tx_timeout = qlge_tx_timeout,
+- .ndo_fix_features = qlge_fix_features,
+ .ndo_set_features = qlge_set_features,
+ .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index 9ceb34bac3a9..e5eb361b973c 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -303,6 +303,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 },
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
++ { PCI_DEVICE(PCI_VENDOR_ID_NCUBE, 0x8168), 0, 0, RTL_CFG_1 },
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
+ { PCI_VENDOR_ID_DLINK, 0x4300,
+ PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 },
+@@ -5038,7 +5039,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
+ rtl_hw_reset(tp);
+ }
+
+-static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
++static void rtl_set_tx_config_registers(struct rtl8169_private *tp)
+ {
+ /* Set DMA burst size and Interframe Gap Time */
+ RTL_W32(tp, TxConfig, (TX_DMA_BURST << TxDMAShift) |
+@@ -5149,12 +5150,14 @@ static void rtl_hw_start(struct rtl8169_private *tp)
+
+ rtl_set_rx_max_size(tp);
+ rtl_set_rx_tx_desc_registers(tp);
+- rtl_set_rx_tx_config_registers(tp);
++ rtl_set_tx_config_registers(tp);
+ RTL_W8(tp, Cfg9346, Cfg9346_Lock);
+
+ /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
+ RTL_R8(tp, IntrMask);
+ RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
++ rtl_init_rxcfg(tp);
++
+ rtl_set_rx_mode(tp->dev);
+ /* no early-rx interrupts */
+ RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+index 76649adf8fb0..c0a855b7ab3b 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+@@ -112,7 +112,6 @@ struct stmmac_priv {
+ u32 tx_count_frames;
+ u32 tx_coal_frames;
+ u32 tx_coal_timer;
+- bool tx_timer_armed;
+
+ int tx_coalesce;
+ int hwts_tx_en;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index ef6a8d39db2f..c579d98b9666 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -3126,16 +3126,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
+ * element in case of no SG.
+ */
+ priv->tx_count_frames += nfrags + 1;
+- if (likely(priv->tx_coal_frames > priv->tx_count_frames) &&
+- !priv->tx_timer_armed) {
++ if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
+ mod_timer(&priv->txtimer,
+ STMMAC_COAL_TIMER(priv->tx_coal_timer));
+- priv->tx_timer_armed = true;
+ } else {
+ priv->tx_count_frames = 0;
+ stmmac_set_tx_ic(priv, desc);
+ priv->xstats.tx_set_ic_bit++;
+- priv->tx_timer_armed = false;
+ }
+
+ skb_tx_timestamp(skb);
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index dd1d6e115145..6d74cde68163 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -29,6 +29,7 @@
+ #include <linux/netdevice.h>
+ #include <linux/inetdevice.h>
+ #include <linux/etherdevice.h>
++#include <linux/pci.h>
+ #include <linux/skbuff.h>
+ #include <linux/if_vlan.h>
+ #include <linux/in.h>
+@@ -1939,12 +1940,16 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
+ {
+ struct net_device *ndev;
+ struct net_device_context *net_device_ctx;
++ struct device *pdev = vf_netdev->dev.parent;
+ struct netvsc_device *netvsc_dev;
+ int ret;
+
+ if (vf_netdev->addr_len != ETH_ALEN)
+ return NOTIFY_DONE;
+
++ if (!pdev || !dev_is_pci(pdev) || dev_is_pf(pdev))
++ return NOTIFY_DONE;
++
+ /*
+ * We will use the MAC address to locate the synthetic interface to
+ * associate with the VF interface. If we don't find a matching
+@@ -2101,6 +2106,16 @@ static int netvsc_probe(struct hv_device *dev,
+
+ memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
+
++ /* We must get rtnl lock before scheduling nvdev->subchan_work,
++ * otherwise netvsc_subchan_work() can get rtnl lock first and wait
++ * all subchannels to show up, but that may not happen because
++ * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer()
++ * -> ... -> device_add() -> ... -> __device_attach() can't get
++ * the device lock, so all the subchannels can't be processed --
++ * finally netvsc_subchan_work() hangs for ever.
++ */
++ rtnl_lock();
++
+ if (nvdev->num_chn > 1)
+ schedule_work(&nvdev->subchan_work);
+
+@@ -2119,7 +2134,6 @@ static int netvsc_probe(struct hv_device *dev,
+ else
+ net->max_mtu = ETH_DATA_LEN;
+
+- rtnl_lock();
+ ret = register_netdevice(net);
+ if (ret != 0) {
+ pr_err("Unable to register netdev.\n");
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index 2a58607a6aea..1b07bb5e110d 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -5214,8 +5214,8 @@ static int rtl8152_probe(struct usb_interface *intf,
+ netdev->hw_features &= ~NETIF_F_RXCSUM;
+ }
+
+- if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 &&
+- udev->serial && !strcmp(udev->serial, "000001000000")) {
++ if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial &&
++ (!strcmp(udev->serial, "000001000000") || !strcmp(udev->serial, "000002000000"))) {
+ dev_info(&udev->dev, "Dell TB16 Dock, disable RX aggregation");
+ set_bit(DELL_TB_RX_AGG_BUG, &tp->flags);
+ }
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+index b6122aad639e..7569f9af8d47 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+@@ -6926,15 +6926,15 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
+ cfg->d11inf.io_type = (u8)io_type;
+ brcmu_d11_attach(&cfg->d11inf);
+
+- err = brcmf_setup_wiphy(wiphy, ifp);
+- if (err < 0)
+- goto priv_out;
+-
+ /* regulatory notifer below needs access to cfg so
+ * assign it now.
+ */
+ drvr->config = cfg;
+
++ err = brcmf_setup_wiphy(wiphy, ifp);
++ if (err < 0)
++ goto priv_out;
++
+ brcmf_dbg(INFO, "Registering custom regulatory\n");
+ wiphy->reg_notifier = brcmf_cfg80211_reg_notifier;
+ wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
+diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
+index 23e270839e6a..f00df2384985 100644
+--- a/drivers/pci/controller/pci-mvebu.c
++++ b/drivers/pci/controller/pci-mvebu.c
+@@ -1219,7 +1219,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
+ pcie->realio.start = PCIBIOS_MIN_IO;
+ pcie->realio.end = min_t(resource_size_t,
+ IO_SPACE_LIMIT,
+- resource_size(&pcie->io));
++ resource_size(&pcie->io) - 1);
+ } else
+ pcie->realio = pcie->io;
+
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index b2857865c0aa..a1a243ee36bb 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -1725,7 +1725,7 @@ int pci_setup_device(struct pci_dev *dev)
+ static void pci_configure_mps(struct pci_dev *dev)
+ {
+ struct pci_dev *bridge = pci_upstream_bridge(dev);
+- int mps, p_mps, rc;
++ int mps, mpss, p_mps, rc;
+
+ if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
+ return;
+@@ -1753,6 +1753,14 @@ static void pci_configure_mps(struct pci_dev *dev)
+ if (pcie_bus_config != PCIE_BUS_DEFAULT)
+ return;
+
++ mpss = 128 << dev->pcie_mpss;
++ if (mpss < p_mps && pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) {
++ pcie_set_mps(bridge, mpss);
++ pci_info(dev, "Upstream bridge's Max Payload Size set to %d (was %d, max %d)\n",
++ mpss, p_mps, 128 << bridge->pcie_mpss);
++ p_mps = pcie_get_mps(bridge);
++ }
++
+ rc = pcie_set_mps(dev, p_mps);
+ if (rc) {
+ pci_warn(dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
+@@ -1761,7 +1769,7 @@ static void pci_configure_mps(struct pci_dev *dev)
+ }
+
+ pci_info(dev, "Max Payload Size set to %d (was %d, max %d)\n",
+- p_mps, mps, 128 << dev->pcie_mpss);
++ p_mps, mps, mpss);
+ }
+
+ static struct hpp_type0 pci_default_type0 = {
+diff --git a/drivers/pinctrl/pinctrl-axp209.c b/drivers/pinctrl/pinctrl-axp209.c
+index a52779f33ad4..afd0b533c40a 100644
+--- a/drivers/pinctrl/pinctrl-axp209.c
++++ b/drivers/pinctrl/pinctrl-axp209.c
+@@ -316,7 +316,7 @@ static const struct pinctrl_ops axp20x_pctrl_ops = {
+ .get_group_pins = axp20x_group_pins,
+ };
+
+-static void axp20x_funcs_groups_from_mask(struct device *dev, unsigned int mask,
++static int axp20x_funcs_groups_from_mask(struct device *dev, unsigned int mask,
+ unsigned int mask_len,
+ struct axp20x_pinctrl_function *func,
+ const struct pinctrl_pin_desc *pins)
+@@ -331,18 +331,22 @@ static void axp20x_funcs_groups_from_mask(struct device *dev, unsigned int mask,
+ func->groups = devm_kcalloc(dev,
+ ngroups, sizeof(const char *),
+ GFP_KERNEL);
++ if (!func->groups)
++ return -ENOMEM;
+ group = func->groups;
+ for_each_set_bit(bit, &mask_cpy, mask_len) {
+ *group = pins[bit].name;
+ group++;
+ }
+ }
++
++ return 0;
+ }
+
+-static void axp20x_build_funcs_groups(struct platform_device *pdev)
++static int axp20x_build_funcs_groups(struct platform_device *pdev)
+ {
+ struct axp20x_pctl *pctl = platform_get_drvdata(pdev);
+- int i, pin, npins = pctl->desc->npins;
++ int i, ret, pin, npins = pctl->desc->npins;
+
+ pctl->funcs[AXP20X_FUNC_GPIO_OUT].name = "gpio_out";
+ pctl->funcs[AXP20X_FUNC_GPIO_OUT].muxval = AXP20X_MUX_GPIO_OUT;
+@@ -366,13 +370,19 @@ static void axp20x_build_funcs_groups(struct platform_device *pdev)
+ pctl->funcs[i].groups[pin] = pctl->desc->pins[pin].name;
+ }
+
+- axp20x_funcs_groups_from_mask(&pdev->dev, pctl->desc->ldo_mask,
++ ret = axp20x_funcs_groups_from_mask(&pdev->dev, pctl->desc->ldo_mask,
+ npins, &pctl->funcs[AXP20X_FUNC_LDO],
+ pctl->desc->pins);
++ if (ret)
++ return ret;
+
+- axp20x_funcs_groups_from_mask(&pdev->dev, pctl->desc->adc_mask,
++ ret = axp20x_funcs_groups_from_mask(&pdev->dev, pctl->desc->adc_mask,
+ npins, &pctl->funcs[AXP20X_FUNC_ADC],
+ pctl->desc->pins);
++ if (ret)
++ return ret;
++
++ return 0;
+ }
+
+ static const struct of_device_id axp20x_pctl_match[] = {
+@@ -424,7 +434,11 @@ static int axp20x_pctl_probe(struct platform_device *pdev)
+
+ platform_set_drvdata(pdev, pctl);
+
+- axp20x_build_funcs_groups(pdev);
++ ret = axp20x_build_funcs_groups(pdev);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to build groups\n");
++ return ret;
++ }
+
+ pctrl_desc = devm_kzalloc(&pdev->dev, sizeof(*pctrl_desc), GFP_KERNEL);
+ if (!pctrl_desc)
+diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
+index 136ff2b4cce5..db2af09067db 100644
+--- a/drivers/platform/x86/asus-nb-wmi.c
++++ b/drivers/platform/x86/asus-nb-wmi.c
+@@ -496,6 +496,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
+ { KE_KEY, 0xC4, { KEY_KBDILLUMUP } },
+ { KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } },
+ { KE_IGNORE, 0xC6, }, /* Ambient Light Sensor notification */
++ { KE_KEY, 0xFA, { KEY_PROG2 } }, /* Lid flip action */
+ { KE_END, 0},
+ };
+
+diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
+index b5b890127479..b7dfe06261f1 100644
+--- a/drivers/platform/x86/intel_punit_ipc.c
++++ b/drivers/platform/x86/intel_punit_ipc.c
+@@ -17,6 +17,7 @@
+ #include <linux/bitops.h>
+ #include <linux/device.h>
+ #include <linux/interrupt.h>
++#include <linux/io.h>
+ #include <linux/platform_device.h>
+ #include <asm/intel_punit_ipc.h>
+
+diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
+index 822860b4801a..c1ed641b3e26 100644
+--- a/drivers/pwm/pwm-meson.c
++++ b/drivers/pwm/pwm-meson.c
+@@ -458,7 +458,6 @@ static int meson_pwm_init_channels(struct meson_pwm *meson,
+ struct meson_pwm_channel *channels)
+ {
+ struct device *dev = meson->chip.dev;
+- struct device_node *np = dev->of_node;
+ struct clk_init_data init;
+ unsigned int i;
+ char name[255];
+@@ -467,7 +466,7 @@ static int meson_pwm_init_channels(struct meson_pwm *meson,
+ for (i = 0; i < meson->chip.npwm; i++) {
+ struct meson_pwm_channel *channel = &channels[i];
+
+- snprintf(name, sizeof(name), "%pOF#mux%u", np, i);
++ snprintf(name, sizeof(name), "%s#mux%u", dev_name(dev), i);
+
+ init.name = name;
+ init.ops = &clk_mux_ops;
+diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
+index bbf95b78ef5d..43e3398c9268 100644
+--- a/drivers/s390/block/dasd_eckd.c
++++ b/drivers/s390/block/dasd_eckd.c
+@@ -1780,6 +1780,9 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device)
+ struct dasd_eckd_private *private = device->private;
+ int i;
+
++ if (!private)
++ return;
++
+ dasd_alias_disconnect_device_from_lcu(device);
+ private->ned = NULL;
+ private->sneq = NULL;
+@@ -2035,8 +2038,11 @@ static int dasd_eckd_basic_to_ready(struct dasd_device *device)
+
+ static int dasd_eckd_online_to_ready(struct dasd_device *device)
+ {
+- cancel_work_sync(&device->reload_device);
+- cancel_work_sync(&device->kick_validate);
++ if (cancel_work_sync(&device->reload_device))
++ dasd_put_device(device);
++ if (cancel_work_sync(&device->kick_validate))
++ dasd_put_device(device);
++
+ return 0;
+ };
+
+diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
+index 80e5b283fd81..1391e5f35918 100644
+--- a/drivers/scsi/aic94xx/aic94xx_init.c
++++ b/drivers/scsi/aic94xx/aic94xx_init.c
+@@ -1030,8 +1030,10 @@ static int __init aic94xx_init(void)
+
+ aic94xx_transport_template =
+ sas_domain_attach_transport(&aic94xx_transport_functions);
+- if (!aic94xx_transport_template)
++ if (!aic94xx_transport_template) {
++ err = -ENOMEM;
+ goto out_destroy_caches;
++ }
+
+ err = pci_register_driver(&aic94xx_pci_driver);
+ if (err)
+diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
+index e40a2c0a9543..d3da39a9f567 100644
+--- a/drivers/staging/comedi/drivers/ni_mio_common.c
++++ b/drivers/staging/comedi/drivers/ni_mio_common.c
+@@ -5446,11 +5446,11 @@ static int ni_E_init(struct comedi_device *dev,
+ /* Digital I/O (PFI) subdevice */
+ s = &dev->subdevices[NI_PFI_DIO_SUBDEV];
+ s->type = COMEDI_SUBD_DIO;
+- s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
+ s->maxdata = 1;
+ if (devpriv->is_m_series) {
+ s->n_chan = 16;
+ s->insn_bits = ni_pfi_insn_bits;
++ s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
+
+ ni_writew(dev, s->state, NI_M_PFI_DO_REG);
+ for (i = 0; i < NUM_PFI_OUTPUT_SELECT_REGS; ++i) {
+@@ -5459,6 +5459,7 @@ static int ni_E_init(struct comedi_device *dev,
+ }
+ } else {
+ s->n_chan = 10;
++ s->subdev_flags = SDF_INTERNAL;
+ }
+ s->insn_config = ni_pfi_insn_config;
+
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index ed3114556fda..560ed8711706 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -951,7 +951,7 @@ static void vhost_iotlb_notify_vq(struct vhost_dev *d,
+ list_for_each_entry_safe(node, n, &d->pending_list, node) {
+ struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
+ if (msg->iova <= vq_msg->iova &&
+- msg->iova + msg->size - 1 > vq_msg->iova &&
++ msg->iova + msg->size - 1 >= vq_msg->iova &&
+ vq_msg->type == VHOST_IOTLB_MISS) {
+ vhost_poll_queue(&node->vq->poll);
+ list_del(&node->node);
+diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
+index 2780886e8ba3..de062fb201bc 100644
+--- a/drivers/virtio/virtio_pci_legacy.c
++++ b/drivers/virtio/virtio_pci_legacy.c
+@@ -122,6 +122,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
+ struct virtqueue *vq;
+ u16 num;
+ int err;
++ u64 q_pfn;
+
+ /* Select the queue we're interested in */
+ iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
+@@ -141,9 +142,17 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
+ if (!vq)
+ return ERR_PTR(-ENOMEM);
+
++ q_pfn = virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
++ if (q_pfn >> 32) {
++ dev_err(&vp_dev->pci_dev->dev,
++ "platform bug: legacy virtio-mmio must not be used with RAM above 0x%llxGB\n",
++ 0x1ULL << (32 + PAGE_SHIFT - 30));
++ err = -E2BIG;
++ goto out_del_vq;
++ }
++
+ /* activate the queue */
+- iowrite32(virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
+- vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
++ iowrite32(q_pfn, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
+
+ vq->priv = (void __force *)vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;
+
+@@ -160,6 +169,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
+
+ out_deactivate:
+ iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
++out_del_vq:
+ vring_del_virtqueue(vq);
+ return ERR_PTR(err);
+ }
+diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
+index b437fccd4e62..294f35ce9e46 100644
+--- a/drivers/xen/xen-balloon.c
++++ b/drivers/xen/xen-balloon.c
+@@ -81,7 +81,7 @@ static void watch_target(struct xenbus_watch *watch,
+ static_max = new_target;
+ else
+ static_max >>= PAGE_SHIFT - 10;
+- target_diff = xen_pv_domain() ? 0
++ target_diff = (xen_pv_domain() || xen_initial_domain()) ? 0
+ : static_max - balloon_stats.target_pages;
+ }
+
+diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c |
3804 |
+index a3fdb4fe967d..daf45472bef9 100644 |
3805 |
+--- a/fs/btrfs/check-integrity.c |
3806 |
++++ b/fs/btrfs/check-integrity.c |
3807 |
+@@ -1539,7 +1539,12 @@ static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len, |
3808 |
+ } |
3809 |
+ |
3810 |
+ device = multi->stripes[0].dev; |
3811 |
+- block_ctx_out->dev = btrfsic_dev_state_lookup(device->bdev->bd_dev); |
3812 |
++ if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state) || |
3813 |
++ !device->bdev || !device->name) |
3814 |
++ block_ctx_out->dev = NULL; |
3815 |
++ else |
3816 |
++ block_ctx_out->dev = btrfsic_dev_state_lookup( |
3817 |
++ device->bdev->bd_dev); |
3818 |
+ block_ctx_out->dev_bytenr = multi->stripes[0].physical; |
3819 |
+ block_ctx_out->start = bytenr; |
3820 |
+ block_ctx_out->len = len; |
3821 |
+diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
+index e2ba0419297a..d20b244623f2 100644
+--- a/fs/btrfs/dev-replace.c
++++ b/fs/btrfs/dev-replace.c
+@@ -676,6 +676,12 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
+ 
+ 	btrfs_rm_dev_replace_unblocked(fs_info);
+ 
++	/*
++	 * Increment dev_stats_ccnt so that btrfs_run_dev_stats() will
++	 * update on-disk dev stats value during commit transaction
++	 */
++	atomic_inc(&tgt_device->dev_stats_ccnt);
++
+ 	/*
+ 	 * this is again a consistent state where no dev_replace procedure
+ 	 * is running, the target device is part of the filesystem, the
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 8aab7a6c1e58..53cac20650d8 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -10687,7 +10687,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
+ 		/* Don't want to race with allocators so take the groups_sem */
+ 		down_write(&space_info->groups_sem);
+ 		spin_lock(&block_group->lock);
+-		if (block_group->reserved ||
++		if (block_group->reserved || block_group->pinned ||
+ 		    btrfs_block_group_used(&block_group->item) ||
+ 		    block_group->ro ||
+ 		    list_is_singular(&block_group->list)) {
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index 879b76fa881a..be94c65bb4d2 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -1321,18 +1321,19 @@ static void __del_reloc_root(struct btrfs_root *root)
+ 	struct mapping_node *node = NULL;
+ 	struct reloc_control *rc = fs_info->reloc_ctl;
+ 
+-	spin_lock(&rc->reloc_root_tree.lock);
+-	rb_node = tree_search(&rc->reloc_root_tree.rb_root,
+-			      root->node->start);
+-	if (rb_node) {
+-		node = rb_entry(rb_node, struct mapping_node, rb_node);
+-		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
++	if (rc) {
++		spin_lock(&rc->reloc_root_tree.lock);
++		rb_node = tree_search(&rc->reloc_root_tree.rb_root,
++				      root->node->start);
++		if (rb_node) {
++			node = rb_entry(rb_node, struct mapping_node, rb_node);
++			rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
++		}
++		spin_unlock(&rc->reloc_root_tree.lock);
++		if (!node)
++			return;
++		BUG_ON((struct btrfs_root *)node->data != root);
+ 	}
+-	spin_unlock(&rc->reloc_root_tree.lock);
+-
+-	if (!node)
+-		return;
+-	BUG_ON((struct btrfs_root *)node->data != root);
+ 
+ 	spin_lock(&fs_info->trans_lock);
+ 	list_del_init(&root->root_list);
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index bddfc28b27c0..9b25f29d0e73 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -892,6 +892,8 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
+ 	char *device_name, *opts, *orig, *p;
+ 	int error = 0;
+ 
++	lockdep_assert_held(&uuid_mutex);
++
+ 	if (!options)
+ 		return 0;
+ 
+@@ -1526,12 +1528,6 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
+ 	if (!(flags & SB_RDONLY))
+ 		mode |= FMODE_WRITE;
+ 
+-	error = btrfs_parse_early_options(data, mode, fs_type,
+-					  &fs_devices);
+-	if (error) {
+-		return ERR_PTR(error);
+-	}
+-
+ 	security_init_mnt_opts(&new_sec_opts);
+ 	if (data) {
+ 		error = parse_security_options(data, &new_sec_opts);
+@@ -1539,10 +1535,6 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
+ 			return ERR_PTR(error);
+ 	}
+ 
+-	error = btrfs_scan_one_device(device_name, mode, fs_type, &fs_devices);
+-	if (error)
+-		goto error_sec_opts;
+-
+ 	/*
+ 	 * Setup a dummy root and fs_info for test/set super. This is because
+ 	 * we don't actually fill this stuff out until open_ctree, but we need
+@@ -1555,8 +1547,6 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
+ 		goto error_sec_opts;
+ 	}
+ 
+-	fs_info->fs_devices = fs_devices;
+-
+ 	fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
+ 	fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
+ 	security_init_mnt_opts(&fs_info->security_opts);
+@@ -1565,7 +1555,23 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
+ 		goto error_fs_info;
+ 	}
+ 
++	mutex_lock(&uuid_mutex);
++	error = btrfs_parse_early_options(data, mode, fs_type, &fs_devices);
++	if (error) {
++		mutex_unlock(&uuid_mutex);
++		goto error_fs_info;
++	}
++
++	error = btrfs_scan_one_device(device_name, mode, fs_type, &fs_devices);
++	if (error) {
++		mutex_unlock(&uuid_mutex);
++		goto error_fs_info;
++	}
++
++	fs_info->fs_devices = fs_devices;
++
+ 	error = btrfs_open_devices(fs_devices, mode, fs_type);
++	mutex_unlock(&uuid_mutex);
+ 	if (error)
+ 		goto error_fs_info;
+ 
+@@ -2234,15 +2240,21 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
+ 
+ 	switch (cmd) {
+ 	case BTRFS_IOC_SCAN_DEV:
++		mutex_lock(&uuid_mutex);
+ 		ret = btrfs_scan_one_device(vol->name, FMODE_READ,
+ 					    &btrfs_root_fs_type, &fs_devices);
++		mutex_unlock(&uuid_mutex);
+ 		break;
+ 	case BTRFS_IOC_DEVICES_READY:
++		mutex_lock(&uuid_mutex);
+ 		ret = btrfs_scan_one_device(vol->name, FMODE_READ,
+ 					    &btrfs_root_fs_type, &fs_devices);
+-		if (ret)
++		if (ret) {
++			mutex_unlock(&uuid_mutex);
+ 			break;
++		}
+ 		ret = !(fs_devices->num_devices == fs_devices->total_devices);
++		mutex_unlock(&uuid_mutex);
+ 		break;
+ 	case BTRFS_IOC_GET_SUPPORTED_FEATURES:
+ 		ret = btrfs_ioctl_get_supported_features((void __user*)arg);
+@@ -2368,7 +2380,7 @@ static __cold void btrfs_interface_exit(void)
+ 
+ static void __init btrfs_print_mod_info(void)
+ {
+-	pr_info("Btrfs loaded, crc32c=%s"
++	static const char options[] = ""
+ #ifdef CONFIG_BTRFS_DEBUG
+ 			", debug=on"
+ #endif
+@@ -2381,8 +2393,8 @@ static void __init btrfs_print_mod_info(void)
+ #ifdef CONFIG_BTRFS_FS_REF_VERIFY
+ 			", ref-verify=on"
+ #endif
+-			"\n",
+-			crc32c_impl());
++			;
++	pr_info("Btrfs loaded, crc32c=%s%s\n", crc32c_impl(), options);
+ }
+ 
+ static int __init init_btrfs_fs(void)
+diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
+index 8d40e7dd8c30..d014af352ce0 100644
+--- a/fs/btrfs/tree-checker.c
++++ b/fs/btrfs/tree-checker.c
+@@ -396,9 +396,22 @@ static int check_leaf(struct btrfs_fs_info *fs_info, struct extent_buffer *leaf,
+ 	 * skip this check for relocation trees.
+ 	 */
+ 	if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) {
++		u64 owner = btrfs_header_owner(leaf);
+ 		struct btrfs_root *check_root;
+ 
+-		key.objectid = btrfs_header_owner(leaf);
++		/* These trees must never be empty */
++		if (owner == BTRFS_ROOT_TREE_OBJECTID ||
++		    owner == BTRFS_CHUNK_TREE_OBJECTID ||
++		    owner == BTRFS_EXTENT_TREE_OBJECTID ||
++		    owner == BTRFS_DEV_TREE_OBJECTID ||
++		    owner == BTRFS_FS_TREE_OBJECTID ||
++		    owner == BTRFS_DATA_RELOC_TREE_OBJECTID) {
++			generic_err(fs_info, leaf, 0,
++			"invalid root, root %llu must never be empty",
++				    owner);
++			return -EUCLEAN;
++		}
++		key.objectid = owner;
+ 		key.type = BTRFS_ROOT_ITEM_KEY;
+ 		key.offset = (u64)-1;
+ 
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 1da162928d1a..5304b8d6ceb8 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -634,44 +634,48 @@ static void pending_bios_fn(struct btrfs_work *work)
+  *		devices.
+  */
+ static void btrfs_free_stale_devices(const char *path,
+-				     struct btrfs_device *skip_dev)
++				     struct btrfs_device *skip_device)
+ {
+-	struct btrfs_fs_devices *fs_devs, *tmp_fs_devs;
+-	struct btrfs_device *dev, *tmp_dev;
++	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
++	struct btrfs_device *device, *tmp_device;
+ 
+-	list_for_each_entry_safe(fs_devs, tmp_fs_devs, &fs_uuids, fs_list) {
+-
+-		if (fs_devs->opened)
++	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {
++		mutex_lock(&fs_devices->device_list_mutex);
++		if (fs_devices->opened) {
++			mutex_unlock(&fs_devices->device_list_mutex);
+ 			continue;
++		}
+ 
+-		list_for_each_entry_safe(dev, tmp_dev,
+-					 &fs_devs->devices, dev_list) {
++		list_for_each_entry_safe(device, tmp_device,
++					 &fs_devices->devices, dev_list) {
+ 			int not_found = 0;
+ 
+-			if (skip_dev && skip_dev == dev)
++			if (skip_device && skip_device == device)
+ 				continue;
+-			if (path && !dev->name)
++			if (path && !device->name)
+ 				continue;
+ 
+ 			rcu_read_lock();
+ 			if (path)
+-				not_found = strcmp(rcu_str_deref(dev->name),
++				not_found = strcmp(rcu_str_deref(device->name),
+ 						   path);
+ 			rcu_read_unlock();
+ 			if (not_found)
+ 				continue;
+ 
+ 			/* delete the stale device */
+-			if (fs_devs->num_devices == 1) {
+-				btrfs_sysfs_remove_fsid(fs_devs);
+-				list_del(&fs_devs->fs_list);
+-				free_fs_devices(fs_devs);
++			fs_devices->num_devices--;
++			list_del(&device->dev_list);
++			btrfs_free_device(device);
++
++			if (fs_devices->num_devices == 0)
+ 				break;
+-			} else {
+-				fs_devs->num_devices--;
+-				list_del(&dev->dev_list);
+-				btrfs_free_device(dev);
+-			}
++		}
++		mutex_unlock(&fs_devices->device_list_mutex);
++		if (fs_devices->num_devices == 0) {
++			btrfs_sysfs_remove_fsid(fs_devices);
++			list_del(&fs_devices->fs_list);
++			free_fs_devices(fs_devices);
+ 		}
+ 	}
+ }
+@@ -750,7 +754,8 @@ error_brelse:
+  * error pointer when failed
+  */
+ static noinline struct btrfs_device *device_list_add(const char *path,
+-			   struct btrfs_super_block *disk_super)
++			   struct btrfs_super_block *disk_super,
++			   bool *new_device_added)
+ {
+ 	struct btrfs_device *device;
+ 	struct btrfs_fs_devices *fs_devices;
+@@ -764,21 +769,26 @@ static noinline struct btrfs_device *device_list_add(const char *path,
+ 		if (IS_ERR(fs_devices))
+ 			return ERR_CAST(fs_devices);
+ 
++		mutex_lock(&fs_devices->device_list_mutex);
+ 		list_add(&fs_devices->fs_list, &fs_uuids);
+ 
+ 		device = NULL;
+ 	} else {
++		mutex_lock(&fs_devices->device_list_mutex);
+ 		device = find_device(fs_devices, devid,
+ 				disk_super->dev_item.uuid);
+ 	}
+ 
+ 	if (!device) {
+-		if (fs_devices->opened)
++		if (fs_devices->opened) {
++			mutex_unlock(&fs_devices->device_list_mutex);
+ 			return ERR_PTR(-EBUSY);
++		}
+ 
+ 		device = btrfs_alloc_device(NULL, &devid,
+ 					    disk_super->dev_item.uuid);
+ 		if (IS_ERR(device)) {
++			mutex_unlock(&fs_devices->device_list_mutex);
+ 			/* we can safely leave the fs_devices entry around */
+ 			return device;
+ 		}
+@@ -786,17 +796,16 @@ static noinline struct btrfs_device *device_list_add(const char *path,
+ 		name = rcu_string_strdup(path, GFP_NOFS);
+ 		if (!name) {
+ 			btrfs_free_device(device);
++			mutex_unlock(&fs_devices->device_list_mutex);
+ 			return ERR_PTR(-ENOMEM);
+ 		}
+ 		rcu_assign_pointer(device->name, name);
+ 
+-		mutex_lock(&fs_devices->device_list_mutex);
+ 		list_add_rcu(&device->dev_list, &fs_devices->devices);
+ 		fs_devices->num_devices++;
+-		mutex_unlock(&fs_devices->device_list_mutex);
+ 
+ 		device->fs_devices = fs_devices;
+-		btrfs_free_stale_devices(path, device);
++		*new_device_added = true;
+ 
+ 		if (disk_super->label[0])
+ 			pr_info("BTRFS: device label %s devid %llu transid %llu %s\n",
+@@ -840,12 +849,15 @@ static noinline struct btrfs_device *device_list_add(const char *path,
+ 			 * with larger generation number or the last-in if
+ 			 * generation are equal.
+ 			 */
++			mutex_unlock(&fs_devices->device_list_mutex);
+ 			return ERR_PTR(-EEXIST);
+ 		}
+ 
+ 		name = rcu_string_strdup(path, GFP_NOFS);
+-		if (!name)
++		if (!name) {
++			mutex_unlock(&fs_devices->device_list_mutex);
+ 			return ERR_PTR(-ENOMEM);
++		}
+ 		rcu_string_free(device->name);
+ 		rcu_assign_pointer(device->name, name);
+ 		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
+@@ -865,6 +877,7 @@ static noinline struct btrfs_device *device_list_add(const char *path,
+ 
+ 	fs_devices->total_devices = btrfs_super_num_devices(disk_super);
+ 
++	mutex_unlock(&fs_devices->device_list_mutex);
+ 	return device;
+ }
+ 
+@@ -1146,7 +1159,8 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
+ {
+ 	int ret;
+ 
+-	mutex_lock(&uuid_mutex);
++	lockdep_assert_held(&uuid_mutex);
++
+ 	mutex_lock(&fs_devices->device_list_mutex);
+ 	if (fs_devices->opened) {
+ 		fs_devices->opened++;
+@@ -1156,7 +1170,6 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
+ 		ret = open_fs_devices(fs_devices, flags, holder);
+ 	}
+ 	mutex_unlock(&fs_devices->device_list_mutex);
+-	mutex_unlock(&uuid_mutex);
+ 
+ 	return ret;
+ }
+@@ -1221,12 +1234,15 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
+ 			  struct btrfs_fs_devices **fs_devices_ret)
+ {
+ 	struct btrfs_super_block *disk_super;
++	bool new_device_added = false;
+ 	struct btrfs_device *device;
+ 	struct block_device *bdev;
+ 	struct page *page;
+ 	int ret = 0;
+ 	u64 bytenr;
+ 
++	lockdep_assert_held(&uuid_mutex);
++
+ 	/*
+ 	 * we would like to check all the supers, but that would make
+ 	 * a btrfs mount succeed after a mkfs from a different FS.
+@@ -1245,13 +1261,14 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
+ 		goto error_bdev_put;
+ 	}
+ 
+-	mutex_lock(&uuid_mutex);
+-	device = device_list_add(path, disk_super);
+-	if (IS_ERR(device))
++	device = device_list_add(path, disk_super, &new_device_added);
++	if (IS_ERR(device)) {
+ 		ret = PTR_ERR(device);
+-	else
++	} else {
+ 		*fs_devices_ret = device->fs_devices;
+-	mutex_unlock(&uuid_mutex);
++		if (new_device_added)
++			btrfs_free_stale_devices(path, device);
++	}
+ 
+ 	btrfs_release_disk_super(page);
+ 
+@@ -2029,6 +2046,9 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
+ 
+ 	cur_devices->num_devices--;
+ 	cur_devices->total_devices--;
++	/* Update total_devices of the parent fs_devices if it's seed */
++	if (cur_devices != fs_devices)
++		fs_devices->total_devices--;
+ 
+ 	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
+ 		cur_devices->missing_devices--;
+@@ -6563,10 +6583,14 @@ static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
+ 	write_lock(&map_tree->map_tree.lock);
+ 	ret = add_extent_mapping(&map_tree->map_tree, em, 0);
+ 	write_unlock(&map_tree->map_tree.lock);
+-	BUG_ON(ret); /* Tree corruption */
++	if (ret < 0) {
++		btrfs_err(fs_info,
++			  "failed to add chunk map, start=%llu len=%llu: %d",
++			  em->start, em->len, ret);
++	}
+ 	free_extent_map(em);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ static void fill_device_from_item(struct extent_buffer *leaf,
+diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
+index 991bfb271908..b20297988fe0 100644
+--- a/fs/cifs/cifs_debug.c
++++ b/fs/cifs/cifs_debug.c
+@@ -383,6 +383,10 @@ static ssize_t cifs_stats_proc_write(struct file *file,
+ 		atomic_set(&totBufAllocCount, 0);
+ 		atomic_set(&totSmBufAllocCount, 0);
+ #endif /* CONFIG_CIFS_STATS2 */
++		spin_lock(&GlobalMid_Lock);
++		GlobalMaxActiveXid = 0;
++		GlobalCurrentXid = 0;
++		spin_unlock(&GlobalMid_Lock);
+ 		spin_lock(&cifs_tcp_ses_lock);
+ 		list_for_each(tmp1, &cifs_tcp_ses_list) {
+ 			server = list_entry(tmp1, struct TCP_Server_Info,
+@@ -395,6 +399,10 @@ static ssize_t cifs_stats_proc_write(struct file *file,
+ 							  struct cifs_tcon,
+ 							  tcon_list);
+ 					atomic_set(&tcon->num_smbs_sent, 0);
++					spin_lock(&tcon->stat_lock);
++					tcon->bytes_read = 0;
++					tcon->bytes_written = 0;
++					spin_unlock(&tcon->stat_lock);
+ 					if (server->ops->clear_stats)
+ 						server->ops->clear_stats(tcon);
+ 				}
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 5df2c0698cda..9d02563b2147 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -3031,11 +3031,15 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
+ 	}
+ 
+ #ifdef CONFIG_CIFS_SMB311
+-	if ((volume_info->linux_ext) && (ses->server->posix_ext_supported)) {
+-		if (ses->server->vals->protocol_id == SMB311_PROT_ID) {
++	if (volume_info->linux_ext) {
++		if (ses->server->posix_ext_supported) {
+ 			tcon->posix_extensions = true;
+ 			printk_once(KERN_WARNING
+ 				"SMB3.11 POSIX Extensions are experimental\n");
++		} else {
++			cifs_dbg(VFS, "Server does not support mounting with posix SMB3.11 extensions.\n");
++			rc = -EOPNOTSUPP;
++			goto out_fail;
+ 		}
+ 	}
+ #endif /* 311 */
+diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
+index 3ff7cec2da81..239215dcc00b 100644
+--- a/fs/cifs/smb2misc.c
++++ b/fs/cifs/smb2misc.c
+@@ -240,6 +240,13 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr)
+ 		if (clc_len == len + 1)
+ 			return 0;
+ 
++		/*
++		 * Some windows servers (win2016) will pad also the final
++		 * PDU in a compound to 8 bytes.
++		 */
++		if (((clc_len + 7) & ~7) == len)
++			return 0;
++
+ 		/*
+ 		 * MacOS server pads after SMB2.1 write response with 3 bytes
+ 		 * of junk. Other servers match RFC1001 len to actual
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index ffce77e00a58..44e511a35559 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -360,7 +360,7 @@ smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
+ 			       total_len);
+ 
+ 	if (tcon != NULL) {
+-#ifdef CONFIG_CIFS_STATS2
++#ifdef CONFIG_CIFS_STATS
+ 		uint16_t com_code = le16_to_cpu(smb2_command);
+ 		cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
+ #endif
+@@ -1928,7 +1928,7 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
+ {
+ 	struct smb_rqst rqst;
+ 	struct smb2_create_req *req;
+-	struct smb2_create_rsp *rsp;
++	struct smb2_create_rsp *rsp = NULL;
+ 	struct TCP_Server_Info *server;
+ 	struct cifs_ses *ses = tcon->ses;
+ 	struct kvec iov[3]; /* make sure at least one for each open context */
+@@ -1943,27 +1943,31 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
+ 	char *pc_buf = NULL;
+ 	int flags = 0;
+ 	unsigned int total_len;
+-	__le16 *path = cifs_convert_path_to_utf16(full_path, cifs_sb);
+-
+-	if (!path)
+-		return -ENOMEM;
++	__le16 *utf16_path = NULL;
+ 
+ 	cifs_dbg(FYI, "mkdir\n");
+ 
++	/* resource #1: path allocation */
++	utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
++	if (!utf16_path)
++		return -ENOMEM;
++
+ 	if (ses && (ses->server))
+ 		server = ses->server;
+-	else
+-		return -EIO;
++	else {
++		rc = -EIO;
++		goto err_free_path;
++	}
+ 
++	/* resource #2: request */
+ 	rc = smb2_plain_req_init(SMB2_CREATE, tcon, (void **) &req, &total_len);
+-
+ 	if (rc)
+-		return rc;
++		goto err_free_path;
++
+ 
+ 	if (smb3_encryption_required(tcon))
+ 		flags |= CIFS_TRANSFORM_REQ;
+ 
+-
+ 	req->ImpersonationLevel = IL_IMPERSONATION;
+ 	req->DesiredAccess = cpu_to_le32(FILE_WRITE_ATTRIBUTES);
+ 	/* File attributes ignored on open (used in create though) */
+@@ -1992,50 +1996,44 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
+ 		req->sync_hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
+ 		rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
+ 						 &name_len,
+-						 tcon->treeName, path);
+-		if (rc) {
+-			cifs_small_buf_release(req);
+-			return rc;
+-		}
++						 tcon->treeName, utf16_path);
++		if (rc)
++			goto err_free_req;
++
+ 		req->NameLength = cpu_to_le16(name_len * 2);
+ 		uni_path_len = copy_size;
+-		path = copy_path;
++		/* free before overwriting resource */
++		kfree(utf16_path);
++		utf16_path = copy_path;
+ 	} else {
+-		uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
++		uni_path_len = (2 * UniStrnlen((wchar_t *)utf16_path, PATH_MAX)) + 2;
+ 		/* MUST set path len (NameLength) to 0 opening root of share */
+ 		req->NameLength = cpu_to_le16(uni_path_len - 2);
+ 		if (uni_path_len % 8 != 0) {
+ 			copy_size = roundup(uni_path_len, 8);
+ 			copy_path = kzalloc(copy_size, GFP_KERNEL);
+ 			if (!copy_path) {
+-				cifs_small_buf_release(req);
+-				return -ENOMEM;
++				rc = -ENOMEM;
++				goto err_free_req;
+ 			}
+-			memcpy((char *)copy_path, (const char *)path,
++			memcpy((char *)copy_path, (const char *)utf16_path,
+ 			       uni_path_len);
+ 			uni_path_len = copy_size;
+-			path = copy_path;
++			/* free before overwriting resource */
++			kfree(utf16_path);
++			utf16_path = copy_path;
+ 		}
+ 	}
+ 
+ 	iov[1].iov_len = uni_path_len;
+-	iov[1].iov_base = path;
++	iov[1].iov_base = utf16_path;
+ 	req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_NONE;
+ 
+ 	if (tcon->posix_extensions) {
+-		if (n_iov > 2) {
+-			struct create_context *ccontext =
+-			    (struct create_context *)iov[n_iov-1].iov_base;
+-			ccontext->Next =
+-				cpu_to_le32(iov[n_iov-1].iov_len);
+-		}
+-
++		/* resource #3: posix buf */
+ 		rc = add_posix_context(iov, &n_iov, mode);
+-		if (rc) {
+-			cifs_small_buf_release(req);
+-			kfree(copy_path);
+-			return rc;
+-		}
++		if (rc)
++			goto err_free_req;
+ 		pc_buf = iov[n_iov-1].iov_base;
+ 	}
+ 
+@@ -2044,32 +2042,33 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
+ 	rqst.rq_iov = iov;
+ 	rqst.rq_nvec = n_iov;
+ 
+-	rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
+-			    &rsp_iov);
+-
+-	cifs_small_buf_release(req);
+-	rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
+-
+-	if (rc != 0) {
++	/* resource #4: response buffer */
++	rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
++	if (rc) {
+ 		cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
+ 		trace_smb3_posix_mkdir_err(xid, tcon->tid, ses->Suid,
+-			CREATE_NOT_FILE, FILE_WRITE_ATTRIBUTES, rc);
+-		goto smb311_mkdir_exit;
+-	} else
+-		trace_smb3_posix_mkdir_done(xid, rsp->PersistentFileId, tcon->tid,
+-				     ses->Suid, CREATE_NOT_FILE,
+-				     FILE_WRITE_ATTRIBUTES);
++					   CREATE_NOT_FILE,
++					   FILE_WRITE_ATTRIBUTES, rc);
++		goto err_free_rsp_buf;
++	}
++
++	rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
++	trace_smb3_posix_mkdir_done(xid, rsp->PersistentFileId, tcon->tid,
++				    ses->Suid, CREATE_NOT_FILE,
++				    FILE_WRITE_ATTRIBUTES);
+ 
+ 	SMB2_close(xid, tcon, rsp->PersistentFileId, rsp->VolatileFileId);
+ 
+ 	/* Eventually save off posix specific response info and timestaps */
+ 
+-smb311_mkdir_exit:
+-	kfree(copy_path);
+-	kfree(pc_buf);
++err_free_rsp_buf:
+ 	free_rsp_buf(resp_buftype, rsp);
++	kfree(pc_buf);
++err_free_req:
++	cifs_small_buf_release(req);
++err_free_path:
++	kfree(utf16_path);
+ 	return rc;
+-
+ }
+ #endif /* SMB311 */
+ 
+diff --git a/fs/dcache.c b/fs/dcache.c
+index ceb7b491d1b9..d19a0dc46c04 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -292,7 +292,8 @@ void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry
+ 		spin_unlock(&dentry->d_lock);
+ 		name->name = p->name;
+ 	} else {
+-		memcpy(name->inline_name, dentry->d_iname, DNAME_INLINE_LEN);
++		memcpy(name->inline_name, dentry->d_iname,
++		       dentry->d_name.len + 1);
+ 		spin_unlock(&dentry->d_lock);
+ 		name->name = name->inline_name;
+ 	}
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 8f931d699287..b61954d40c25 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -2149,8 +2149,12 @@ static void f2fs_write_failed(struct address_space *mapping, loff_t to)
+ 
+ 	if (to > i_size) {
+ 		down_write(&F2FS_I(inode)->i_mmap_sem);
++		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
++
+ 		truncate_pagecache(inode, i_size);
+ 		f2fs_truncate_blocks(inode, i_size, true);
++
++		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ 		up_write(&F2FS_I(inode)->i_mmap_sem);
+ 	}
+ }
+@@ -2490,6 +2494,10 @@ static int f2fs_set_data_page_dirty(struct page *page)
+ 	if (!PageUptodate(page))
+ 		SetPageUptodate(page);
+ 
++	/* don't remain PG_checked flag which was set during GC */
++	if (is_cold_data(page))
++		clear_cold_data(page);
++
+ 	if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
+ 		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
+ 			f2fs_register_inmem_page(inode, page);
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index 6880c6f78d58..3ffa341cf586 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -782,22 +782,26 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
+ 	}
+ 
+ 	if (attr->ia_valid & ATTR_SIZE) {
+-		if (attr->ia_size <= i_size_read(inode)) {
+-			down_write(&F2FS_I(inode)->i_mmap_sem);
+-			truncate_setsize(inode, attr->ia_size);
++		bool to_smaller = (attr->ia_size <= i_size_read(inode));
++
++		down_write(&F2FS_I(inode)->i_mmap_sem);
++		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
++
++		truncate_setsize(inode, attr->ia_size);
++
++		if (to_smaller)
+ 			err = f2fs_truncate(inode);
+-			up_write(&F2FS_I(inode)->i_mmap_sem);
+-			if (err)
+-				return err;
+-		} else {
+-			/*
+-			 * do not trim all blocks after i_size if target size is
+-			 * larger than i_size.
+-			 */
+-			down_write(&F2FS_I(inode)->i_mmap_sem);
+-			truncate_setsize(inode, attr->ia_size);
+-			up_write(&F2FS_I(inode)->i_mmap_sem);
++		/*
++		 * do not trim all blocks after i_size if target size is
++		 * larger than i_size.
++		 */
++		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
++		up_write(&F2FS_I(inode)->i_mmap_sem);
+ 
++		if (err)
++			return err;
++
++		if (!to_smaller) {
+ 			/* should convert inline inode here */
+ 			if (!f2fs_may_inline_data(inode)) {
+ 				err = f2fs_convert_inline_inode(inode);
+@@ -944,13 +948,18 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
+ 
+ 			blk_start = (loff_t)pg_start << PAGE_SHIFT;
+ 			blk_end = (loff_t)pg_end << PAGE_SHIFT;
++
+ 			down_write(&F2FS_I(inode)->i_mmap_sem);
++			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
++
+ 			truncate_inode_pages_range(mapping, blk_start,
+ 					blk_end - 1);
+ 
+ 			f2fs_lock_op(sbi);
+ 			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
+ 			f2fs_unlock_op(sbi);
++
++			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ 			up_write(&F2FS_I(inode)->i_mmap_sem);
+ 		}
+ 	}
+@@ -1295,8 +1304,6 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
+ 	if (ret)
+ 		goto out_sem;
+ 
+-	truncate_pagecache_range(inode, offset, offset + len - 1);
+-
+ 	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
+ 	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
+ 
+@@ -1326,12 +1333,19 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
+ 			unsigned int end_offset;
+ 			pgoff_t end;
+ 
++			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
++
++			truncate_pagecache_range(inode,
++				(loff_t)index << PAGE_SHIFT,
++				((loff_t)pg_end << PAGE_SHIFT) - 1);
++
+ 			f2fs_lock_op(sbi);
+ 
+ 			set_new_dnode(&dn, inode, NULL, NULL, 0);
+ 			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
+ 			if (ret) {
+ 				f2fs_unlock_op(sbi);
++				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ 				goto out;
+ 			}
+ 
+@@ -1340,7 +1354,9 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
+ 
+ 			ret = f2fs_do_zero_range(&dn, index, end);
+ 			f2fs_put_dnode(&dn);
++
+ 			f2fs_unlock_op(sbi);
++			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ 
+ 			f2fs_balance_fs(sbi, dn.node_changed);
+ 
+diff --git a/fs/fat/cache.c b/fs/fat/cache.c
+index e9bed49df6b7..78d501c1fb65 100644
+--- a/fs/fat/cache.c
++++ b/fs/fat/cache.c
+@@ -225,7 +225,8 @@ static inline void cache_init(struct fat_cache_id *cid, int fclus, int dclus)
+ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
+ {
+ 	struct super_block *sb = inode->i_sb;
+-	const int limit = sb->s_maxbytes >> MSDOS_SB(sb)->cluster_bits;
++	struct msdos_sb_info *sbi = MSDOS_SB(sb);
++	const int limit = sb->s_maxbytes >> sbi->cluster_bits;
+ 	struct fat_entry fatent;
+ 	struct fat_cache_id cid;
+ 	int nr;
+@@ -234,6 +235,12 @@ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
+ 
+ 	*fclus = 0;
+ 	*dclus = MSDOS_I(inode)->i_start;
++	if (!fat_valid_entry(sbi, *dclus)) {
++		fat_fs_error_ratelimit(sb,
++			"%s: invalid start cluster (i_pos %lld, start %08x)",
++			__func__, MSDOS_I(inode)->i_pos, *dclus);
++		return -EIO;
++	}
+ 	if (cluster == 0)
+ 		return 0;
+ 
+@@ -250,9 +257,8 @@ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
+ 		/* prevent the infinite loop of cluster chain */
+ 		if (*fclus > limit) {
+ 			fat_fs_error_ratelimit(sb,
+-					"%s: detected the cluster chain loop"
+-					" (i_pos %lld)", __func__,
+-					MSDOS_I(inode)->i_pos);
++				"%s: detected the cluster chain loop (i_pos %lld)",
++				__func__, MSDOS_I(inode)->i_pos);
+ 			nr = -EIO;
+ 			goto out;
+ 		}
+@@ -262,9 +268,8 @@ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
+ 			goto out;
+ 		else if (nr == FAT_ENT_FREE) {
+ 			fat_fs_error_ratelimit(sb,
+-				       "%s: invalid cluster chain (i_pos %lld)",
+-				       __func__,
+-				       MSDOS_I(inode)->i_pos);
++				"%s: invalid cluster chain (i_pos %lld)",
++				__func__, MSDOS_I(inode)->i_pos);
+ 			nr = -EIO;
+ 			goto out;
+ 		} else if (nr == FAT_ENT_EOF) {
+diff --git a/fs/fat/fat.h b/fs/fat/fat.h
+index 8fc1093da47d..a0a00f3734bc 100644
+--- a/fs/fat/fat.h
++++ b/fs/fat/fat.h
+@@ -348,6 +348,11 @@ static inline void fatent_brelse(struct fat_entry *fatent)
+ 	fatent->fat_inode = NULL;
+ }
+ 
++static inline bool fat_valid_entry(struct msdos_sb_info *sbi, int entry)
++{
++	return FAT_START_ENT <= entry && entry < sbi->max_cluster;
++}
++
+ extern void fat_ent_access_init(struct super_block *sb);
+ extern int fat_ent_read(struct inode *inode, struct fat_entry *fatent,
+ 			int entry);
+diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
+index bac10de678cc..3aef8630a4b9 100644
+--- a/fs/fat/fatent.c
++++ b/fs/fat/fatent.c
+@@ -23,7 +23,7 @@ static void fat12_ent_blocknr(struct super_block *sb, int entry,
+ {
+ 	struct msdos_sb_info *sbi = MSDOS_SB(sb);
+ 	int bytes = entry + (entry >> 1);
+-	WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry);
++	WARN_ON(!fat_valid_entry(sbi, entry));
+ 	*offset = bytes & (sb->s_blocksize - 1);
+ 	*blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
+ }
+@@ -33,7 +33,7 @@ static void fat_ent_blocknr(struct super_block *sb, int entry,
+ {
+ 	struct msdos_sb_info *sbi = MSDOS_SB(sb);
+ 	int bytes = (entry << sbi->fatent_shift);
+-	WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry);
++	WARN_ON(!fat_valid_entry(sbi, entry));
+ 	*offset = bytes & (sb->s_blocksize - 1);
+ 	*blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
+ }
+@@ -353,7 +353,7 @@ int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry)
+ 	int err, offset;
+ 	sector_t blocknr;
+ 
+-	if (entry < FAT_START_ENT || sbi->max_cluster <= entry) {
++	if (!fat_valid_entry(sbi, entry)) {
+ 		fatent_brelse(fatent);
+ 		fat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", entry);
+ 		return -EIO;
+diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c
+index ad04a5741016..9a8772465a90 100644
+--- a/fs/hfs/brec.c
++++ b/fs/hfs/brec.c
+@@ -75,9 +75,10 @@ int hfs_brec_insert(struct hfs_find_data *fd, void *entry, int entry_len)
+ 	if (!fd->bnode) {
+ 		if (!tree->root)
+ 			hfs_btree_inc_height(tree);
+-		fd->bnode = hfs_bnode_find(tree, tree->leaf_head);
+-		if (IS_ERR(fd->bnode))
+-			return PTR_ERR(fd->bnode);
++		node = hfs_bnode_find(tree, tree->leaf_head);
++		if (IS_ERR(node))
++			return PTR_ERR(node);
++		fd->bnode = node;
+ 		fd->record = -1;
+ 	}
+ 	new_node = NULL;
+diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
+index b5254378f011..cd017d7dbdfa 100644
+--- a/fs/hfsplus/dir.c
++++ b/fs/hfsplus/dir.c
+@@ -78,13 +78,13 @@ again:
+ 				cpu_to_be32(HFSP_HARDLINK_TYPE) &&
+ 				entry.file.user_info.fdCreator ==
+ 				cpu_to_be32(HFSP_HFSPLUS_CREATOR) &&
++				HFSPLUS_SB(sb)->hidden_dir &&
+ 				(entry.file.create_date ==
+ 					HFSPLUS_I(HFSPLUS_SB(sb)->hidden_dir)->
+ 						create_date ||
+ 				entry.file.create_date ==
+ 					HFSPLUS_I(d_inode(sb->s_root))->
+-						create_date) &&
+-				HFSPLUS_SB(sb)->hidden_dir) {
++						create_date)) {
+ 			struct qstr str;
+ 			char name[32];
+ 
+diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
+index a6c0f54c48c3..80abba550bfa 100644
+--- a/fs/hfsplus/super.c
++++ b/fs/hfsplus/super.c
+@@ -524,8 +524,10 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
+ 		goto out_put_root;
+ 	if (!hfs_brec_read(&fd, &entry, sizeof(entry))) {
+ 		hfs_find_exit(&fd);
+-		if (entry.type != cpu_to_be16(HFSPLUS_FOLDER))
++		if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) {
++			err = -EINVAL;
+ 			goto out_put_root;
++		}
+ 		inode = hfsplus_iget(sb, be32_to_cpu(entry.folder.id));
+ 		if (IS_ERR(inode)) {
+ 			err = PTR_ERR(inode);
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 464db0c0f5c8..ff98e2a3f3cc 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -7734,7 +7734,7 @@ static int nfs4_sp4_select_mode(struct nfs_client *clp,
+ 	}
+ out:
+ 	clp->cl_sp4_flags = flags;
+-	return 0;
++	return ret;
+ }
+ 
+ struct nfs41_exchange_id_data {
+diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
+index e64ecb9f2720..66c373230e60 100644
+--- a/fs/proc/kcore.c
++++ b/fs/proc/kcore.c
+@@ -384,8 +384,10 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
+ 		phdr->p_flags	= PF_R|PF_W|PF_X;
+ 		phdr->p_offset	= kc_vaddr_to_offset(m->addr) + dataoff;
+ 		phdr->p_vaddr	= (size_t)m->addr;
+-		if (m->type == KCORE_RAM || m->type == KCORE_TEXT)
++		if (m->type == KCORE_RAM)
+ 			phdr->p_paddr	= __pa(m->addr);
++		else if (m->type == KCORE_TEXT)
++			phdr->p_paddr	= __pa_symbol(m->addr);
+ 		else
+ 			phdr->p_paddr	= (elf_addr_t)-1;
+ 		phdr->p_filesz	= phdr->p_memsz	= m->size;
+diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
+index cfb6674331fd..0651646dd04d 100644
+--- a/fs/proc/vmcore.c
++++ b/fs/proc/vmcore.c
+@@ -225,6 +225,7 @@ out_unlock:
+ 	return ret;
+ }
+ 
++#ifdef CONFIG_MMU
+ static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
+ 			       u64 start, size_t size)
+ {
+@@ -259,6 +260,7 @@ out_unlock:
+ 	mutex_unlock(&vmcoredd_mutex);
+ 	return ret;
+ }
++#endif /* CONFIG_MMU */
+ #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
+ 
+ /* Read from the ELF header and then the crash dump. On error, negative value is
+diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
+index ae4811fecc1f..6d670bd9ab6b 100644
+--- a/fs/reiserfs/reiserfs.h
++++ b/fs/reiserfs/reiserfs.h
+@@ -271,7 +271,7 @@ struct reiserfs_journal_list {
+ 
+ 	struct mutex j_commit_mutex;
+ 	unsigned int j_trans_id;
+-	time_t j_timestamp;
++	time64_t j_timestamp; /* write-only but useful for crash dump analysis */
+ 	struct reiserfs_list_bitmap *j_list_bitmap;
+ 	struct buffer_head *j_commit_bh;	/* commit buffer head */
+ 	struct reiserfs_journal_cnode *j_realblock;
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 29502238e510..bf85e152af05 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -3082,4 +3082,6 @@
+ 
+ #define PCI_VENDOR_ID_OCZ		0x1b85
+ 
++#define PCI_VENDOR_ID_NCUBE		0x10ff
++
+ #endif /* _LINUX_PCI_IDS_H */
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index cd3ecda9386a..106e01c721e6 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -2023,6 +2023,10 @@ int tcp_set_ulp_id(struct sock *sk, const int ulp);
+ void tcp_get_available_ulp(char *buf, size_t len);
+ void tcp_cleanup_ulp(struct sock *sk);
+ 
++#define MODULE_ALIAS_TCP_ULP(name)				\
++	__MODULE_INFO(alias, alias_userspace, name);		\
++	__MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name)
++
+ /* Call BPF_SOCK_OPS program that returns an int. If the return value
+  * is < 0, then the BPF op failed (for example if the loaded BPF
+  * program does not support the chosen operation or there is no BPF
+diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h
+index 7b8c9e19bad1..910cc4334b21 100644
+--- a/include/uapi/linux/keyctl.h
++++ b/include/uapi/linux/keyctl.h
+@@ -65,7 +65,7 @@
+ 
+ /* keyctl structures */
+ struct keyctl_dh_params {
+-	__s32 private;
++	__s32 dh_private;
+ 	__s32 prime;
+ 	__s32 base;
+ };
+diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
+index 76efe9a183f5..fc5b103512e7 100644
+--- a/kernel/bpf/inode.c
++++ b/kernel/bpf/inode.c
+@@ -196,19 +196,21 @@ static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos)
+ {
+ 	struct bpf_map *map = seq_file_to_map(m);
+ 	void *key = map_iter(m)->key;
++	void *prev_key;
+ 
+ 	if (map_iter(m)->done)
+ 		return NULL;
+ 
+ 	if (unlikely(v == SEQ_START_TOKEN))
+-		goto done;
++		prev_key = NULL;
++	else
++		prev_key = key;
+ 
+-	if (map->ops->map_get_next_key(map, key, key)) {
++	if (map->ops->map_get_next_key(map, prev_key, key)) {
+ 		map_iter(m)->done = true;
+ 		return NULL;
+ 	}
+ 
+-done:
+ 	++(*pos);
+ 	return key;
+ }
+diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
+index c4d75c52b4fc..58899601fccf 100644
+--- a/kernel/bpf/sockmap.c
++++ b/kernel/bpf/sockmap.c
+@@ -58,6 +58,7 @@ struct bpf_stab {
+ 	struct bpf_map map;
+ 	struct sock **sock_map;
+ 	struct bpf_sock_progs progs;
++	raw_spinlock_t lock;
+ };
+ 
+ struct bucket {
+@@ -89,9 +90,9 @@ enum smap_psock_state {
+ 
+ struct smap_psock_map_entry {
+ 	struct list_head list;
++	struct bpf_map *map;
+ 	struct sock **entry;
+ 	struct htab_elem __rcu *hash_link;
+-	struct bpf_htab __rcu *htab;
+ };
+ 
+ struct smap_psock {
+@@ -343,13 +344,18 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
+ 	e = psock_map_pop(sk, psock);
+ 	while (e) {
+ 		if (e->entry) {
+-			osk = cmpxchg(e->entry, sk, NULL);
++			struct bpf_stab *stab = container_of(e->map, struct bpf_stab, map);
++
++			raw_spin_lock_bh(&stab->lock);
++			osk = *e->entry;
+ 			if (osk == sk) {
++				*e->entry = NULL;
+ 				smap_release_sock(psock, sk);
+ 			}
++			raw_spin_unlock_bh(&stab->lock);
+ 		} else {
+ 			struct htab_elem *link = rcu_dereference(e->hash_link);
+-			struct bpf_htab *htab = rcu_dereference(e->htab);
++			struct bpf_htab *htab = container_of(e->map, struct bpf_htab, map);
+ 			struct hlist_head *head;
+ 			struct htab_elem *l;
+ 			struct bucket *b;
+@@ -370,6 +376,7 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
+ 			}
+ 			raw_spin_unlock_bh(&b->lock);
+ 		}
++		kfree(e);
+ 		e = psock_map_pop(sk, psock);
+ 	}
+ 	rcu_read_unlock();
+@@ -1644,6 +1651,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
+ 		return ERR_PTR(-ENOMEM);
+ 
+ 	bpf_map_init_from_attr(&stab->map, attr);
++	raw_spin_lock_init(&stab->lock);
+ 
+ 	/* make sure page count doesn't overflow */
+ 	cost = (u64) stab->map.max_entries * sizeof(struct sock *);
+@@ -1678,8 +1686,10 @@ static void smap_list_map_remove(struct smap_psock *psock,
+ 
+ 	spin_lock_bh(&psock->maps_lock);
+ 	list_for_each_entry_safe(e, tmp, &psock->maps, list) {
+-		if (e->entry == entry)
++		if (e->entry == entry) {
+ 			list_del(&e->list);
++			kfree(e);
++		}
+ 	}
+ 	spin_unlock_bh(&psock->maps_lock);
+ }
+@@ -1693,8 +1703,10 @@ static void smap_list_hash_remove(struct smap_psock *psock,
+ 	list_for_each_entry_safe(e, tmp, &psock->maps, list) {
+ 		struct htab_elem *c = rcu_dereference(e->hash_link);
+ 
+-		if (c == hash_link)
++		if (c == hash_link) {
+ 			list_del(&e->list);
++			kfree(e);
++		}
+ 	}
+ 	spin_unlock_bh(&psock->maps_lock);
+ }
+@@ -1714,14 +1726,15 @@ static void sock_map_free(struct bpf_map *map)
+ 	 * and a grace period expire to ensure psock is really safe to remove.
+ 	 */
+ 	rcu_read_lock();
++	raw_spin_lock_bh(&stab->lock);
+ 	for (i = 0; i < stab->map.max_entries; i++) {
+ 		struct smap_psock *psock;
+ 		struct sock *sock;
+ 
+-		sock = xchg(&stab->sock_map[i], NULL);
++		sock = stab->sock_map[i];
+ 		if (!sock)
+ 			continue;
+-
++		stab->sock_map[i] = NULL;
+ 		psock = smap_psock_sk(sock);
+ 		/* This check handles a racing sock event that can get the
+ 		 * sk_callback_lock before this case but after xchg happens
+@@ -1733,6 +1746,7 @@ static void sock_map_free(struct bpf_map *map)
+ 			smap_release_sock(psock, sock);
+ 		}
+ 	}
++	raw_spin_unlock_bh(&stab->lock);
+ 	rcu_read_unlock();
+ 
+ 	sock_map_remove_complete(stab);
+@@ -1776,19 +1790,23 @@ static int sock_map_delete_elem(struct bpf_map *map, void *key)
+ 	if (k >= map->max_entries)
+ 		return -EINVAL;
+ 
+-	sock = xchg(&stab->sock_map[k], NULL);
++	raw_spin_lock_bh(&stab->lock);
++	sock = stab->sock_map[k];
++	stab->sock_map[k] = NULL;
++	raw_spin_unlock_bh(&stab->lock);
+ 	if (!sock)
+ 		return -EINVAL;
+ 
+ 	psock = smap_psock_sk(sock);
+ 	if (!psock)
+-		goto out;
+-
+-	if (psock->bpf_parse)
++		return 0;
++	if (psock->bpf_parse) {
++		write_lock_bh(&sock->sk_callback_lock);
+ 		smap_stop_sock(psock, sock);
++		write_unlock_bh(&sock->sk_callback_lock);
++	}
+ 	smap_list_map_remove(psock, &stab->sock_map[k]);
+ 	smap_release_sock(psock, sock);
+-out:
+ 	return 0;
+ }
+ 
+@@ -1824,11 +1842,9 @@ out:
+ static int __sock_map_ctx_update_elem(struct bpf_map *map,
+ 				      struct bpf_sock_progs *progs,
+ 				      struct sock *sock,
+-				      struct sock **map_link,
+ 				      void *key)
+ {
+ 	struct bpf_prog *verdict, *parse, *tx_msg;
+-	struct smap_psock_map_entry *e = NULL;
+ 	struct smap_psock *psock;
+ 	bool new = false;
+ 	int err = 0;
+@@ -1901,14 +1917,6 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
+ 		new = true;
+ 	}
+ 
+-	if (map_link) {
+-		e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
+-		if (!e) {
+-			err = -ENOMEM;
+-			goto out_free;
+-		}
+-	}
+-
+ 	/* 3. At this point we have a reference to a valid psock that is
+ 	 * running. Attach any BPF programs needed.
+ 	 */
+@@ -1930,17 +1938,6 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
+ 		write_unlock_bh(&sock->sk_callback_lock);
+ 	}
+ 
+-	/* 4. Place psock in sockmap for use and stop any programs on
+-	 * the old sock assuming its not the same sock we are replacing
+-	 * it with. Because we can only have a single set of programs if
+-	 * old_sock has a strp we can stop it.
+-	 */
+-	if (map_link) {
+-		e->entry = map_link;
+-		spin_lock_bh(&psock->maps_lock);
+-		list_add_tail(&e->list, &psock->maps);
+-		spin_unlock_bh(&psock->maps_lock);
+-	}
+ 	return err;
+ out_free:
+ 	smap_release_sock(psock, sock);
+@@ -1951,7 +1948,6 @@ out_progs:
+ 	}
+ 	if (tx_msg)
+ 		bpf_prog_put(tx_msg);
+-	kfree(e);
+ 	return err;
+ }
+ 
+@@ -1961,36 +1957,57 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
+ {
+ 	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
+ 	struct bpf_sock_progs *progs = &stab->progs;
+-	struct sock *osock, *sock;
++	struct sock *osock, *sock = skops->sk;
++	struct smap_psock_map_entry *e;
++	struct smap_psock *psock;
+ 	u32 i = *(u32 *)key;
+ 	int err;
+ 
+ 	if (unlikely(flags > BPF_EXIST))
+ 		return -EINVAL;
+-
+ 	if (unlikely(i >= stab->map.max_entries))
+ 		return -E2BIG;
+ 
+-	sock = READ_ONCE(stab->sock_map[i]);
+-	if (flags == BPF_EXIST && !sock)
+-		return -ENOENT;
+-	else if (flags == BPF_NOEXIST && sock)
+-		return -EEXIST;
++	e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
++	if (!e)
++		return -ENOMEM;
+ 
+-	sock = skops->sk;
+-	err = __sock_map_ctx_update_elem(map, progs, sock, &stab->sock_map[i],
+-					 key);
++	err = __sock_map_ctx_update_elem(map, progs, sock, key);
+ 	if (err)
+ 		goto out;
+ 
+-	osock = xchg(&stab->sock_map[i], sock);
+-	if (osock) {
+-		struct smap_psock *opsock = smap_psock_sk(osock);
++	/* psock guaranteed to be present. */
++	psock = smap_psock_sk(sock);
++	raw_spin_lock_bh(&stab->lock);
++	osock = stab->sock_map[i];
++	if (osock && flags == BPF_NOEXIST) {
++		err = -EEXIST;
++		goto out_unlock;
++	}
++	if (!osock && flags == BPF_EXIST) {
++		err = -ENOENT;
++		goto out_unlock;
++	}
+ 
+-		smap_list_map_remove(opsock, &stab->sock_map[i]);
+-		smap_release_sock(opsock, osock);
++	e->entry = &stab->sock_map[i];
++	e->map = map;
++	spin_lock_bh(&psock->maps_lock);
++	list_add_tail(&e->list, &psock->maps);
++	spin_unlock_bh(&psock->maps_lock);
++
++	stab->sock_map[i] = sock;
++	if (osock) {
++		psock = smap_psock_sk(osock);
++		smap_list_map_remove(psock, &stab->sock_map[i]);
++		smap_release_sock(psock, osock);
+ 	}
++	raw_spin_unlock_bh(&stab->lock);
++	return 0;
++out_unlock:
++	smap_release_sock(psock, sock);
++	raw_spin_unlock_bh(&stab->lock);
+ out:
++	kfree(e);
+ 	return err;
+ }
+ 
+@@ -2353,7 +2370,7 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
+ 	b = __select_bucket(htab, hash);
+ 	head = &b->head;
+ 
+-	err = __sock_map_ctx_update_elem(map, progs, sock, NULL, key);
++	err = __sock_map_ctx_update_elem(map, progs, sock, key);
+ 	if (err)
+ 		goto err;
+ 
+@@ -2379,8 +2396,7 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
+ 	}
+ 
+ 	rcu_assign_pointer(e->hash_link, l_new);
+-	rcu_assign_pointer(e->htab,
+-			   container_of(map, struct bpf_htab, map));
++	e->map = map;
+ 	spin_lock_bh(&psock->maps_lock);
+ 	list_add_tail(&e->list, &psock->maps);
+ 	spin_unlock_bh(&psock->maps_lock);
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 1b27babc4c78..8ed48ca2cc43 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -549,8 +549,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
+ 		goto out;
+ 	}
+ 	/* a new mm has just been created */
+-	arch_dup_mmap(oldmm, mm);
+-	retval = 0;
++	retval = arch_dup_mmap(oldmm, mm);
+ out:
+ 	up_write(&mm->mmap_sem);
+ 	flush_tlb_mm(oldmm);
+@@ -1417,7 +1416,9 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
+ 		return -ENOMEM;
+ 
+ 	atomic_set(&sig->count, 1);
++	spin_lock_irq(&current->sighand->siglock);
+ 	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
++	spin_unlock_irq(&current->sighand->siglock);
+ 	return 0;
+ }
+ 
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 5f78c6e41796..0280deac392e 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -2652,6 +2652,9 @@ void flush_workqueue(struct workqueue_struct *wq)
+ 	if (WARN_ON(!wq_online))
+ 		return;
+ 
++	lock_map_acquire(&wq->lockdep_map);
++	lock_map_release(&wq->lockdep_map);
++
+ 	mutex_lock(&wq->mutex);
+ 
+ 	/*
+@@ -2843,7 +2846,8 @@ reflush:
+ }
+ EXPORT_SYMBOL_GPL(drain_workqueue);
+ 
+-static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
++static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
++			     bool from_cancel)
+ {
+ 	struct worker *worker = NULL;
+ 	struct worker_pool *pool;
+@@ -2885,7 +2889,8 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
+ 	 * workqueues the deadlock happens when the rescuer stalls, blocking
+ 	 * forward progress.
+ 	 */
+-	if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer) {
++	if (!from_cancel &&
++	    (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) {
+ 		lock_map_acquire(&pwq->wq->lockdep_map);
+ 		lock_map_release(&pwq->wq->lockdep_map);
+ 	}
+@@ -2896,6 +2901,27 @@ already_gone:
+ 	return false;
+ }
+ 
++static bool __flush_work(struct work_struct *work, bool from_cancel)
++{
++	struct wq_barrier barr;
++
++	if (WARN_ON(!wq_online))
++		return false;
++
++	if (!from_cancel) {
++		lock_map_acquire(&work->lockdep_map);
++		lock_map_release(&work->lockdep_map);
++	}
++
++	if (start_flush_work(work, &barr, from_cancel)) {
++		wait_for_completion(&barr.done);
++		destroy_work_on_stack(&barr.work);
++		return true;
++	} else {
++		return false;
++	}
++}
++
+ /**
+  * flush_work - wait for a work to finish executing the last queueing instance
+  * @work: the work to flush
+@@ -2909,18 +2935,7 @@ already_gone:
+  */
+ bool flush_work(struct work_struct *work)
+ {
+-	struct wq_barrier barr;
+-
+-	if (WARN_ON(!wq_online))
+-		return false;
+-
+-	if (start_flush_work(work, &barr)) {
+-		wait_for_completion(&barr.done);
+-		destroy_work_on_stack(&barr.work);
+-		return true;
+-	} else {
+-		return false;
+-	}
++	return __flush_work(work, false);
+ }
+ EXPORT_SYMBOL_GPL(flush_work);
+ 
+@@ -2986,7 +3001,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
+ 	 * isn't executing.
+ 	 */
+ 	if (wq_online)
+-		flush_work(work);
++		__flush_work(work, true);
+ 
+ 	clear_work_data(work);
+ 
+diff --git a/lib/debugobjects.c b/lib/debugobjects.c |
5340 |
+index 994be4805cec..24c1df0d7466 100644 |
5341 |
+--- a/lib/debugobjects.c |
5342 |
++++ b/lib/debugobjects.c |
5343 |
+@@ -360,9 +360,12 @@ static void debug_object_is_on_stack(void *addr, int onstack) |
5344 |
+ |
5345 |
+ limit++; |
5346 |
+ if (is_on_stack) |
5347 |
+- pr_warn("object is on stack, but not annotated\n"); |
5348 |
++ pr_warn("object %p is on stack %p, but NOT annotated.\n", addr, |
5349 |
++ task_stack_page(current)); |
5350 |
+ else |
5351 |
+- pr_warn("object is not on stack, but annotated\n"); |
5352 |
++ pr_warn("object %p is NOT on stack %p, but annotated.\n", addr, |
5353 |
++ task_stack_page(current)); |
5354 |
++ |
5355 |
+ WARN_ON(1); |
5356 |
+ } |
5357 |
+ |
5358 |
+diff --git a/mm/Kconfig b/mm/Kconfig |
5359 |
+index ce95491abd6a..94af022b7f3d 100644 |
5360 |
+--- a/mm/Kconfig |
5361 |
++++ b/mm/Kconfig |
5362 |
+@@ -635,7 +635,7 @@ config DEFERRED_STRUCT_PAGE_INIT |
5363 |
+ bool "Defer initialisation of struct pages to kthreads" |
5364 |
+ default n |
5365 |
+ depends on NO_BOOTMEM |
5366 |
+- depends on !FLATMEM |
5367 |
++ depends on SPARSEMEM |
5368 |
+ depends on !NEED_PER_CPU_KM |
5369 |
+ help |
5370 |
+ Ordinarily all struct pages are initialised during early boot in a |
5371 |
+diff --git a/mm/fadvise.c b/mm/fadvise.c |
5372 |
+index afa41491d324..2d8376e3c640 100644 |
5373 |
+--- a/mm/fadvise.c |
5374 |
++++ b/mm/fadvise.c |
5375 |
+@@ -72,8 +72,12 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice) |
5376 |
+ goto out; |
5377 |
+ } |
5378 |
+ |
5379 |
+- /* Careful about overflows. Len == 0 means "as much as possible" */ |
5380 |
+- endbyte = offset + len; |
5381 |
++ /* |
5382 |
++ * Careful about overflows. Len == 0 means "as much as possible". Use |
5383 |
++ * unsigned math because signed overflows are undefined and UBSan |
5384 |
++ * complains. |
5385 |
++ */ |
5386 |
++ endbyte = (u64)offset + (u64)len; |
5387 |
+ if (!len || endbyte < len) |
5388 |
+ endbyte = -1; |
5389 |
+ else |
5390 |
+diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c |
5391 |
+index ef456395645a..7fb60dd4be79 100644 |
5392 |
+--- a/net/9p/trans_fd.c |
5393 |
++++ b/net/9p/trans_fd.c |
5394 |
+@@ -199,15 +199,14 @@ static void p9_mux_poll_stop(struct p9_conn *m) |
5395 |
+ static void p9_conn_cancel(struct p9_conn *m, int err) |
5396 |
+ { |
5397 |
+ struct p9_req_t *req, *rtmp; |
5398 |
+- unsigned long flags; |
5399 |
+ LIST_HEAD(cancel_list); |
5400 |
+ |
5401 |
+ p9_debug(P9_DEBUG_ERROR, "mux %p err %d\n", m, err); |
5402 |
+ |
5403 |
+- spin_lock_irqsave(&m->client->lock, flags); |
5404 |
++ spin_lock(&m->client->lock); |
5405 |
+ |
5406 |
+ if (m->err) { |
5407 |
+- spin_unlock_irqrestore(&m->client->lock, flags); |
5408 |
++ spin_unlock(&m->client->lock); |
5409 |
+ return; |
5410 |
+ } |
5411 |
+ |
5412 |
+@@ -219,7 +218,6 @@ static void p9_conn_cancel(struct p9_conn *m, int err) |
5413 |
+ list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) { |
5414 |
+ list_move(&req->req_list, &cancel_list); |
5415 |
+ } |
5416 |
+- spin_unlock_irqrestore(&m->client->lock, flags); |
5417 |
+ |
5418 |
+ list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) { |
5419 |
+ p9_debug(P9_DEBUG_ERROR, "call back req %p\n", req); |
5420 |
+@@ -228,6 +226,7 @@ static void p9_conn_cancel(struct p9_conn *m, int err) |
5421 |
+ req->t_err = err; |
5422 |
+ p9_client_cb(m->client, req, REQ_STATUS_ERROR); |
5423 |
+ } |
5424 |
++ spin_unlock(&m->client->lock); |
5425 |
+ } |
5426 |
+ |
5427 |
+ static __poll_t |
5428 |
+@@ -375,8 +374,9 @@ static void p9_read_work(struct work_struct *work) |
5429 |
+ if (m->req->status != REQ_STATUS_ERROR) |
5430 |
+ status = REQ_STATUS_RCVD; |
5431 |
+ list_del(&m->req->req_list); |
5432 |
+- spin_unlock(&m->client->lock); |
5433 |
++ /* update req->status while holding client->lock */ |
5434 |
+ p9_client_cb(m->client, m->req, status); |
5435 |
++ spin_unlock(&m->client->lock); |
5436 |
+ m->rc.sdata = NULL; |
5437 |
+ m->rc.offset = 0; |
5438 |
+ m->rc.capacity = 0; |
5439 |
+diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c |
5440 |
+index 4c2da2513c8b..2dc1c293092b 100644 |
5441 |
+--- a/net/9p/trans_virtio.c |
5442 |
++++ b/net/9p/trans_virtio.c |
5443 |
+@@ -571,7 +571,7 @@ static int p9_virtio_probe(struct virtio_device *vdev) |
5444 |
+ chan->vq = virtio_find_single_vq(vdev, req_done, "requests"); |
5445 |
+ if (IS_ERR(chan->vq)) { |
5446 |
+ err = PTR_ERR(chan->vq); |
5447 |
+- goto out_free_vq; |
5448 |
++ goto out_free_chan; |
5449 |
+ } |
5450 |
+ chan->vq->vdev->priv = chan; |
5451 |
+ spin_lock_init(&chan->lock); |
5452 |
+@@ -624,6 +624,7 @@ out_free_tag: |
5453 |
+ kfree(tag); |
5454 |
+ out_free_vq: |
5455 |
+ vdev->config->del_vqs(vdev); |
5456 |
++out_free_chan: |
5457 |
+ kfree(chan); |
5458 |
+ fail: |
5459 |
+ return err; |
5460 |
+diff --git a/net/core/xdp.c b/net/core/xdp.c
+index 6771f1855b96..2657056130a4 100644
+--- a/net/core/xdp.c
++++ b/net/core/xdp.c
+@@ -95,23 +95,15 @@ static void __xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
+ {
+ struct xdp_mem_allocator *xa;
+ int id = xdp_rxq->mem.id;
+- int err;
+
+ if (id == 0)
+ return;
+
+ mutex_lock(&mem_id_lock);
+
+- xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
+- if (!xa) {
+- mutex_unlock(&mem_id_lock);
+- return;
+- }
+-
+- err = rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params);
+- WARN_ON(err);
+-
+- call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
++ xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params);
++ if (xa && !rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
++ call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
+
+ mutex_unlock(&mem_id_lock);
+ }
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index 2d8efeecf619..055f4bbba86b 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -1511,11 +1511,14 @@ nla_put_failure:
+
+ static void erspan_setup(struct net_device *dev)
+ {
++ struct ip_tunnel *t = netdev_priv(dev);
++
+ ether_setup(dev);
+ dev->netdev_ops = &erspan_netdev_ops;
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ ip_tunnel_setup(dev, erspan_net_id);
++ t->erspan_ver = 1;
+ }
+
+ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 3b2711e33e4c..488b201851d7 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -2516,6 +2516,12 @@ static int __net_init tcp_sk_init(struct net *net)
+ if (res)
+ goto fail;
+ sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
++
++ /* Please enforce IP_DF and IPID==0 for RST and
++ * ACK sent in SYN-RECV and TIME-WAIT state.
++ */
++ inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;
++
+ *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
+ }
+
+diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
+index 1dda1341a223..b690132f5da2 100644
+--- a/net/ipv4/tcp_minisocks.c
++++ b/net/ipv4/tcp_minisocks.c
+@@ -184,8 +184,9 @@ kill:
+ inet_twsk_deschedule_put(tw);
+ return TCP_TW_SUCCESS;
+ }
++ } else {
++ inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
+ }
+- inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
+
+ if (tmp_opt.saw_tstamp) {
+ tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
+diff --git a/net/ipv4/tcp_ulp.c b/net/ipv4/tcp_ulp.c
+index 622caa4039e0..a5995bb2eaca 100644
+--- a/net/ipv4/tcp_ulp.c
++++ b/net/ipv4/tcp_ulp.c
+@@ -51,7 +51,7 @@ static const struct tcp_ulp_ops *__tcp_ulp_find_autoload(const char *name)
+ #ifdef CONFIG_MODULES
+ if (!ulp && capable(CAP_NET_ADMIN)) {
+ rcu_read_unlock();
+- request_module("%s", name);
++ request_module("tcp-ulp-%s", name);
+ rcu_read_lock();
+ ulp = tcp_ulp_find(name);
+ }
+@@ -129,6 +129,8 @@ void tcp_cleanup_ulp(struct sock *sk)
+ if (icsk->icsk_ulp_ops->release)
+ icsk->icsk_ulp_ops->release(sk);
+ module_put(icsk->icsk_ulp_ops->owner);
++
++ icsk->icsk_ulp_ops = NULL;
+ }
+
+ /* Change upper layer protocol for socket */
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index d212738e9d10..5516f55e214b 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -198,6 +198,8 @@ void fib6_info_destroy_rcu(struct rcu_head *head)
+ }
+ }
+
++ lwtstate_put(f6i->fib6_nh.nh_lwtstate);
++
+ if (f6i->fib6_nh.nh_dev)
+ dev_put(f6i->fib6_nh.nh_dev);
+
+@@ -987,7 +989,10 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
+ fib6_clean_expires(iter);
+ else
+ fib6_set_expires(iter, rt->expires);
+- fib6_metric_set(iter, RTAX_MTU, rt->fib6_pmtu);
++
++ if (rt->fib6_pmtu)
++ fib6_metric_set(iter, RTAX_MTU,
++ rt->fib6_pmtu);
+ return -EEXIST;
+ }
+ /* If we have the same destination and the same metric,
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index cd2cfb04e5d8..7ec997fcbc43 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -1776,6 +1776,7 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
+ if (data[IFLA_GRE_COLLECT_METADATA])
+ parms->collect_md = true;
+
++ parms->erspan_ver = 1;
+ if (data[IFLA_GRE_ERSPAN_VER])
+ parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
+
+diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
+index c72ae3a4fe09..c31a7c4a9249 100644
+--- a/net/ipv6/ip6_vti.c
++++ b/net/ipv6/ip6_vti.c
+@@ -481,7 +481,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
+ }
+
+ mtu = dst_mtu(dst);
+- if (!skb->ignore_df && skb->len > mtu) {
++ if (skb->len > mtu) {
+ skb_dst_update_pmtu(skb, mtu);
+
+ if (skb->protocol == htons(ETH_P_IPV6)) {
+@@ -1102,7 +1102,8 @@ static void __net_exit vti6_destroy_tunnels(struct vti6_net *ip6n,
+ }
+
+ t = rtnl_dereference(ip6n->tnls_wc[0]);
+- unregister_netdevice_queue(t->dev, list);
++ if (t)
++ unregister_netdevice_queue(t->dev, list);
+ }
+
+ static int __net_init vti6_init_net(struct net *net)
+@@ -1114,6 +1115,8 @@ static int __net_init vti6_init_net(struct net *net)
+ ip6n->tnls[0] = ip6n->tnls_wc;
+ ip6n->tnls[1] = ip6n->tnls_r_l;
+
++ if (!net_has_fallback_tunnels(net))
++ return 0;
+ err = -ENOMEM;
+ ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6_vti0",
+ NET_NAME_UNKNOWN, vti6_dev_setup);
+diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
+index 0fe61ede77c6..c3c6b09acdc4 100644
+--- a/net/ipv6/netfilter/ip6t_rpfilter.c
++++ b/net/ipv6/netfilter/ip6t_rpfilter.c
+@@ -26,6 +26,12 @@ static bool rpfilter_addr_unicast(const struct in6_addr *addr)
+ return addr_type & IPV6_ADDR_UNICAST;
+ }
+
++static bool rpfilter_addr_linklocal(const struct in6_addr *addr)
++{
++ int addr_type = ipv6_addr_type(addr);
++ return addr_type & IPV6_ADDR_LINKLOCAL;
++}
++
+ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
+ const struct net_device *dev, u8 flags)
+ {
+@@ -48,7 +54,11 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
+ }
+
+ fl6.flowi6_mark = flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
+- if ((flags & XT_RPFILTER_LOOSE) == 0)
++
++ if (rpfilter_addr_linklocal(&iph->saddr)) {
++ lookup_flags |= RT6_LOOKUP_F_IFACE;
++ fl6.flowi6_oif = dev->ifindex;
++ } else if ((flags & XT_RPFILTER_LOOSE) == 0)
+ fl6.flowi6_oif = dev->ifindex;
+
+ rt = (void *)ip6_route_lookup(net, &fl6, skb, lookup_flags);
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 7208c16302f6..18e00ce1719a 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -956,7 +956,7 @@ static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort)
+ rt->dst.error = 0;
+ rt->dst.output = ip6_output;
+
+- if (ort->fib6_type == RTN_LOCAL) {
++ if (ort->fib6_type == RTN_LOCAL || ort->fib6_type == RTN_ANYCAST) {
+ rt->dst.input = ip6_input;
+ } else if (ipv6_addr_type(&ort->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
+ rt->dst.input = ip6_mc_input;
+@@ -996,7 +996,6 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort)
+ rt->rt6i_src = ort->fib6_src;
+ #endif
+ rt->rt6i_prefsrc = ort->fib6_prefsrc;
+- rt->dst.lwtstate = lwtstate_get(ort->fib6_nh.nh_lwtstate);
+ }
+
+ static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
+diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
+index 0679dd101e72..7ca926a03b81 100644
+--- a/net/netfilter/ipvs/ip_vs_core.c
++++ b/net/netfilter/ipvs/ip_vs_core.c
+@@ -1972,13 +1972,20 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
+ if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
+ /* the destination server is not available */
+
+- if (sysctl_expire_nodest_conn(ipvs)) {
++ __u32 flags = cp->flags;
++
++ /* when timer already started, silently drop the packet.*/
++ if (timer_pending(&cp->timer))
++ __ip_vs_conn_put(cp);
++ else
++ ip_vs_conn_put(cp);
++
++ if (sysctl_expire_nodest_conn(ipvs) &&
++ !(flags & IP_VS_CONN_F_ONE_PACKET)) {
+ /* try to expire the connection immediately */
+ ip_vs_conn_expire_now(cp);
+ }
+- /* don't restart its timer, and silently
+- drop the packet. */
+- __ip_vs_conn_put(cp);
++
+ return NF_DROP;
+ }
+
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index 20a2e37c76d1..e952eedf44b4 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -821,6 +821,21 @@ ctnetlink_alloc_filter(const struct nlattr * const cda[])
+ #endif
+ }
+
++static int ctnetlink_start(struct netlink_callback *cb)
++{
++ const struct nlattr * const *cda = cb->data;
++ struct ctnetlink_filter *filter = NULL;
++
++ if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
++ filter = ctnetlink_alloc_filter(cda);
++ if (IS_ERR(filter))
++ return PTR_ERR(filter);
++ }
++
++ cb->data = filter;
++ return 0;
++}
++
+ static int ctnetlink_filter_match(struct nf_conn *ct, void *data)
+ {
+ struct ctnetlink_filter *filter = data;
+@@ -1240,19 +1255,12 @@ static int ctnetlink_get_conntrack(struct net *net, struct sock *ctnl,
+
+ if (nlh->nlmsg_flags & NLM_F_DUMP) {
+ struct netlink_dump_control c = {
++ .start = ctnetlink_start,
+ .dump = ctnetlink_dump_table,
+ .done = ctnetlink_done,
++ .data = (void *)cda,
+ };
+
+- if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
+- struct ctnetlink_filter *filter;
+-
+- filter = ctnetlink_alloc_filter(cda);
+- if (IS_ERR(filter))
+- return PTR_ERR(filter);
+-
+- c.data = filter;
+- }
+ return netlink_dump_start(ctnl, skb, nlh, &c);
+ }
+
+diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
+index a0e5adf0b3b6..8fa8bf7c48e6 100644
+--- a/net/netfilter/nfnetlink_acct.c
++++ b/net/netfilter/nfnetlink_acct.c
+@@ -238,29 +238,33 @@ static const struct nla_policy filter_policy[NFACCT_FILTER_MAX + 1] = {
+ [NFACCT_FILTER_VALUE] = { .type = NLA_U32 },
+ };
+
+-static struct nfacct_filter *
+-nfacct_filter_alloc(const struct nlattr * const attr)
++static int nfnl_acct_start(struct netlink_callback *cb)
+ {
+- struct nfacct_filter *filter;
++ const struct nlattr *const attr = cb->data;
+ struct nlattr *tb[NFACCT_FILTER_MAX + 1];
++ struct nfacct_filter *filter;
+ int err;
+
++ if (!attr)
++ return 0;
++
+ err = nla_parse_nested(tb, NFACCT_FILTER_MAX, attr, filter_policy,
+ NULL);
+ if (err < 0)
+- return ERR_PTR(err);
++ return err;
+
+ if (!tb[NFACCT_FILTER_MASK] || !tb[NFACCT_FILTER_VALUE])
+- return ERR_PTR(-EINVAL);
++ return -EINVAL;
+
+ filter = kzalloc(sizeof(struct nfacct_filter), GFP_KERNEL);
+ if (!filter)
+- return ERR_PTR(-ENOMEM);
++ return -ENOMEM;
+
+ filter->mask = ntohl(nla_get_be32(tb[NFACCT_FILTER_MASK]));
+ filter->value = ntohl(nla_get_be32(tb[NFACCT_FILTER_VALUE]));
++ cb->data = filter;
+
+- return filter;
++ return 0;
+ }
+
+ static int nfnl_acct_get(struct net *net, struct sock *nfnl,
+@@ -275,18 +279,11 @@ static int nfnl_acct_get(struct net *net, struct sock *nfnl,
+ if (nlh->nlmsg_flags & NLM_F_DUMP) {
+ struct netlink_dump_control c = {
+ .dump = nfnl_acct_dump,
++ .start = nfnl_acct_start,
+ .done = nfnl_acct_done,
++ .data = (void *)tb[NFACCT_FILTER],
+ };
+
+- if (tb[NFACCT_FILTER]) {
+- struct nfacct_filter *filter;
+-
+- filter = nfacct_filter_alloc(tb[NFACCT_FILTER]);
+- if (IS_ERR(filter))
+- return PTR_ERR(filter);
+-
+- c.data = filter;
+- }
+ return netlink_dump_start(nfnl, skb, nlh, &c);
+ }
+
+diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
+index d0d8397c9588..aecadd471e1d 100644
+--- a/net/netfilter/x_tables.c
++++ b/net/netfilter/x_tables.c
+@@ -1178,12 +1178,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
+ if (sz < sizeof(*info) || sz >= XT_MAX_TABLE_SIZE)
+ return NULL;
+
+- /* __GFP_NORETRY is not fully supported by kvmalloc but it should
+- * work reasonably well if sz is too large and bail out rather
+- * than shoot all processes down before realizing there is nothing
+- * more to reclaim.
+- */
+- info = kvmalloc(sz, GFP_KERNEL | __GFP_NORETRY);
++ info = kvmalloc(sz, GFP_KERNEL_ACCOUNT);
+ if (!info)
+ return NULL;
+
+diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c
+index d152e48ea371..8596eed6d9a8 100644
+--- a/net/rds/ib_frmr.c
++++ b/net/rds/ib_frmr.c
+@@ -61,6 +61,7 @@ static struct rds_ib_mr *rds_ib_alloc_frmr(struct rds_ib_device *rds_ibdev,
+ pool->fmr_attr.max_pages);
+ if (IS_ERR(frmr->mr)) {
+ pr_warn("RDS/IB: %s failed to allocate MR", __func__);
++ err = PTR_ERR(frmr->mr);
+ goto out_no_cigar;
+ }
+
+diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
+index 20d7d36b2fc9..005cb21348c9 100644
+--- a/net/sched/act_ife.c
++++ b/net/sched/act_ife.c
+@@ -265,10 +265,8 @@ static const char *ife_meta_id2name(u32 metaid)
+ #endif
+
+ /* called when adding new meta information
+- * under ife->tcf_lock for existing action
+ */
+-static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
+- void *val, int len, bool exists)
++static int load_metaops_and_vet(u32 metaid, void *val, int len)
+ {
+ struct tcf_meta_ops *ops = find_ife_oplist(metaid);
+ int ret = 0;
+@@ -276,13 +274,9 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
+ if (!ops) {
+ ret = -ENOENT;
+ #ifdef CONFIG_MODULES
+- if (exists)
+- spin_unlock_bh(&ife->tcf_lock);
+ rtnl_unlock();
+ request_module("ife-meta-%s", ife_meta_id2name(metaid));
+ rtnl_lock();
+- if (exists)
+- spin_lock_bh(&ife->tcf_lock);
+ ops = find_ife_oplist(metaid);
+ #endif
+ }
+@@ -299,24 +293,17 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
+ }
+
+ /* called when adding new meta information
+- * under ife->tcf_lock for existing action
+ */
+-static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
+- int len, bool atomic)
++static int __add_metainfo(const struct tcf_meta_ops *ops,
++ struct tcf_ife_info *ife, u32 metaid, void *metaval,
++ int len, bool atomic, bool exists)
+ {
+ struct tcf_meta_info *mi = NULL;
+- struct tcf_meta_ops *ops = find_ife_oplist(metaid);
+ int ret = 0;
+
+- if (!ops)
+- return -ENOENT;
+-
+ mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
+- if (!mi) {
+- /*put back what find_ife_oplist took */
+- module_put(ops->owner);
++ if (!mi)
+ return -ENOMEM;
+- }
+
+ mi->metaid = metaid;
+ mi->ops = ops;
+@@ -324,17 +311,49 @@ static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
+ ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL);
+ if (ret != 0) {
+ kfree(mi);
+- module_put(ops->owner);
+ return ret;
+ }
+ }
+
++ if (exists)
++ spin_lock_bh(&ife->tcf_lock);
+ list_add_tail(&mi->metalist, &ife->metalist);
++ if (exists)
++ spin_unlock_bh(&ife->tcf_lock);
+
+ return ret;
+ }
+
+-static int use_all_metadata(struct tcf_ife_info *ife)
++static int add_metainfo_and_get_ops(const struct tcf_meta_ops *ops,
++ struct tcf_ife_info *ife, u32 metaid,
++ bool exists)
++{
++ int ret;
++
++ if (!try_module_get(ops->owner))
++ return -ENOENT;
++ ret = __add_metainfo(ops, ife, metaid, NULL, 0, true, exists);
++ if (ret)
++ module_put(ops->owner);
++ return ret;
++}
++
++static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
++ int len, bool exists)
++{
++ const struct tcf_meta_ops *ops = find_ife_oplist(metaid);
++ int ret;
++
++ if (!ops)
++ return -ENOENT;
++ ret = __add_metainfo(ops, ife, metaid, metaval, len, false, exists);
++ if (ret)
++ /*put back what find_ife_oplist took */
++ module_put(ops->owner);
++ return ret;
++}
++
++static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
+ {
+ struct tcf_meta_ops *o;
+ int rc = 0;
+@@ -342,7 +361,7 @@ static int use_all_metadata(struct tcf_ife_info *ife)
+
+ read_lock(&ife_mod_lock);
+ list_for_each_entry(o, &ifeoplist, list) {
+- rc = add_metainfo(ife, o->metaid, NULL, 0, true);
++ rc = add_metainfo_and_get_ops(o, ife, o->metaid, exists);
+ if (rc == 0)
+ installed += 1;
+ }
+@@ -393,7 +412,6 @@ static void _tcf_ife_cleanup(struct tc_action *a)
+ struct tcf_meta_info *e, *n;
+
+ list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
+- module_put(e->ops->owner);
+ list_del(&e->metalist);
+ if (e->metaval) {
+ if (e->ops->release)
+@@ -401,6 +419,7 @@ static void _tcf_ife_cleanup(struct tc_action *a)
+ else
+ kfree(e->metaval);
+ }
++ module_put(e->ops->owner);
+ kfree(e);
+ }
+ }
+@@ -419,7 +438,6 @@ static void tcf_ife_cleanup(struct tc_action *a)
+ kfree_rcu(p, rcu);
+ }
+
+-/* under ife->tcf_lock for existing action */
+ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
+ bool exists)
+ {
+@@ -433,7 +451,7 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
+ val = nla_data(tb[i]);
+ len = nla_len(tb[i]);
+
+- rc = load_metaops_and_vet(ife, i, val, len, exists);
++ rc = load_metaops_and_vet(i, val, len);
+ if (rc != 0)
+ return rc;
+
+@@ -531,8 +549,6 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
+ p->eth_type = ife_type;
+ }
+
+- if (exists)
+- spin_lock_bh(&ife->tcf_lock);
+
+ if (ret == ACT_P_CREATED)
+ INIT_LIST_HEAD(&ife->metalist);
+@@ -544,9 +560,6 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
+ metadata_parse_err:
+ if (ret == ACT_P_CREATED)
+ tcf_idr_release(*a, bind);
+-
+- if (exists)
+- spin_unlock_bh(&ife->tcf_lock);
+ kfree(p);
+ return err;
+ }
+@@ -561,18 +574,17 @@ metadata_parse_err:
+ * as we can. You better have at least one else we are
+ * going to bail out
+ */
+- err = use_all_metadata(ife);
++ err = use_all_metadata(ife, exists);
+ if (err) {
+ if (ret == ACT_P_CREATED)
+ tcf_idr_release(*a, bind);
+-
+- if (exists)
+- spin_unlock_bh(&ife->tcf_lock);
+ kfree(p);
+ return err;
+ }
+ }
+
++ if (exists)
++ spin_lock_bh(&ife->tcf_lock);
+ ife->tcf_action = parm->action;
+ if (exists)
+ spin_unlock_bh(&ife->tcf_lock);
+diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
+index 8a925c72db5f..bad475c87688 100644
+--- a/net/sched/act_pedit.c
++++ b/net/sched/act_pedit.c
+@@ -109,16 +109,18 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb,
+ {
+ struct nlattr *keys_start = nla_nest_start(skb, TCA_PEDIT_KEYS_EX);
+
++ if (!keys_start)
++ goto nla_failure;
+ for (; n > 0; n--) {
+ struct nlattr *key_start;
+
+ key_start = nla_nest_start(skb, TCA_PEDIT_KEY_EX);
++ if (!key_start)
++ goto nla_failure;
+
+ if (nla_put_u16(skb, TCA_PEDIT_KEY_EX_HTYPE, keys_ex->htype) ||
+- nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd)) {
+- nlmsg_trim(skb, keys_start);
+- return -EINVAL;
+- }
++ nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd))
++ goto nla_failure;
+
+ nla_nest_end(skb, key_start);
+
+@@ -128,6 +130,9 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb,
+ nla_nest_end(skb, keys_start);
+
+ return 0;
++nla_failure:
++ nla_nest_cancel(skb, keys_start);
++ return -EINVAL;
+ }
+
+ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
+@@ -395,7 +400,10 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
+ opt->bindcnt = p->tcf_bindcnt - bind;
+
+ if (p->tcfp_keys_ex) {
+- tcf_pedit_key_ex_dump(skb, p->tcfp_keys_ex, p->tcfp_nkeys);
++ if (tcf_pedit_key_ex_dump(skb,
++ p->tcfp_keys_ex,
++ p->tcfp_nkeys))
++ goto nla_put_failure;
+
+ if (nla_put(skb, TCA_PEDIT_PARMS_EX, s, opt))
+ goto nla_put_failure;
+diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
+index fb861f90fde6..260749956ef3 100644
+--- a/net/sched/cls_u32.c
++++ b/net/sched/cls_u32.c
+@@ -912,6 +912,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
+ struct nlattr *opt = tca[TCA_OPTIONS];
+ struct nlattr *tb[TCA_U32_MAX + 1];
+ u32 htid, flags = 0;
++ size_t sel_size;
+ int err;
+ #ifdef CONFIG_CLS_U32_PERF
+ size_t size;
+@@ -1074,8 +1075,13 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
+ }
+
+ s = nla_data(tb[TCA_U32_SEL]);
++ sel_size = struct_size(s, keys, s->nkeys);
++ if (nla_len(tb[TCA_U32_SEL]) < sel_size) {
++ err = -EINVAL;
++ goto erridr;
++ }
+
+- n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
++ n = kzalloc(offsetof(typeof(*n), sel) + sel_size, GFP_KERNEL);
+ if (n == NULL) {
+ err = -ENOBUFS;
+ goto erridr;
+@@ -1090,7 +1096,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
+ }
+ #endif
+
+- memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
++ memcpy(&n->sel, s, sel_size);
+ RCU_INIT_POINTER(n->ht_up, ht);
+ n->handle = handle;
+ n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
+diff --git a/net/sctp/proc.c b/net/sctp/proc.c
+index ef5c9a82d4e8..a644292f9faf 100644
+--- a/net/sctp/proc.c
++++ b/net/sctp/proc.c
+@@ -215,7 +215,6 @@ static const struct seq_operations sctp_eps_ops = {
+ struct sctp_ht_iter {
+ struct seq_net_private p;
+ struct rhashtable_iter hti;
+- int start_fail;
+ };
+
+ static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos)
+@@ -224,7 +223,6 @@ static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos)
+
+ sctp_transport_walk_start(&iter->hti);
+
+- iter->start_fail = 0;
+ return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos);
+ }
+
+@@ -232,8 +230,6 @@ static void sctp_transport_seq_stop(struct seq_file *seq, void *v)
+ {
+ struct sctp_ht_iter *iter = seq->private;
+
+- if (iter->start_fail)
+- return;
+ sctp_transport_walk_stop(&iter->hti);
+ }
+
+@@ -264,8 +260,6 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
+ }
+
+ transport = (struct sctp_transport *)v;
+- if (!sctp_transport_hold(transport))
+- return 0;
+ assoc = transport->asoc;
+ epb = &assoc->base;
+ sk = epb->sk;
+@@ -322,8 +316,6 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
+ }
+
+ transport = (struct sctp_transport *)v;
+- if (!sctp_transport_hold(transport))
+- return 0;
+ assoc = transport->asoc;
+
+ list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list,
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index ce620e878538..50ee07cd20c4 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -4881,9 +4881,14 @@ struct sctp_transport *sctp_transport_get_next(struct net *net,
+ break;
+ }
+
++ if (!sctp_transport_hold(t))
++ continue;
++
+ if (net_eq(sock_net(t->asoc->base.sk), net) &&
+ t->asoc->peer.primary_path == t)
+ break;
++
++ sctp_transport_put(t);
+ }
+
+ return t;
+@@ -4893,13 +4898,18 @@ struct sctp_transport *sctp_transport_get_idx(struct net *net,
+ struct rhashtable_iter *iter,
+ int pos)
+ {
+- void *obj = SEQ_START_TOKEN;
++ struct sctp_transport *t;
+
+- while (pos && (obj = sctp_transport_get_next(net, iter)) &&
+- !IS_ERR(obj))
+- pos--;
++ if (!pos)
++ return SEQ_START_TOKEN;
+
+- return obj;
++ while ((t = sctp_transport_get_next(net, iter)) && !IS_ERR(t)) {
++ if (!--pos)
++ break;
++ sctp_transport_put(t);
++ }
++
++ return t;
+ }
+
+ int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
+@@ -4958,8 +4968,6 @@ again:
+
+ tsp = sctp_transport_get_idx(net, &hti, *pos + 1);
+ for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) {
+- if (!sctp_transport_hold(tsp))
+- continue;
+ ret = cb(tsp, p);
+ if (ret)
+ break;
+diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
+index 8654494b4d0a..834eb2b9e41b 100644
+--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
++++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
+@@ -169,7 +169,7 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
+ struct scatterlist sg[1];
+ int err = -1;
+ u8 *checksumdata;
+- u8 rc4salt[4];
++ u8 *rc4salt;
+ struct crypto_ahash *md5;
+ struct crypto_ahash *hmac_md5;
+ struct ahash_request *req;
+@@ -183,14 +183,18 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
+ return GSS_S_FAILURE;
+ }
+
++ rc4salt = kmalloc_array(4, sizeof(*rc4salt), GFP_NOFS);
++ if (!rc4salt)
++ return GSS_S_FAILURE;
++
+ if (arcfour_hmac_md5_usage_to_salt(usage, rc4salt)) {
+ dprintk("%s: invalid usage value %u\n", __func__, usage);
+- return GSS_S_FAILURE;
++ goto out_free_rc4salt;
+ }
+
+ checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
+ if (!checksumdata)
+- return GSS_S_FAILURE;
++ goto out_free_rc4salt;
+
+ md5 = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(md5))
+@@ -258,6 +262,8 @@ out_free_md5:
+ crypto_free_ahash(md5);
+ out_free_cksum:
+ kfree(checksumdata);
++out_free_rc4salt:
++ kfree(rc4salt);
+ return err ? GSS_S_FAILURE : 0;
+ }
+
+diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
+index bebe88cae07b..ff968c7afef6 100644
+--- a/net/tipc/name_table.c
++++ b/net/tipc/name_table.c
+@@ -980,20 +980,17 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
+
+ struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port)
+ {
+- u64 value = (u64)node << 32 | port;
+ struct tipc_dest *dst;
+
+ list_for_each_entry(dst, l, list) {
+- if (dst->value != value)
+- continue;
+- return dst;
++ if (dst->node == node && dst->port == port)
++ return dst;
+ }
+ return NULL;
+ }
+
+ bool tipc_dest_push(struct list_head *l, u32 node, u32 port)
+ {
+- u64 value = (u64)node << 32 | port;
+ struct tipc_dest *dst;
+
+ if (tipc_dest_find(l, node, port))
+@@ -1002,7 +999,8 @@ bool tipc_dest_push(struct list_head *l, u32 node, u32 port)
+ dst = kmalloc(sizeof(*dst), GFP_ATOMIC);
+ if (unlikely(!dst))
+ return false;
+- dst->value = value;
++ dst->node = node;
++ dst->port = port;
+ list_add(&dst->list, l);
+ return true;
+ }
+diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
+index 0febba41da86..892bd750b85f 100644
+--- a/net/tipc/name_table.h
++++ b/net/tipc/name_table.h
+@@ -133,13 +133,8 @@ void tipc_nametbl_stop(struct net *net);
+
+ struct tipc_dest {
+ struct list_head list;
+- union {
+- struct {
+- u32 port;
+- u32 node;
+- };
+- u64 value;
+- };
++ u32 port;
++ u32 node;
+ };
+
+ struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port);
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index 930852c54d7a..0a5fa347135e 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -2675,6 +2675,8 @@ void tipc_sk_reinit(struct net *net)
+
+ rhashtable_walk_stop(&iter);
+ } while (tsk == ERR_PTR(-EAGAIN));
++
++ rhashtable_walk_exit(&iter);
+ }
+
+ static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
+diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
+index 301f22430469..45188d920013 100644
+--- a/net/tls/tls_main.c
++++ b/net/tls/tls_main.c
+@@ -45,6 +45,7 @@
+ MODULE_AUTHOR("Mellanox Technologies");
+ MODULE_DESCRIPTION("Transport Layer Security Support");
+ MODULE_LICENSE("Dual BSD/GPL");
++MODULE_ALIAS_TCP_ULP("tls");
+
+ enum {
+ TLSV4,
+diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c
+index 4b4d78fffe30..da9070889223 100644
+--- a/samples/bpf/xdp_redirect_cpu_user.c
++++ b/samples/bpf/xdp_redirect_cpu_user.c
+@@ -679,8 +679,9 @@ int main(int argc, char **argv)
+ return EXIT_FAIL_OPTION;
+ }
+
+- /* Remove XDP program when program is interrupted */
++ /* Remove XDP program when program is interrupted or killed */
+ signal(SIGINT, int_exit);
++ signal(SIGTERM, int_exit);
+
+ if (bpf_set_link_xdp_fd(ifindex, prog_fd[prog_num], xdp_flags) < 0) {
+ fprintf(stderr, "link set xdp fd failed\n");
+diff --git a/samples/bpf/xdp_rxq_info_user.c b/samples/bpf/xdp_rxq_info_user.c
+index e4e9ba52bff0..bb278447299c 100644
+--- a/samples/bpf/xdp_rxq_info_user.c
++++ b/samples/bpf/xdp_rxq_info_user.c
+@@ -534,8 +534,9 @@ int main(int argc, char **argv)
+ exit(EXIT_FAIL_BPF);
+ }
+
+- /* Remove XDP program when program is interrupted */
++ /* Remove XDP program when program is interrupted or killed */
+ signal(SIGINT, int_exit);
++ signal(SIGTERM, int_exit);
+
+ if (bpf_set_link_xdp_fd(ifindex, prog_fd, xdp_flags) < 0) {
+ fprintf(stderr, "link set xdp fd failed\n");
+diff --git a/scripts/coccicheck b/scripts/coccicheck
+index 9fedca611b7f..e04d328210ac 100755
+--- a/scripts/coccicheck
++++ b/scripts/coccicheck
+@@ -128,9 +128,10 @@ run_cmd_parmap() {
+ fi
+ echo $@ >>$DEBUG_FILE
+ $@ 2>>$DEBUG_FILE
+- if [[ $? -ne 0 ]]; then
++ err=$?
++ if [[ $err -ne 0 ]]; then
+ echo "coccicheck failed"
+- exit $?
++ exit $err
+ fi
+ }
+
+diff --git a/scripts/depmod.sh b/scripts/depmod.sh
+index 999d585eaa73..e5f0aad75b96 100755
+--- a/scripts/depmod.sh
++++ b/scripts/depmod.sh
+@@ -15,9 +15,9 @@ if ! test -r System.map ; then
+ fi
+
+ if [ -z $(command -v $DEPMOD) ]; then
+- echo "'make modules_install' requires $DEPMOD. Please install it." >&2
++ echo "Warning: 'make modules_install' requires $DEPMOD. Please install it." >&2
+ echo "This is probably in the kmod package." >&2
+- exit 1
++ exit 0
+ fi
+
+ # older versions of depmod require the version string to start with three
+diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
+index 1663fb19343a..b95cf57782a3 100644
+--- a/scripts/mod/modpost.c
++++ b/scripts/mod/modpost.c
+@@ -672,7 +672,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
+ if (ELF_ST_TYPE(sym->st_info) == STT_SPARC_REGISTER)
+ break;
+ if (symname[0] == '.') {
+- char *munged = strdup(symname);
++ char *munged = NOFAIL(strdup(symname));
+ munged[0] = '_';
+ munged[1] = toupper(munged[1]);
+ symname = munged;
+@@ -1318,7 +1318,7 @@ static Elf_Sym *find_elf_symbol2(struct elf_info *elf, Elf_Addr addr,
+ static char *sec2annotation(const char *s)
+ {
+ if (match(s, init_exit_sections)) {
+- char *p = malloc(20);
++ char *p = NOFAIL(malloc(20));
+ char *r = p;
+
+ *p++ = '_';
+@@ -1338,7 +1338,7 @@ static char *sec2annotation(const char *s)
+ strcat(p, " ");
+ return r;
+ } else {
+- return strdup("");
++ return NOFAIL(strdup(""));
+ }
+ }
+
+@@ -2036,7 +2036,7 @@ void buf_write(struct buffer *buf, const char *s, int len)
+ {
+ if (buf->size - buf->pos < len) {
+ buf->size += len + SZ;
+- buf->p = realloc(buf->p, buf->size);
++ buf->p = NOFAIL(realloc(buf->p, buf->size));
+ }
+ strncpy(buf->p + buf->pos, s, len);
+ buf->pos += len;
+diff --git a/security/apparmor/policy_ns.c b/security/apparmor/policy_ns.c
+index b0f9dc3f765a..1a7cec5d9cac 100644
+--- a/security/apparmor/policy_ns.c
++++ b/security/apparmor/policy_ns.c
+@@ -255,7 +255,7 @@ static struct aa_ns *__aa_create_ns(struct aa_ns *parent, const char *name,
+
+ ns = alloc_ns(parent->base.hname, name);
+ if (!ns)
+- return NULL;
++ return ERR_PTR(-ENOMEM);
+ ns->level = parent->level + 1;
+ mutex_lock_nested(&ns->lock, ns->level);
+ error = __aafs_ns_mkdir(ns, ns_subns_dir(parent), name, dir);
+diff --git a/security/keys/dh.c b/security/keys/dh.c
+index b203f7758f97..1a68d27e72b4 100644
+--- a/security/keys/dh.c
++++ b/security/keys/dh.c
+@@ -300,7 +300,7 @@ long __keyctl_dh_compute(struct keyctl_dh_params __user *params,
+ }
+ dh_inputs.g_size = dlen;
+
+- dlen = dh_data_from_key(pcopy.private, &dh_inputs.key);
++ dlen = dh_data_from_key(pcopy.dh_private, &dh_inputs.key);
+ if (dlen < 0) {
+ ret = dlen;
+ goto out2;
+diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
+index 79d3709b0671..0b66d7283b00 100644
+--- a/security/selinux/selinuxfs.c
++++ b/security/selinux/selinuxfs.c
+@@ -1365,13 +1365,18 @@ static int sel_make_bools(struct selinux_fs_info *fsi)
+
+ ret = -ENOMEM;
+ inode = sel_make_inode(dir->d_sb, S_IFREG | S_IRUGO | S_IWUSR);
+- if (!inode)
++ if (!inode) {
++ dput(dentry);
+ goto out;
++ }
+
+ ret = -ENAMETOOLONG;
+ len = snprintf(page, PAGE_SIZE, "/%s/%s", BOOL_DIR_NAME, names[i]);
+- if (len >= PAGE_SIZE)
++ if (len >= PAGE_SIZE) {
++ dput(dentry);
++ iput(inode);
+ goto out;
++ }
+
+ isec = (struct inode_security_struct *)inode->i_security;
+ ret = security_genfs_sid(fsi->state, "selinuxfs", page,
+@@ -1586,8 +1591,10 @@ static int sel_make_avc_files(struct dentry *dir)
+ return -ENOMEM;
+
+ inode = sel_make_inode(dir->d_sb, S_IFREG|files[i].mode);
+- if (!inode)
++ if (!inode) {
++ dput(dentry);
+ return -ENOMEM;
++ }
+
+ inode->i_fop = files[i].ops;
+ inode->i_ino = ++fsi->last_ino;
+@@ -1632,8 +1639,10 @@ static int sel_make_initcon_files(struct dentry *dir)
+ return -ENOMEM;
+
+ inode = sel_make_inode(dir->d_sb, S_IFREG|S_IRUGO);
+- if (!inode)
++ if (!inode) {
++ dput(dentry);
+ return -ENOMEM;
++ }
+
+ inode->i_fop = &sel_initcon_ops;
+ inode->i_ino = i|SEL_INITCON_INO_OFFSET;
+@@ -1733,8 +1742,10 @@ static int sel_make_perm_files(char *objclass, int classvalue,
+
+ rc = -ENOMEM;
+ inode = sel_make_inode(dir->d_sb, S_IFREG|S_IRUGO);
+- if (!inode)
++ if (!inode) {
++ dput(dentry);
+ goto out;
++ }
+
+ inode->i_fop = &sel_perm_ops;
+ /* i+1 since perm values are 1-indexed */
+@@ -1763,8 +1774,10 @@ static int sel_make_class_dir_entries(char *classname, int index,
+ return -ENOMEM;
+
+ inode = sel_make_inode(dir->d_sb, S_IFREG|S_IRUGO);
+- if (!inode)
++ if (!inode) {
++ dput(dentry);
+ return -ENOMEM;
++ }
+
+ inode->i_fop = &sel_class_ops;
+ inode->i_ino = sel_class_to_ino(index);
+@@ -1838,8 +1851,10 @@ static int sel_make_policycap(struct selinux_fs_info *fsi)
+ return -ENOMEM;
+
+ inode = sel_make_inode(fsi->sb, S_IFREG | 0444);
+- if (inode == NULL)
++ if (inode == NULL) {
++ dput(dentry);
+ return -ENOMEM;
++ }
+
+ inode->i_fop = &sel_policycap_ops;
+ inode->i_ino = iter | SEL_POLICYCAP_INO_OFFSET;
+@@ -1932,8 +1947,10 @@ static int sel_fill_super(struct super_block *sb, void *data, int silent)
+
+ ret = -ENOMEM;
+ inode = sel_make_inode(sb, S_IFCHR | S_IRUGO | S_IWUGO);
+- if (!inode)
++ if (!inode) {
++ dput(dentry);
+ goto err;
++ }
+
+ inode->i_ino = ++fsi->last_ino;
+ isec = (struct inode_security_struct *)inode->i_security;
+diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
+index 8a0181a2db08..47feef30dadb 100644
+--- a/sound/soc/codecs/rt5677.c
++++ b/sound/soc/codecs/rt5677.c
+@@ -5007,7 +5007,7 @@ static const struct regmap_config rt5677_regmap = {
+ };
+
+ static const struct of_device_id rt5677_of_match[] = {
+- { .compatible = "realtek,rt5677", RT5677 },
++ { .compatible = "realtek,rt5677", .data = (const void *)RT5677 },
+ { }
+ };
+ MODULE_DEVICE_TABLE(of, rt5677_of_match);
+diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
+index 7fdfdf3f6e67..14f1b0c0d286 100644
+--- a/sound/soc/codecs/wm8994.c
++++ b/sound/soc/codecs/wm8994.c
+@@ -2432,6 +2432,7 @@ static int wm8994_set_dai_sysclk(struct snd_soc_dai *dai,
+ snd_soc_component_update_bits(component, WM8994_POWER_MANAGEMENT_2,
+ WM8994_OPCLK_ENA, 0);
+ }
++ break;
+
+ default:
+ return -EINVAL;
+diff --git a/tools/perf/arch/arm64/util/arm-spe.c b/tools/perf/arch/arm64/util/arm-spe.c
+index 1120e39c1b00..5ccfce87e693 100644
+--- a/tools/perf/arch/arm64/util/arm-spe.c
++++ b/tools/perf/arch/arm64/util/arm-spe.c
+@@ -194,6 +194,7 @@ struct auxtrace_record *arm_spe_recording_init(int *err,
+ sper->itr.read_finish = arm_spe_read_finish;
+ sper->itr.alignment = 0;
+
++ *err = 0;
+ return &sper->itr;
+ }
+
+diff --git a/tools/perf/arch/powerpc/util/sym-handling.c b/tools/perf/arch/powerpc/util/sym-handling.c
+index 53d83d7e6a09..20e7d74d86cd 100644
+--- a/tools/perf/arch/powerpc/util/sym-handling.c
++++ b/tools/perf/arch/powerpc/util/sym-handling.c
+@@ -141,8 +141,10 @@ void arch__post_process_probe_trace_events(struct perf_probe_event *pev,
+ for (i = 0; i < ntevs; i++) {
+ tev = &pev->tevs[i];
+ map__for_each_symbol(map, sym, tmp) {
+- if (map->unmap_ip(map, sym->start) == tev->point.address)
++ if (map->unmap_ip(map, sym->start) == tev->point.address) {
+ arch__fix_tev_from_maps(pev, tev, map, sym);
++ break;
++ }
+ }
+ }
+ }
+diff --git a/tools/perf/util/namespaces.c b/tools/perf/util/namespaces.c
+index 5be021701f34..cf8bd123cf73 100644
+--- a/tools/perf/util/namespaces.c
++++ b/tools/perf/util/namespaces.c
+@@ -139,6 +139,9 @@ struct nsinfo *nsinfo__copy(struct nsinfo *nsi)
+ {
+ struct nsinfo *nnsi;
+
++ if (nsi == NULL)
++ return NULL;
++
+ nnsi = calloc(1, sizeof(*nnsi));
+ if (nnsi != NULL) {
+ nnsi->pid = nsi->pid;
+diff --git a/tools/testing/selftests/powerpc/harness.c b/tools/testing/selftests/powerpc/harness.c
+index 66d31de60b9a..9d7166dfad1e 100644
+--- a/tools/testing/selftests/powerpc/harness.c
++++ b/tools/testing/selftests/powerpc/harness.c
+@@ -85,13 +85,13 @@ wait:
+ return status;
+ }
+
+-static void alarm_handler(int signum)
++static void sig_handler(int signum)
+ {
+- /* Jut wake us up from waitpid */
++ /* Just wake us up from waitpid */
+ }
+
+-static struct sigaction alarm_action = {
+- .sa_handler = alarm_handler,
++static struct sigaction sig_action = {
++ .sa_handler = sig_handler,
+ };
+
+ void test_harness_set_timeout(uint64_t time)
+@@ -106,8 +106,14 @@ int test_harness(int (test_function)(void), char *name)
+ test_start(name);
+ test_set_git_version(GIT_VERSION);
+
+- if (sigaction(SIGALRM, &alarm_action, NULL)) {
+- perror("sigaction");
++ if (sigaction(SIGINT, &sig_action, NULL)) {
++ perror("sigaction (sigint)");
++ test_error(name);
++ return 1;
++ }
++
++ if (sigaction(SIGALRM, &sig_action, NULL)) {
++ perror("sigaction (sigalrm)");
+ test_error(name);
+ return 1;
+ }