commit: 472a7a400815ff96bb67b3245ec107d9783f8590
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jul 31 10:11:05 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jul 31 10:11:05 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=472a7a40

Linux patch 5.2.5

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |    4 +
 1004_linux-5.2.5.patch | 7465 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 7469 insertions(+)

diff --git a/0000_README b/0000_README
index ff4bd8b..983b9f0 100644
--- a/0000_README
+++ b/0000_README
@@ -59,6 +59,10 @@ Patch: 1003_linux-5.2.4.patch
From: https://www.kernel.org
Desc: Linux 5.2.4

+Patch: 1004_linux-5.2.5.patch
+From: https://www.kernel.org
+Desc: Linux 5.2.5
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1004_linux-5.2.5.patch b/1004_linux-5.2.5.patch
new file mode 100644
index 0000000..0cbf6b4
--- /dev/null
+++ b/1004_linux-5.2.5.patch
@@ -0,0 +1,7465 @@
+diff --git a/Documentation/devicetree/bindings/display/panel/armadeus,st0700-adapt.txt b/Documentation/devicetree/bindings/display/panel/armadeus,st0700-adapt.txt
+new file mode 100644
+index 000000000000..a30d63db3c8f
+--- /dev/null
++++ b/Documentation/devicetree/bindings/display/panel/armadeus,st0700-adapt.txt
+@@ -0,0 +1,9 @@
++Armadeus ST0700 Adapt. A Santek ST0700I5Y-RBSLW 7.0" WVGA (800x480) TFT with
++an adapter board.
++
++Required properties:
++- compatible: "armadeus,st0700-adapt"
++- power-supply: see panel-common.txt
++
++Optional properties:
++- backlight: see panel-common.txt
+diff --git a/Documentation/devicetree/bindings/leds/backlight/lm3630a-backlight.yaml b/Documentation/devicetree/bindings/leds/backlight/lm3630a-backlight.yaml
+index 4d61fe0a98a4..dc129d9a329e 100644
+--- a/Documentation/devicetree/bindings/leds/backlight/lm3630a-backlight.yaml
++++ b/Documentation/devicetree/bindings/leds/backlight/lm3630a-backlight.yaml
+@@ -23,16 +23,17 @@ properties:
+ reg:
+ maxItems: 1
+
+- ti,linear-mapping-mode:
+- description: |
+- Enable linear mapping mode. If disabled, then it will use exponential
+- mapping mode in which the ramp up/down appears to have a more uniform
+- transition to the human eye.
+- type: boolean
++ '#address-cells':
++ const: 1
++
++ '#size-cells':
++ const: 0
+
+ required:
+ - compatible
+ - reg
++ - '#address-cells'
++ - '#size-cells'
+
+ patternProperties:
+ "^led@[01]$":
+@@ -48,7 +49,6 @@ patternProperties:
+ in this property. The two current sinks can be controlled
+ independently with both banks, or bank A can be configured to control
+ both sinks with the led-sources property.
+- maxItems: 1
+ minimum: 0
+ maximum: 1
+
+@@ -73,6 +73,13 @@ patternProperties:
+ minimum: 0
+ maximum: 255
+
++ ti,linear-mapping-mode:
++ description: |
++ Enable linear mapping mode. If disabled, then it will use exponential
++ mapping mode in which the ramp up/down appears to have a more uniform
++ transition to the human eye.
++ type: boolean
++
+ required:
+ - reg
+
+diff --git a/Documentation/devicetree/bindings/usb/usb251xb.txt b/Documentation/devicetree/bindings/usb/usb251xb.txt
+index bc7945e9dbfe..17915f64b8ee 100644
+--- a/Documentation/devicetree/bindings/usb/usb251xb.txt
++++ b/Documentation/devicetree/bindings/usb/usb251xb.txt
+@@ -64,10 +64,8 @@ Optional properties :
+ - power-on-time-ms : Specifies the time it takes from the time the host
+ initiates the power-on sequence to a port until the port has adequate
+ power. The value is given in ms in a 0 - 510 range (default is 100ms).
+- - swap-dx-lanes : Specifies the downstream ports which will swap the
+- differential-pair (D+/D-), default is not-swapped.
+- - swap-us-lanes : Selects the upstream port differential-pair (D+/D-)
+- swapping (boolean, default is not-swapped)
++ - swap-dx-lanes : Specifies the ports which will swap the differential-pair
++ (D+/D-), default is not-swapped.
+
+ Examples:
+ usb2512b@2c {
+diff --git a/Makefile b/Makefile
+index 68ee97784c4d..78bd926c8439 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 2
+-SUBLEVEL = 4
++SUBLEVEL = 5
+ EXTRAVERSION =
+ NAME = Bobtail Squid
+
+@@ -528,6 +528,7 @@ ifneq ($(GCC_TOOLCHAIN),)
+ CLANG_FLAGS += --gcc-toolchain=$(GCC_TOOLCHAIN)
+ endif
+ CLANG_FLAGS += -no-integrated-as
++CLANG_FLAGS += -Werror=unknown-warning-option
+ KBUILD_CFLAGS += $(CLANG_FLAGS)
+ KBUILD_AFLAGS += $(CLANG_FLAGS)
+ export CLANG_FLAGS
+diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
+index 570d195a184d..e3a15c751b13 100644
+--- a/arch/arm64/include/asm/assembler.h
++++ b/arch/arm64/include/asm/assembler.h
+@@ -96,7 +96,11 @@
+ * RAS Error Synchronization barrier
+ */
+ .macro esb
++#ifdef CONFIG_ARM64_RAS_EXTN
+ hint #16
++#else
++ nop
++#endif
+ .endm
+
+ /*
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index 8c1c636308c8..f7a363cbc1bb 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -121,6 +121,7 @@ config PPC
+ select ARCH_32BIT_OFF_T if PPC32
+ select ARCH_HAS_DEBUG_VIRTUAL
+ select ARCH_HAS_DEVMEM_IS_ALLOWED
++ select ARCH_HAS_DMA_MMAP_PGPROT
+ select ARCH_HAS_ELF_RANDOMIZE
+ select ARCH_HAS_FORTIFY_SOURCE
+ select ARCH_HAS_GCOV_PROFILE_ALL
+diff --git a/arch/powerpc/boot/xz_config.h b/arch/powerpc/boot/xz_config.h
+index e22e5b3770dd..ebfadd39e192 100644
+--- a/arch/powerpc/boot/xz_config.h
++++ b/arch/powerpc/boot/xz_config.h
+@@ -20,10 +20,30 @@ static inline uint32_t swab32p(void *p)
+
+ #ifdef __LITTLE_ENDIAN__
+ #define get_le32(p) (*((uint32_t *) (p)))
++#define cpu_to_be32(x) swab32(x)
++static inline u32 be32_to_cpup(const u32 *p)
++{
++ return swab32p((u32 *)p);
++}
+ #else
+ #define get_le32(p) swab32p(p)
++#define cpu_to_be32(x) (x)
++static inline u32 be32_to_cpup(const u32 *p)
++{
++ return *p;
++}
+ #endif
+
++static inline uint32_t get_unaligned_be32(const void *p)
++{
++ return be32_to_cpup(p);
++}
++
++static inline void put_unaligned_be32(u32 val, void *p)
++{
++ *((u32 *)p) = cpu_to_be32(val);
++}
++
+ #define memeq(a, b, size) (memcmp(a, b, size) == 0)
+ #define memzero(buf, size) memset(buf, 0, size)
+
+diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
+index 74d60cfe8ce5..fd318f7c3eed 100644
+--- a/arch/powerpc/include/asm/cacheflush.h
++++ b/arch/powerpc/include/asm/cacheflush.h
+@@ -29,9 +29,12 @@
+ * not expect this type of fault. flush_cache_vmap is not exactly the right
+ * place to put this, but it seems to work well enough.
+ */
+-#define flush_cache_vmap(start, end) do { asm volatile("ptesync" ::: "memory"); } while (0)
++static inline void flush_cache_vmap(unsigned long start, unsigned long end)
++{
++ asm volatile("ptesync" ::: "memory");
++}
+ #else
+-#define flush_cache_vmap(start, end) do { } while (0)
++static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
+ #endif
+
+ #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+diff --git a/arch/powerpc/include/asm/pmc.h b/arch/powerpc/include/asm/pmc.h
+index dc9a1ca70edf..c6bbe9778d3c 100644
+--- a/arch/powerpc/include/asm/pmc.h
++++ b/arch/powerpc/include/asm/pmc.h
+@@ -27,11 +27,10 @@ static inline void ppc_set_pmu_inuse(int inuse)
+ #ifdef CONFIG_PPC_PSERIES
+ get_lppaca()->pmcregs_in_use = inuse;
+ #endif
+- } else {
++ }
+ #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+- get_paca()->pmcregs_in_use = inuse;
++ get_paca()->pmcregs_in_use = inuse;
+ #endif
+- }
+ #endif
+ }
+
+diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
+index 0ea6c4aa3a20..21dfff2b25a1 100644
+--- a/arch/powerpc/kernel/Makefile
++++ b/arch/powerpc/kernel/Makefile
+@@ -49,7 +49,8 @@ obj-y := cputable.o ptrace.o syscalls.o \
+ signal.o sysfs.o cacheinfo.o time.o \
+ prom.o traps.o setup-common.o \
+ udbg.o misc.o io.o misc_$(BITS).o \
+- of_platform.o prom_parse.o
++ of_platform.o prom_parse.o \
++ dma-common.o
+ obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
+ signal_64.o ptrace32.o \
+ paca.o nvram_64.o firmware.o
+diff --git a/arch/powerpc/kernel/dma-common.c b/arch/powerpc/kernel/dma-common.c
+new file mode 100644
+index 000000000000..dc7ef6b17b69
+--- /dev/null
++++ b/arch/powerpc/kernel/dma-common.c
+@@ -0,0 +1,17 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Contains common dma routines for all powerpc platforms.
++ *
++ * Copyright (C) 2019 Shawn Anastasio.
++ */
++
++#include <linux/mm.h>
++#include <linux/dma-noncoherent.h>
++
++pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
++ unsigned long attrs)
++{
++ if (!dev_is_dma_coherent(dev))
++ return pgprot_noncached(prot);
++ return prot;
++}
+diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
+index f192d57db47d..c0e4b73191f3 100644
+--- a/arch/powerpc/kernel/eeh.c
++++ b/arch/powerpc/kernel/eeh.c
+@@ -354,10 +354,19 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
+ ptep = find_init_mm_pte(token, &hugepage_shift);
+ if (!ptep)
+ return token;
+- WARN_ON(hugepage_shift);
+- pa = pte_pfn(*ptep) << PAGE_SHIFT;
+
+- return pa | (token & (PAGE_SIZE-1));
++ pa = pte_pfn(*ptep);
++
++ /* On radix we can do hugepage mappings for io, so handle that */
++ if (hugepage_shift) {
++ pa <<= hugepage_shift;
++ pa |= token & ((1ul << hugepage_shift) - 1);
++ } else {
++ pa <<= PAGE_SHIFT;
++ pa |= token & (PAGE_SIZE - 1);
++ }
++
++ return pa;
+ }
+
+ /*
+diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
+index a293a53b4365..50262597c222 100644
+--- a/arch/powerpc/kernel/hw_breakpoint.c
++++ b/arch/powerpc/kernel/hw_breakpoint.c
+@@ -370,6 +370,11 @@ void hw_breakpoint_pmu_read(struct perf_event *bp)
+ bool dawr_force_enable;
+ EXPORT_SYMBOL_GPL(dawr_force_enable);
+
++static void set_dawr_cb(void *info)
++{
++ set_dawr(info);
++}
++
+ static ssize_t dawr_write_file_bool(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+@@ -389,7 +394,7 @@ static ssize_t dawr_write_file_bool(struct file *file,
+
+ /* If we are clearing, make sure all CPUs have the DAWR cleared */
+ if (!dawr_force_enable)
+- smp_call_function((smp_call_func_t)set_dawr, &null_brk, 0);
++ smp_call_function(set_dawr_cb, &null_brk, 0);
+
+ return rc;
+ }
+diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
+index bc68c53af67c..5645bc9cbc09 100644
+--- a/arch/powerpc/kernel/irq.c
++++ b/arch/powerpc/kernel/irq.c
+@@ -255,7 +255,7 @@ notrace void arch_local_irq_restore(unsigned long mask)
+ irq_happened = get_irq_happened();
+ if (!irq_happened) {
+ #ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+- WARN_ON(!(mfmsr() & MSR_EE));
++ WARN_ON_ONCE(!(mfmsr() & MSR_EE));
+ #endif
+ return;
+ }
+@@ -268,7 +268,7 @@ notrace void arch_local_irq_restore(unsigned long mask)
+ */
+ if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
+ #ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+- WARN_ON(!(mfmsr() & MSR_EE));
++ WARN_ON_ONCE(!(mfmsr() & MSR_EE));
+ #endif
+ __hard_irq_disable();
+ #ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+@@ -279,7 +279,7 @@ notrace void arch_local_irq_restore(unsigned long mask)
+ * warn if we are wrong. Only do that when IRQ tracing
+ * is enabled as mfmsr() can be costly.
+ */
+- if (WARN_ON(mfmsr() & MSR_EE))
++ if (WARN_ON_ONCE(mfmsr() & MSR_EE))
+ __hard_irq_disable();
+ #endif
+ }
+diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
+index 24522aa37665..c63c53b37e8e 100644
+--- a/arch/powerpc/kernel/pci_of_scan.c
++++ b/arch/powerpc/kernel/pci_of_scan.c
+@@ -42,6 +42,8 @@ unsigned int pci_parse_of_flags(u32 addr0, int bridge)
+ if (addr0 & 0x02000000) {
+ flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
+ flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64;
++ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
++ flags |= IORESOURCE_MEM_64;
+ flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
+ if (addr0 & 0x40000000)
+ flags |= IORESOURCE_PREFETCH
+diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
+index b824f4c69622..fff2eb22427d 100644
+--- a/arch/powerpc/kernel/rtas.c
++++ b/arch/powerpc/kernel/rtas.c
+@@ -980,10 +980,9 @@ int rtas_ibm_suspend_me(u64 handle)
+ cpu_hotplug_disable();
+
+ /* Check if we raced with a CPU-Offline Operation */
+- if (unlikely(!cpumask_equal(cpu_present_mask, cpu_online_mask))) {
+- pr_err("%s: Raced against a concurrent CPU-Offline\n",
+- __func__);
+- atomic_set(&data.error, -EBUSY);
++ if (!cpumask_equal(cpu_present_mask, cpu_online_mask)) {
++ pr_info("%s: Raced against a concurrent CPU-Offline\n", __func__);
++ atomic_set(&data.error, -EAGAIN);
+ goto out_hotplug_enable;
+ }
+
+diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
+index a2b74e057904..ebb78effd280 100644
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -1198,6 +1198,9 @@ SYSCALL_DEFINE0(rt_sigreturn)
+ goto bad;
+
+ if (MSR_TM_ACTIVE(msr_hi<<32)) {
++ /* Trying to start TM on non TM system */
++ if (!cpu_has_feature(CPU_FTR_TM))
++ goto bad;
+ /* We only recheckpoint on return if we're
+ * transaction.
+ */
+diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
+index 4292ea39baa4..bee704f32f96 100644
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -771,6 +771,11 @@ SYSCALL_DEFINE0(rt_sigreturn)
+ if (MSR_TM_ACTIVE(msr)) {
+ /* We recheckpoint on return. */
+ struct ucontext __user *uc_transact;
++
++ /* Trying to start TM on non TM system */
++ if (!cpu_has_feature(CPU_FTR_TM))
++ goto badframe;
++
+ if (__get_user(uc_transact, &uc->uc_link))
+ goto badframe;
+ if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index ec1804f822af..cde3f5a4b3e4 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -3569,9 +3569,18 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
+ mtspr(SPRN_DEC, vcpu->arch.dec_expires - mftb());
+
+ if (kvmhv_on_pseries()) {
++ /*
++ * We need to save and restore the guest visible part of the
++ * psscr (i.e. using SPRN_PSSCR_PR) since the hypervisor
++ * doesn't do this for us. Note only required if pseries since
++ * this is done in kvmhv_load_hv_regs_and_go() below otherwise.
++ */
++ unsigned long host_psscr;
+ /* call our hypervisor to load up HV regs and go */
+ struct hv_guest_state hvregs;
+
++ host_psscr = mfspr(SPRN_PSSCR_PR);
++ mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr);
+ kvmhv_save_hv_regs(vcpu, &hvregs);
+ hvregs.lpcr = lpcr;
+ vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
+@@ -3590,6 +3599,8 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
+ vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
+ vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
+ vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);
++ vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR);
++ mtspr(SPRN_PSSCR_PR, host_psscr);
+
+ /* H_CEDE has to be handled now, not later */
+ if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
+@@ -3654,6 +3665,8 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
+ vcpu->arch.vpa.dirty = 1;
+ save_pmu = lp->pmcregs_in_use;
+ }
++ /* Must save pmu if this guest is capable of running nested guests */
++ save_pmu |= nesting_enabled(vcpu->kvm);
+
+ kvmhv_save_guest_pmu(vcpu, save_pmu);
+
+diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
+index 6ca0d7376a9f..e3ba67095895 100644
+--- a/arch/powerpc/kvm/book3s_xive.c
++++ b/arch/powerpc/kvm/book3s_xive.c
+@@ -1986,10 +1986,8 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
+
+ xive->single_escalation = xive_native_has_single_escalation();
+
+- if (ret) {
+- kfree(xive);
++ if (ret)
+ return ret;
+- }
+
+ return 0;
+ }
+diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c
+index 5596c8ec221a..a998823f68a3 100644
+--- a/arch/powerpc/kvm/book3s_xive_native.c
++++ b/arch/powerpc/kvm/book3s_xive_native.c
+@@ -1090,9 +1090,9 @@ static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type)
+ xive->ops = &kvmppc_xive_native_ops;
+
+ if (ret)
+- kfree(xive);
++ return ret;
+
+- return ret;
++ return 0;
+ }
+
+ /*
+diff --git a/arch/powerpc/mm/book3s64/hash_native.c b/arch/powerpc/mm/book3s64/hash_native.c
+index 30d62ffe3310..1322c59cb5dd 100644
+--- a/arch/powerpc/mm/book3s64/hash_native.c
++++ b/arch/powerpc/mm/book3s64/hash_native.c
+@@ -56,7 +56,7 @@ static inline void tlbiel_hash_set_isa206(unsigned int set, unsigned int is)
+ * tlbiel instruction for hash, set invalidation
+ * i.e., r=1 and is=01 or is=10 or is=11
+ */
+-static inline void tlbiel_hash_set_isa300(unsigned int set, unsigned int is,
++static __always_inline void tlbiel_hash_set_isa300(unsigned int set, unsigned int is,
+ unsigned int pid,
+ unsigned int ric, unsigned int prs)
+ {
+diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
+index 28ced26f2a00..ab659044c7f6 100644
+--- a/arch/powerpc/mm/book3s64/hash_utils.c
++++ b/arch/powerpc/mm/book3s64/hash_utils.c
+@@ -1901,11 +1901,20 @@ void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
+ *
+ * For guests on platforms before POWER9, we clamp the it limit to 1G
+ * to avoid some funky things such as RTAS bugs etc...
++ *
++ * On POWER9 we limit to 1TB in case the host erroneously told us that
++ * the RMA was >1TB. Effective address bits 0:23 are treated as zero
++ * (meaning the access is aliased to zero i.e. addr = addr % 1TB)
++ * for virtual real mode addressing and so it doesn't make sense to
++ * have an area larger than 1TB as it can't be addressed.
+ */
+ if (!early_cpu_has_feature(CPU_FTR_HVMODE)) {
+ ppc64_rma_size = first_memblock_size;
+ if (!early_cpu_has_feature(CPU_FTR_ARCH_300))
+ ppc64_rma_size = min_t(u64, ppc64_rma_size, 0x40000000);
++ else
++ ppc64_rma_size = min_t(u64, ppc64_rma_size,
++ 1UL << SID_SHIFT_1T);
+
+ /* Finally limit subsequent allocations */
+ memblock_set_current_limit(ppc64_rma_size);
+diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
+index bb9835681315..d0cd5271a57c 100644
+--- a/arch/powerpc/mm/book3s64/radix_tlb.c
++++ b/arch/powerpc/mm/book3s64/radix_tlb.c
+@@ -25,7 +25,7 @@
+ * tlbiel instruction for radix, set invalidation
+ * i.e., r=1 and is=01 or is=10 or is=11
+ */
+-static inline void tlbiel_radix_set_isa300(unsigned int set, unsigned int is,
++static __always_inline void tlbiel_radix_set_isa300(unsigned int set, unsigned int is,
+ unsigned int pid,
+ unsigned int ric, unsigned int prs)
+ {
+@@ -146,8 +146,8 @@ static __always_inline void __tlbie_lpid(unsigned long lpid, unsigned long ric)
+ trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
+ }
+
+-static inline void __tlbiel_lpid_guest(unsigned long lpid, int set,
+- unsigned long ric)
++static __always_inline void __tlbiel_lpid_guest(unsigned long lpid, int set,
++ unsigned long ric)
+ {
+ unsigned long rb,rs,prs,r;
+
+@@ -163,8 +163,8 @@ static inline void __tlbiel_lpid_guest(unsigned long lpid, int set,
+ }
+
+
+-static inline void __tlbiel_va(unsigned long va, unsigned long pid,
+- unsigned long ap, unsigned long ric)
++static __always_inline void __tlbiel_va(unsigned long va, unsigned long pid,
++ unsigned long ap, unsigned long ric)
+ {
+ unsigned long rb,rs,prs,r;
+
+@@ -179,8 +179,8 @@ static inline void __tlbiel_va(unsigned long va, unsigned long pid,
+ trace_tlbie(0, 1, rb, rs, ric, prs, r);
+ }
+
+-static inline void __tlbie_va(unsigned long va, unsigned long pid,
+- unsigned long ap, unsigned long ric)
++static __always_inline void __tlbie_va(unsigned long va, unsigned long pid,
++ unsigned long ap, unsigned long ric)
+ {
+ unsigned long rb,rs,prs,r;
+
+@@ -195,8 +195,8 @@ static inline void __tlbie_va(unsigned long va, unsigned long pid,
+ trace_tlbie(0, 0, rb, rs, ric, prs, r);
+ }
+
+-static inline void __tlbie_lpid_va(unsigned long va, unsigned long lpid,
+- unsigned long ap, unsigned long ric)
++static __always_inline void __tlbie_lpid_va(unsigned long va, unsigned long lpid,
++ unsigned long ap, unsigned long ric)
+ {
+ unsigned long rb,rs,prs,r;
+
+@@ -235,7 +235,7 @@ static inline void fixup_tlbie_lpid(unsigned long lpid)
+ /*
+ * We use 128 set in radix mode and 256 set in hpt mode.
+ */
+-static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
++static __always_inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
+ {
+ int set;
+
+@@ -337,7 +337,7 @@ static inline void _tlbie_lpid(unsigned long lpid, unsigned long ric)
+ asm volatile("eieio; tlbsync; ptesync": : :"memory");
+ }
+
+-static inline void _tlbiel_lpid_guest(unsigned long lpid, unsigned long ric)
++static __always_inline void _tlbiel_lpid_guest(unsigned long lpid, unsigned long ric)
+ {
+ int set;
+
+@@ -377,8 +377,8 @@ static inline void __tlbiel_va_range(unsigned long start, unsigned long end,
+ __tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
+ }
+
+-static inline void _tlbiel_va(unsigned long va, unsigned long pid,
+- unsigned long psize, unsigned long ric)
++static __always_inline void _tlbiel_va(unsigned long va, unsigned long pid,
++ unsigned long psize, unsigned long ric)
+ {
+ unsigned long ap = mmu_get_ap(psize);
+
+@@ -409,8 +409,8 @@ static inline void __tlbie_va_range(unsigned long start, unsigned long end,
+ __tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
+ }
+
+-static inline void _tlbie_va(unsigned long va, unsigned long pid,
+- unsigned long psize, unsigned long ric)
++static __always_inline void _tlbie_va(unsigned long va, unsigned long pid,
++ unsigned long psize, unsigned long ric)
+ {
+ unsigned long ap = mmu_get_ap(psize);
+
+@@ -420,7 +420,7 @@ static inline void _tlbie_va(unsigned long va, unsigned long pid,
+ asm volatile("eieio; tlbsync; ptesync": : :"memory");
+ }
+
+-static inline void _tlbie_lpid_va(unsigned long va, unsigned long lpid,
++static __always_inline void _tlbie_lpid_va(unsigned long va, unsigned long lpid,
+ unsigned long psize, unsigned long ric)
+ {
+ unsigned long ap = mmu_get_ap(psize);
+
+diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
+index b5d92dc32844..1de0f43a68e5 100644
+--- a/arch/powerpc/mm/hugetlbpage.c
++++ b/arch/powerpc/mm/hugetlbpage.c
+@@ -130,6 +130,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
+ } else {
+ pdshift = PUD_SHIFT;
+ pu = pud_alloc(mm, pg, addr);
++ if (!pu)
++ return NULL;
+ if (pshift == PUD_SHIFT)
+ return (pte_t *)pu;
+ else if (pshift > PMD_SHIFT) {
+@@ -138,6 +140,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
+ } else {
+ pdshift = PMD_SHIFT;
+ pm = pmd_alloc(mm, pu, addr);
++ if (!pm)
++ return NULL;
+ if (pshift == PMD_SHIFT)
+ /* 16MB hugepage */
+ return (pte_t *)pm;
+@@ -154,12 +158,16 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
+ } else {
+ pdshift = PUD_SHIFT;
+ pu = pud_alloc(mm, pg, addr);
++ if (!pu)
++ return NULL;
+ if (pshift >= PUD_SHIFT) {
+ ptl = pud_lockptr(mm, pu);
+ hpdp = (hugepd_t *)pu;
+ } else {
+ pdshift = PMD_SHIFT;
+ pm = pmd_alloc(mm, pu, addr);
++ if (!pm)
++ return NULL;
+ ptl = pmd_lockptr(mm, pm);
+ hpdp = (hugepd_t *)pm;
+ }
+diff --git a/arch/powerpc/platforms/4xx/uic.c b/arch/powerpc/platforms/4xx/uic.c
+index 31f12ad37a98..36fb66ce54cf 100644
+--- a/arch/powerpc/platforms/4xx/uic.c
++++ b/arch/powerpc/platforms/4xx/uic.c
+@@ -154,6 +154,7 @@ static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type)
+
+ mtdcr(uic->dcrbase + UIC_PR, pr);
+ mtdcr(uic->dcrbase + UIC_TR, tr);
++ mtdcr(uic->dcrbase + UIC_SR, ~mask);
+
+ raw_spin_unlock_irqrestore(&uic->lock, flags);
+
+diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
+index 0c48c8964783..50e7aee3c7f3 100644
+--- a/arch/powerpc/platforms/pseries/mobility.c
++++ b/arch/powerpc/platforms/pseries/mobility.c
+@@ -6,6 +6,7 @@
+ * Copyright (C) 2010 IBM Corporation
+ */
+
++#include <linux/cpu.h>
+ #include <linux/kernel.h>
+ #include <linux/kobject.h>
+ #include <linux/smp.h>
+@@ -335,11 +336,19 @@ void post_mobility_fixup(void)
+ if (rc)
+ printk(KERN_ERR "Post-mobility activate-fw failed: %d\n", rc);
+
++ /*
++ * We don't want CPUs to go online/offline while the device
++ * tree is being updated.
++ */
++ cpus_read_lock();
++
+ rc = pseries_devicetree_update(MIGRATION_SCOPE);
+ if (rc)
+ printk(KERN_ERR "Post-mobility device tree update "
+ "failed: %d\n", rc);
+
++ cpus_read_unlock();
++
+ /* Possibly switch to a new RFI flush type */
+ pseries_setup_rfi_flush();
+
+diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
+index 082c7e1c20f0..1cdb39575eae 100644
+--- a/arch/powerpc/sysdev/xive/common.c
++++ b/arch/powerpc/sysdev/xive/common.c
+@@ -479,7 +479,7 @@ static int xive_find_target_in_mask(const struct cpumask *mask,
+ * Now go through the entire mask until we find a valid
+ * target.
+ */
+- for (;;) {
++ do {
+ /*
+ * We re-check online as the fallback case passes us
+ * an untested affinity mask
+@@ -487,12 +487,11 @@ static int xive_find_target_in_mask(const struct cpumask *mask,
+ if (cpu_online(cpu) && xive_try_pick_target(cpu))
+ return cpu;
+ cpu = cpumask_next(cpu, mask);
+- if (cpu == first)
+- break;
+ /* Wrap around */
+ if (cpu >= nr_cpu_ids)
+ cpu = cpumask_first(mask);
+- }
++ } while (cpu != first);
++
+ return -1;
+ }
+
+diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
+index d0620d762a5a..4a721fd62406 100644
+--- a/arch/powerpc/xmon/xmon.c
++++ b/arch/powerpc/xmon/xmon.c
+@@ -465,8 +465,10 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
+ local_irq_save(flags);
+ hard_irq_disable();
+
+- tracing_enabled = tracing_is_on();
+- tracing_off();
++ if (!fromipi) {
++ tracing_enabled = tracing_is_on();
++ tracing_off();
++ }
+
+ bp = in_breakpoint_table(regs->nip, &offset);
+ if (bp != NULL) {
+diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
+index c28e37a344ad..ac0561960c52 100644
+--- a/arch/sh/include/asm/io.h
++++ b/arch/sh/include/asm/io.h
+@@ -369,7 +369,11 @@ static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
+
+ #define ioremap_nocache ioremap
+ #define ioremap_uc ioremap
+-#define iounmap __iounmap
++
++static inline void iounmap(void __iomem *addr)
++{
++ __iounmap(addr);
++}
+
+ /*
+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
+diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
+index 9f4b4bb78120..00cefd33afdd 100644
+--- a/arch/um/include/asm/mmu_context.h
++++ b/arch/um/include/asm/mmu_context.h
+@@ -52,7 +52,7 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
+ * when the new ->mm is used for the first time.
+ */
+ __switch_mm(&new->context.id);
+- down_write(&new->mmap_sem);
++ down_write_nested(&new->mmap_sem, 1);
+ uml_setup_stubs(new);
+ up_write(&new->mmap_sem);
+ }
+diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
+index d213ec5c3766..f0b0c90dd398 100644
+--- a/arch/x86/include/uapi/asm/vmx.h
++++ b/arch/x86/include/uapi/asm/vmx.h
+@@ -146,7 +146,6 @@
+
+ #define VMX_ABORT_SAVE_GUEST_MSR_FAIL 1
+ #define VMX_ABORT_LOAD_HOST_PDPTE_FAIL 2
+-#define VMX_ABORT_VMCS_CORRUPTED 3
+ #define VMX_ABORT_LOAD_HOST_MSR_FAIL 4
+
+ #endif /* _UAPIVMX_H */
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 66ca906aa790..801ecd1c3fd5 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -1226,7 +1226,7 @@ static ssize_t l1tf_show_state(char *buf)
+
+ static ssize_t mds_show_state(char *buf)
+ {
+- if (!hypervisor_is_type(X86_HYPER_NATIVE)) {
++ if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
+ return sprintf(buf, "%s; SMT Host state unknown\n",
+ mds_strings[mds_mitigation]);
+ }
+diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
+index 4f36d3241faf..2d6898c2cb64 100644
+--- a/arch/x86/kernel/stacktrace.c
++++ b/arch/x86/kernel/stacktrace.c
+@@ -100,7 +100,7 @@ copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
+ {
+ int ret;
+
+- if (!access_ok(fp, sizeof(*frame)))
++ if (__range_not_ok(fp, sizeof(*frame), TASK_SIZE))
+ return 0;
+
+ ret = 1;
+diff --git a/arch/x86/kernel/sysfb_efi.c b/arch/x86/kernel/sysfb_efi.c
+index 8eb67a670b10..653b7f617b61 100644
+--- a/arch/x86/kernel/sysfb_efi.c
++++ b/arch/x86/kernel/sysfb_efi.c
+@@ -230,9 +230,55 @@ static const struct dmi_system_id efifb_dmi_system_table[] __initconst = {
+ {},
+ };
+
++/*
++ * Some devices have a portrait LCD but advertise a landscape resolution (and
++ * pitch). We simply swap width and height for these devices so that we can
++ * correctly deal with some of them coming with multiple resolutions.
++ */
++static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = {
++ {
++ /*
++ * Lenovo MIIX310-10ICR, only some batches have the troublesome
++ * 800x1280 portrait screen. Luckily the portrait version has
++ * its own BIOS version, so we match on that.
++ */
++ .matches = {
++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "MIIX 310-10ICR"),
++ DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1HCN44WW"),
++ },
++ },
++ {
++ /* Lenovo MIIX 320-10ICR with 800x1280 portrait screen */
++ .matches = {
++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
++ "Lenovo MIIX 320-10ICR"),
++ },
++ },
++ {
++ /* Lenovo D330 with 800x1280 or 1200x1920 portrait screen */
++ .matches = {
++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
++ "Lenovo ideapad D330-10IGM"),
++ },
++ },
++ {},
++};
++
+ __init void sysfb_apply_efi_quirks(void)
+ {
+ if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI ||
+ !(screen_info.capabilities & VIDEO_CAPABILITY_SKIP_QUIRKS))
+ dmi_check_system(efifb_dmi_system_table);
++
++ if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI &&
++ dmi_check_system(efifb_dmi_swap_width_height)) {
++ u16 temp = screen_info.lfb_width;
++
++ screen_info.lfb_width = screen_info.lfb_height;
++ screen_info.lfb_height = temp;
++ screen_info.lfb_linelength = 4 * screen_info.lfb_width;
++ }
+ }
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index b101127e13b6..ef6575ab60ed 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -91,6 +91,10 @@ static void init_vmcs_shadow_fields(void)
+ pr_err("Missing field from shadow_read_write_field %x\n",
+ field + 1);
+
++ WARN_ONCE(field >= GUEST_ES_AR_BYTES &&
++ field <= GUEST_TR_AR_BYTES,
++ "Update vmcs12_write_any() to expose AR_BYTES RW");
++
+ /*
+ * PML and the preemption timer can be emulated, but the
+ * processor cannot vmwrite to fields that don't exist
+@@ -2969,6 +2973,25 @@ int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
+ !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
+ vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
+
++ /*
++ * Overwrite vmcs01.GUEST_CR3 with L1's CR3 if EPT is disabled *and*
++ * nested early checks are disabled. In the event of a "late" VM-Fail,
++ * i.e. a VM-Fail detected by hardware but not KVM, KVM must unwind its
++ * software model to the pre-VMEntry host state. When EPT is disabled,
++ * GUEST_CR3 holds KVM's shadow CR3, not L1's "real" CR3, which causes
++ * nested_vmx_restore_host_state() to corrupt vcpu->arch.cr3. Stuffing
++ * vmcs01.GUEST_CR3 results in the unwind naturally setting arch.cr3 to
++ * the correct value. Smashing vmcs01.GUEST_CR3 is safe because nested
++ * VM-Exits, and the unwind, reset KVM's MMU, i.e. vmcs01.GUEST_CR3 is
++ * guaranteed to be overwritten with a shadow CR3 prior to re-entering
++ * L1. Don't stuff vmcs01.GUEST_CR3 when using nested early checks as
++ * KVM modifies vcpu->arch.cr3 if and only if the early hardware checks
++ * pass, and early VM-Fails do not reset KVM's MMU, i.e. the VM-Fail
++ * path would need to manually save/restore vmcs01.GUEST_CR3.
++ */
++ if (!enable_ept && !nested_early_check)
++ vmcs_writel(GUEST_CR3, vcpu->arch.cr3);
++
+ vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
+
+ prepare_vmcs02_early(vmx, vmcs12);
+@@ -3780,18 +3803,8 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
+ vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));
+
+ nested_ept_uninit_mmu_context(vcpu);
+-
+- /*
+- * This is only valid if EPT is in use, otherwise the vmcs01 GUEST_CR3
+- * points to shadow pages! Fortunately we only get here after a WARN_ON
+- * if EPT is disabled, so a VMabort is perfectly fine.
+- */
+- if (enable_ept) {
+- vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
+- __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+- } else {
+- nested_vmx_abort(vcpu, VMX_ABORT_VMCS_CORRUPTED);
+- }
++ vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
++ __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+
+ /*
+ * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
+@@ -3799,7 +3812,8 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
+ * VMFail, like everything else we just need to ensure our
+ * software model is up-to-date.
+ */
+- ept_save_pdptrs(vcpu);
++ if (enable_ept)
++ ept_save_pdptrs(vcpu);
+
+ kvm_mmu_reset_context(vcpu);
+
+@@ -4013,7 +4027,7 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
+ * #UD or #GP.
+ */
+ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
+- u32 vmx_instruction_info, bool wr, gva_t *ret)
++ u32 vmx_instruction_info, bool wr, int len, gva_t *ret)
+ {
+ gva_t off;
+ bool exn;
+@@ -4120,7 +4134,7 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
+ */
+ if (!(s.base == 0 && s.limit == 0xffffffff &&
+ ((s.type & 8) || !(s.type & 4))))
+- exn = exn || (off + sizeof(u64) > s.limit);
++ exn = exn || ((u64)off + len - 1 > s.limit);
+ }
+ if (exn) {
+ kvm_queue_exception_e(vcpu,
+@@ -4139,7 +4153,8 @@ static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
+ struct x86_exception e;
+
+ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+- vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
++ vmcs_read32(VMX_INSTRUCTION_INFO), false,
++ sizeof(*vmpointer), &gva))
+ return 1;
+
+ if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) {
+@@ -4390,6 +4405,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
+ u64 field_value;
+ unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
++ int len;
+ gva_t gva = 0;
+ struct vmcs12 *vmcs12;
+
+@@ -4427,12 +4443,12 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
+ kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
+ field_value);
+ } else {
++ len = is_64_bit_mode(vcpu) ? 8 : 4;
+ if (get_vmx_mem_address(vcpu, exit_qualification,
+- vmx_instruction_info, true, &gva))
++ vmx_instruction_info, true, len, &gva))
+ return 1;
+ /* _system ok, nested_vmx_check_permission has verified cpl=0 */
+- kvm_write_guest_virt_system(vcpu, gva, &field_value,
+- (is_long_mode(vcpu) ? 8 : 4), NULL);
++ kvm_write_guest_virt_system(vcpu, gva, &field_value, len, NULL);
+ }
+
+ return nested_vmx_succeed(vcpu);
+@@ -4442,6 +4458,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
+ static int handle_vmwrite(struct kvm_vcpu *vcpu)
+ {
+ unsigned long field;
++ int len;
+ gva_t gva;
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+@@ -4467,11 +4484,11 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
+ field_value = kvm_register_readl(vcpu,
+ (((vmx_instruction_info) >> 3) & 0xf));
+ else {
++ len = is_64_bit_mode(vcpu) ? 8 : 4;
+ if (get_vmx_mem_address(vcpu, exit_qualification,
+- vmx_instruction_info, false, &gva))
++ vmx_instruction_info, false, len, &gva))
+ return 1;
+- if (kvm_read_guest_virt(vcpu, gva, &field_value,
+- (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
++ if (kvm_read_guest_virt(vcpu, gva, &field_value, len, &e)) {
+ kvm_inject_page_fault(vcpu, &e);
+ return 1;
+ }
+@@ -4500,6 +4517,17 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
+ vmcs12 = get_shadow_vmcs12(vcpu);
+ }
+
++ /*
++ * Some Intel CPUs intentionally drop the reserved bits of the AR byte
++ * fields on VMWRITE. Emulate this behavior to ensure consistent KVM
++ * behavior regardless of the underlying hardware, e.g. if an AR_BYTE
++ * field is intercepted for VMWRITE but not VMREAD (in L1), then VMREAD
++ * from L1 will return a different value than VMREAD from L2 (L1 sees
++ * the stripped down value, L2 sees the full value as stored by KVM).
++ */
++ if (field >= GUEST_ES_AR_BYTES && field <= GUEST_TR_AR_BYTES)
++ field_value &= 0x1f0ff;
++
+ if (vmcs12_write_any(vmcs12, field, field_value) < 0)
+ return nested_vmx_failValid(vcpu,
+ VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+@@ -4619,7 +4647,8 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
+ if (unlikely(to_vmx(vcpu)->nested.hv_evmcs))
+ return 1;
+
+- if (get_vmx_mem_address(vcpu, exit_qual, instr_info, true, &gva))
++ if (get_vmx_mem_address(vcpu, exit_qual, instr_info,
++ true, sizeof(gpa_t), &gva))
+ return 1;
+ /* *_system ok, nested_vmx_check_permission has verified cpl=0 */
+ if (kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
+@@ -4665,7 +4694,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
+ * operand is read even if it isn't needed (e.g., for type==global)
+ */
+ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+- vmx_instruction_info, false, &gva))
++ vmx_instruction_info, false, sizeof(operand), &gva))
+ return 1;
+ if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
+ kvm_inject_page_fault(vcpu, &e);
+@@ -4727,7 +4756,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
+ * operand is read even if it isn't needed (e.g., for type==global)
+ */
+ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+- vmx_instruction_info, false, &gva))
++ vmx_instruction_info, false, sizeof(operand), &gva))
+ return 1;
+ if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
+ kvm_inject_page_fault(vcpu, &e);
+@@ -5753,14 +5782,6 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
+ {
+ int i;
+
+- /*
+- * Without EPT it is not possible to restore L1's CR3 and PDPTR on
+- * VMfail, because they are not available in vmcs01. Just always
+- * use hardware checks.
+- */
+- if (!enable_ept)
+- nested_early_check = 1;
+-
+ if (!cpu_has_vmx_shadow_vmcs())
+ enable_shadow_vmcs = 0;
+ if (enable_shadow_vmcs) {
+diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h
+index e847ff1019a2..29d205bb4e4f 100644
+--- a/arch/x86/kvm/vmx/nested.h
++++ b/arch/x86/kvm/vmx/nested.h
+@@ -21,7 +21,7 @@ void nested_sync_from_vmcs12(struct kvm_vcpu *vcpu);
+ int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
+ int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata);
+ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
+- u32 vmx_instruction_info, bool wr, gva_t *ret);
++ u32 vmx_instruction_info, bool wr, int len, gva_t *ret);
+
+ static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
+ {
+diff --git a/arch/x86/kvm/vmx/vmcs_shadow_fields.h b/arch/x86/kvm/vmx/vmcs_shadow_fields.h
+index 132432f375c2..97dd5295be31 100644
+--- a/arch/x86/kvm/vmx/vmcs_shadow_fields.h
++++ b/arch/x86/kvm/vmx/vmcs_shadow_fields.h
+@@ -40,14 +40,14 @@ SHADOW_FIELD_RO(VM_EXIT_INSTRUCTION_LEN)
+ SHADOW_FIELD_RO(IDT_VECTORING_INFO_FIELD)
+ SHADOW_FIELD_RO(IDT_VECTORING_ERROR_CODE)
+ SHADOW_FIELD_RO(VM_EXIT_INTR_ERROR_CODE)
++SHADOW_FIELD_RO(GUEST_CS_AR_BYTES)
++SHADOW_FIELD_RO(GUEST_SS_AR_BYTES)
+ SHADOW_FIELD_RW(CPU_BASED_VM_EXEC_CONTROL)
+ SHADOW_FIELD_RW(EXCEPTION_BITMAP)
+ SHADOW_FIELD_RW(VM_ENTRY_EXCEPTION_ERROR_CODE)
+ SHADOW_FIELD_RW(VM_ENTRY_INTR_INFO_FIELD)
+ SHADOW_FIELD_RW(VM_ENTRY_INSTRUCTION_LEN)
+ SHADOW_FIELD_RW(TPR_THRESHOLD)
+-SHADOW_FIELD_RW(GUEST_CS_AR_BYTES)
+-SHADOW_FIELD_RW(GUEST_SS_AR_BYTES)
+ SHADOW_FIELD_RW(GUEST_INTERRUPTIBILITY_INFO)
+ SHADOW_FIELD_RW(VMX_PREEMPTION_TIMER_VALUE)
+
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 306ed28569c0..924c2a79e4a9 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -5349,7 +5349,8 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
+ * is read even if it isn't needed (e.g., for type==all)
+ */
+ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+- vmx_instruction_info, false, &gva))
++ vmx_instruction_info, false,
++ sizeof(operand), &gva))
+ return 1;
+
+ if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index a4eceb0b5dde..a8ad3a4d86b1 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3264,6 +3264,10 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+
+ kvm_x86_ops->vcpu_load(vcpu, cpu);
+
++ fpregs_assert_state_consistent();
++ if (test_thread_flag(TIF_NEED_FPU_LOAD))
++ switch_fpu_return();
++
+ /* Apply any externally detected TSC adjustments (due to suspend) */
+ if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
+ adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
+@@ -7955,9 +7959,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ wait_lapic_expire(vcpu);
+ guest_enter_irqoff();
+
+- fpregs_assert_state_consistent();
+- if (test_thread_flag(TIF_NEED_FPU_LOAD))
+- switch_fpu_return();
++ /* The preempt notifier should have taken care of the FPU already. */
++ WARN_ON_ONCE(test_thread_flag(TIF_NEED_FPU_LOAD));
+
+ if (unlikely(vcpu->arch.switch_db_regs)) {
+ set_debugreg(0, 7);
+diff --git a/block/bio-integrity.c b/block/bio-integrity.c
+index 4db620849515..fb95dbb21dd8 100644
+--- a/block/bio-integrity.c
++++ b/block/bio-integrity.c
+@@ -276,8 +276,12 @@ bool bio_integrity_prep(struct bio *bio)
+ ret = bio_integrity_add_page(bio, virt_to_page(buf),
+ bytes, offset);
+
+- if (ret == 0)
+- return false;
++ if (ret == 0) {
++ printk(KERN_ERR "could not attach integrity payload\n");
++ kfree(buf);
++ status = BLK_STS_RESOURCE;
++ goto err_end_io;
++ }
+
+ if (ret < bytes)
+ break;
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 8340f69670d8..5183fca0818a 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -117,6 +117,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
+ rq->internal_tag = -1;
+ rq->start_time_ns = ktime_get_ns();
+ rq->part = NULL;
++ refcount_set(&rq->ref, 1);
+ }
+ EXPORT_SYMBOL(blk_rq_init);
+
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index 38a59a630cd4..dc1c83eafc22 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -2988,7 +2988,7 @@ static void binder_transaction(struct binder_proc *proc,
+ else
+ return_error = BR_DEAD_REPLY;
+ mutex_unlock(&context->context_mgr_node_lock);
+- if (target_node && target_proc == proc) {
++ if (target_node && target_proc->pid == proc->pid) {
+ binder_user_error("%d:%d got transaction to context manager from process owning it\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+@@ -3239,7 +3239,8 @@ static void binder_transaction(struct binder_proc *proc,
+ buffer_offset = off_start_offset;
+ off_end_offset = off_start_offset + tr->offsets_size;
+ sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
+- sg_buf_end_offset = sg_buf_offset + extra_buffers_size;
++ sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
++ ALIGN(secctx_sz, sizeof(u64));
+ off_min = 0;
+ for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
+ buffer_offset += sizeof(binder_size_t)) {
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index fd7511e04e62..eaf3aa0cb803 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -2211,6 +2211,24 @@ void put_device(struct device *dev)
+ }
+ EXPORT_SYMBOL_GPL(put_device);
+
++bool kill_device(struct device *dev)
++{
++ /*
++ * Require the device lock and set the "dead" flag to guarantee that
++ * the update behavior is consistent with the other bitfields near
++ * it and that we cannot have an asynchronous probe routine trying
++ * to run while we are tearing out the bus/class/sysfs from
++ * underneath the device.
++ */
++ lockdep_assert_held(&dev->mutex);
++
++ if (dev->p->dead)
++ return false;
++ dev->p->dead = true;
++ return true;
++}
++EXPORT_SYMBOL_GPL(kill_device);
++
+ /**
+ * device_del - delete device from system.
+ * @dev: device.
+@@ -2230,15 +2248,8 @@ void device_del(struct device *dev)
+ struct kobject *glue_dir = NULL;
+ struct class_interface *class_intf;
+
+- /*
+- * Hold the device lock and set the "dead" flag to guarantee that
+- * the update behavior is consistent with the other bitfields near
+- * it and that we cannot have an asynchronous probe routine trying
+- * to run while we are tearing out the bus/class/sysfs from
+- * underneath the device.
+- */
+ device_lock(dev);
+- dev->p->dead = true;
++ kill_device(dev);
+ device_unlock(dev);
+
+ /* Notify clients of device removal. This call must come
+diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
+index 5c39f20378b8..9ac6671bb514 100644
+--- a/drivers/char/hpet.c
++++ b/drivers/char/hpet.c
+@@ -567,8 +567,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
+ unsigned long long m;
+
+ m = hpets->hp_tick_freq + (dis >> 1);
+- do_div(m, dis);
+- return (unsigned long)m;
++ return div64_ul(m, dis);
+ }
+
+ static int
+diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c
+index f2a91c4d8cab..0cd849675d99 100644
+--- a/drivers/char/ipmi/ipmi_si_platform.c
++++ b/drivers/char/ipmi/ipmi_si_platform.c
+@@ -19,6 +19,7 @@
+ #include "ipmi_si.h"
+ #include "ipmi_dmi.h"
+
++static bool platform_registered;
+ static bool si_tryplatform = true;
+ #ifdef CONFIG_ACPI
+ static bool si_tryacpi = true;
+@@ -469,9 +470,12 @@ void ipmi_si_platform_init(void)
+ int rv = platform_driver_register(&ipmi_platform_driver);
+ if (rv)
+ pr_err("Unable to register driver: %d\n", rv);
++ else
++ platform_registered = true;
+ }
+
+ void ipmi_si_platform_shutdown(void)
+ {
+- platform_driver_unregister(&ipmi_platform_driver);
++ if (platform_registered)
++ platform_driver_unregister(&ipmi_platform_driver);
+ }
+diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+index cf8156d6bc07..305fa5054274 100644
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -303,6 +303,7 @@ struct ssif_info {
+ ((unsigned int) atomic_read(&(ssif)->stats[SSIF_STAT_ ## stat]))
+
+ static bool initialized;
++static bool platform_registered;
+
+ static void return_hosed_msg(struct ssif_info *ssif_info,
+ struct ipmi_smi_msg *msg);
+@@ -2088,6 +2089,8 @@ static int init_ipmi_ssif(void)
+ rv = platform_driver_register(&ipmi_driver);
+ if (rv)
+ pr_err("Unable to register driver: %d\n", rv);
++ else
++ platform_registered = true;
+ }
+
+ ssif_i2c_driver.address_list = ssif_address_list();
+@@ -2111,7 +2114,7 @@ static void cleanup_ipmi_ssif(void)
+
+ kfree(ssif_i2c_driver.address_list);
+
+- if (ssif_trydmi)
++ if (ssif_trydmi && platform_registered)
+ platform_driver_unregister(&ipmi_driver);
+
+ free_ssif_clients();
1348 |
+diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig |
1349 |
+index 8072c195d831..dd414250e77e 100644 |
1350 |
+--- a/drivers/fpga/Kconfig |
1351 |
++++ b/drivers/fpga/Kconfig |
1352 |
+@@ -40,6 +40,7 @@ config ALTERA_PR_IP_CORE_PLAT |
1353 |
+ config FPGA_MGR_ALTERA_PS_SPI |
1354 |
+ tristate "Altera FPGA Passive Serial over SPI" |
1355 |
+ depends on SPI |
1356 |
++ select BITREVERSE |
1357 |
+ help |
1358 |
+ FPGA manager driver support for Altera Arria/Cyclone/Stratix |
1359 |
+ using the passive serial interface over SPI. |
1360 |
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c |
1361 |
+index a6e5184d436c..4b192e0ce92f 100644 |
1362 |
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c |
1363 |
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c |
1364 |
+@@ -896,6 +896,9 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info, |
1365 |
+ AMDGPU_FENCE_OWNER_KFD, false); |
1366 |
+ if (ret) |
1367 |
+ goto wait_pd_fail; |
1368 |
++ ret = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv, 1); |
1369 |
++ if (ret) |
1370 |
++ goto reserve_shared_fail; |
1371 |
+ amdgpu_bo_fence(vm->root.base.bo, |
1372 |
+ &vm->process_info->eviction_fence->base, true); |
1373 |
+ amdgpu_bo_unreserve(vm->root.base.bo); |
1374 |
+@@ -909,6 +912,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info, |
1375 |
+ |
1376 |
+ return 0; |
1377 |
+ |
1378 |
++reserve_shared_fail: |
1379 |
+ wait_pd_fail: |
1380 |
+ validate_pd_fail: |
1381 |
+ amdgpu_bo_unreserve(vm->root.base.bo); |
1382 |
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c |
1383 |
+index 72837b8c7031..c2086eb00555 100644 |
1384 |
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c |
1385 |
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c |
1386 |
+@@ -1163,6 +1163,9 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) |
1387 |
+ tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL); |
1388 |
+ WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp); |
1389 |
+ |
1390 |
++ WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8)); |
1391 |
++ WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40)); |
1392 |
++ |
1393 |
+ /* After HDP is initialized, flush HDP.*/ |
1394 |
+ adev->nbio_funcs->hdp_flush(adev, NULL); |
1395 |
+ |
1396 |
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c |
1397 |
+index ae381450601c..afbaf6f5131e 100644 |
1398 |
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c |
1399 |
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c |
1400 |
+@@ -1268,12 +1268,17 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr, |
1401 |
+ return 0; |
1402 |
+ } |
1403 |
+ |
1404 |
+-static int unmap_sdma_queues(struct device_queue_manager *dqm, |
1405 |
+- unsigned int sdma_engine) |
1406 |
++static int unmap_sdma_queues(struct device_queue_manager *dqm) |
1407 |
+ { |
1408 |
+- return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA, |
1409 |
+- KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false, |
1410 |
+- sdma_engine); |
1411 |
++ int i, retval = 0; |
1412 |
++ |
1413 |
++ for (i = 0; i < dqm->dev->device_info->num_sdma_engines; i++) { |
1414 |
++ retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA, |
1415 |
++ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false, i); |
1416 |
++ if (retval) |
1417 |
++ return retval; |
1418 |
++ } |
1419 |
++ return retval; |
1420 |
+ } |
1421 |
+ |
1422 |
+ /* dqm->lock mutex has to be locked before calling this function */ |
1423 |
+@@ -1312,10 +1317,8 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm, |
1424 |
+ pr_debug("Before destroying queues, sdma queue count is : %u\n", |
1425 |
+ dqm->sdma_queue_count); |
1426 |
+ |
1427 |
+- if (dqm->sdma_queue_count > 0) { |
1428 |
+- unmap_sdma_queues(dqm, 0); |
1429 |
+- unmap_sdma_queues(dqm, 1); |
1430 |
+- } |
1431 |
++ if (dqm->sdma_queue_count > 0) |
1432 |
++ unmap_sdma_queues(dqm); |
1433 |
+ |
1434 |
+ retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE, |
1435 |
+ filter, filter_param, false, 0); |
1436 |
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c |
1437 |
+index 9dbba609450e..8fe74b821b32 100644 |
1438 |
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c |
1439 |
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c |
1440 |
+@@ -76,6 +76,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd, |
1441 |
+ struct v9_mqd *m; |
1442 |
+ struct kfd_dev *kfd = mm->dev; |
1443 |
+ |
1444 |
++ *mqd_mem_obj = NULL; |
1445 |
+ /* From V9, for CWSR, the control stack is located on the next page |
1446 |
+ * boundary after the mqd, we will use the gtt allocation function |
1447 |
+ * instead of sub-allocation function. |
1448 |
+@@ -93,8 +94,10 @@ static int init_mqd(struct mqd_manager *mm, void **mqd, |
1449 |
+ } else |
1450 |
+ retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct v9_mqd), |
1451 |
+ mqd_mem_obj); |
1452 |
+- if (retval != 0) |
1453 |
++ if (retval) { |
1454 |
++ kfree(*mqd_mem_obj); |
1455 |
+ return -ENOMEM; |
1456 |
++ } |
1457 |
+ |
1458 |
+ m = (struct v9_mqd *) (*mqd_mem_obj)->cpu_ptr; |
1459 |
+ addr = (*mqd_mem_obj)->gpu_addr; |
1460 |
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |
1461 |
+index ab7c5c3004ee..dc3ac66a4450 100644 |
1462 |
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |
1463 |
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |
1464 |
+@@ -2592,7 +2592,7 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev, |
1465 |
+ address->type = PLN_ADDR_TYPE_GRAPHICS; |
1466 |
+ address->grph.addr.low_part = lower_32_bits(afb->address); |
1467 |
+ address->grph.addr.high_part = upper_32_bits(afb->address); |
1468 |
+- } else { |
1469 |
++ } else if (format < SURFACE_PIXEL_FORMAT_INVALID) { |
1470 |
+ uint64_t chroma_addr = afb->address + fb->offsets[1]; |
1471 |
+ |
1472 |
+ plane_size->video.luma_size.x = 0; |
1473 |
+@@ -4627,6 +4627,13 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, |
1474 |
+ { |
1475 |
+ struct amdgpu_device *adev = dm->ddev->dev_private; |
1476 |
+ |
1477 |
++ /* |
1478 |
++ * Some of the properties below require access to state, like bpc. |
1479 |
++ * Allocate some default initial connector state with our reset helper. |
1480 |
++ */ |
1481 |
++ if (aconnector->base.funcs->reset) |
1482 |
++ aconnector->base.funcs->reset(&aconnector->base); |
1483 |
++ |
1484 |
+ aconnector->connector_id = link_index; |
1485 |
+ aconnector->dc_link = link; |
1486 |
+ aconnector->base.interlace_allowed = false; |
1487 |
+@@ -4809,9 +4816,6 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, |
1488 |
+ &aconnector->base, |
1489 |
+ &amdgpu_dm_connector_helper_funcs); |
1490 |
+ |
1491 |
+- if (aconnector->base.funcs->reset) |
1492 |
+- aconnector->base.funcs->reset(&aconnector->base); |
1493 |
+- |
1494 |
+ amdgpu_dm_connector_init_helper( |
1495 |
+ dm, |
1496 |
+ aconnector, |
1497 |
+@@ -4952,12 +4956,12 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, |
1498 |
+ int x, y; |
1499 |
+ int xorigin = 0, yorigin = 0; |
1500 |
+ |
1501 |
+- if (!crtc || !plane->state->fb) { |
1502 |
+- position->enable = false; |
1503 |
+- position->x = 0; |
1504 |
+- position->y = 0; |
1505 |
++ position->enable = false; |
1506 |
++ position->x = 0; |
1507 |
++ position->y = 0; |
1508 |
++ |
1509 |
++ if (!crtc || !plane->state->fb) |
1510 |
+ return 0; |
1511 |
+- } |
1512 |
+ |
1513 |
+ if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) || |
1514 |
+ (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) { |
1515 |
+@@ -4971,6 +4975,10 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, |
1516 |
+ x = plane->state->crtc_x; |
1517 |
+ y = plane->state->crtc_y; |
1518 |
+ |
1519 |
++ if (x <= -amdgpu_crtc->max_cursor_width || |
1520 |
++ y <= -amdgpu_crtc->max_cursor_height) |
1521 |
++ return 0; |
1522 |
++ |
1523 |
+ if (crtc->primary->state) { |
1524 |
+ /* avivo cursor are offset into the total surface */ |
1525 |
+ x += crtc->primary->state->src_x >> 16; |
1526 |
+@@ -6327,6 +6335,10 @@ static bool should_reset_plane(struct drm_atomic_state *state, |
1527 |
+ if (!new_crtc_state) |
1528 |
+ return true; |
1529 |
+ |
1530 |
++ /* CRTC Degamma changes currently require us to recreate planes. */ |
1531 |
++ if (new_crtc_state->color_mgmt_changed) |
1532 |
++ return true; |
1533 |
++ |
1534 |
+ if (drm_atomic_crtc_needs_modeset(new_crtc_state)) |
1535 |
+ return true; |
1536 |
+ |
1537 |
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c |
1538 |
+index 18c775a950cc..ee6b646180b6 100644 |
1539 |
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c |
1540 |
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c |
1541 |
+@@ -1138,9 +1138,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c |
1542 |
+ const struct dc_link *link = context->streams[i]->link; |
1543 |
+ struct dc_stream_status *status; |
1544 |
+ |
1545 |
+- if (context->streams[i]->apply_seamless_boot_optimization) |
1546 |
+- context->streams[i]->apply_seamless_boot_optimization = false; |
1547 |
+- |
1548 |
+ if (!context->streams[i]->mode_changed) |
1549 |
+ continue; |
1550 |
+ |
1551 |
+@@ -1792,10 +1789,15 @@ static void commit_planes_for_stream(struct dc *dc, |
1552 |
+ if (dc->optimize_seamless_boot && surface_count > 0) { |
1553 |
+ /* Optimize seamless boot flag keeps clocks and watermarks high until |
1554 |
+ * first flip. After first flip, optimization is required to lower |
1555 |
+- * bandwidth. |
1556 |
++ * bandwidth. Important to note that it is expected UEFI will |
1557 |
++ * only light up a single display on POST, therefore we only expect |
1558 |
++ * one stream with seamless boot flag set. |
1559 |
+ */ |
1560 |
+- dc->optimize_seamless_boot = false; |
1561 |
+- dc->optimized_required = true; |
1562 |
++ if (stream->apply_seamless_boot_optimization) { |
1563 |
++ stream->apply_seamless_boot_optimization = false; |
1564 |
++ dc->optimize_seamless_boot = false; |
1565 |
++ dc->optimized_required = true; |
1566 |
++ } |
1567 |
+ } |
1568 |
+ |
1569 |
+ if (update_type == UPDATE_TYPE_FULL && !dc->optimize_seamless_boot) { |
1570 |
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c |
1571 |
+index b37ecc3ede61..a3ff33ff6da1 100644 |
1572 |
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c |
1573 |
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c |
1574 |
+@@ -960,6 +960,12 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) |
1575 |
+ |
1576 |
+ link->type = dc_connection_none; |
1577 |
+ sink_caps.signal = SIGNAL_TYPE_NONE; |
1578 |
++ /* When we unplug a passive DP-HDMI dongle connection, dongle_max_pix_clk |
1579 |
++ * is not cleared. If we emulate a DP signal on this connection, it thinks |
1580 |
++ * the dongle is still there and limits the number of modes we can emulate. |
1581 |
++ * Clear dongle_max_pix_clk on disconnect to fix this |
1582 |
++ */ |
1583 |
++ link->dongle_max_pix_clk = 0; |
1584 |
+ } |
1585 |
+ |
1586 |
+ LINK_INFO("link=%d, dc_sink_in=%p is now %s prev_sink=%p dpcd same=%d edid same=%d\n", |
1587 |
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c |
1588 |
+index 1ee544a32ebb..253311864cdd 100644 |
1589 |
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c |
1590 |
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c |
1591 |
+@@ -1624,8 +1624,7 @@ static bool decide_edp_link_settings(struct dc_link *link, struct dc_link_settin |
1592 |
+ uint32_t link_bw; |
1593 |
+ |
1594 |
+ if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14 || |
1595 |
+- link->dpcd_caps.edp_supported_link_rates_count == 0 || |
1596 |
+- link->dc->config.optimize_edp_link_rate == false) { |
1597 |
++ link->dpcd_caps.edp_supported_link_rates_count == 0) { |
1598 |
+ *link_setting = link->verified_link_cap; |
1599 |
+ return true; |
1600 |
+ } |
1601 |
+@@ -2597,7 +2596,8 @@ void detect_edp_sink_caps(struct dc_link *link) |
1602 |
+ memset(supported_link_rates, 0, sizeof(supported_link_rates)); |
1603 |
+ |
1604 |
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 && |
1605 |
+- link->dc->config.optimize_edp_link_rate) { |
1606 |
++ (link->dc->config.optimize_edp_link_rate || |
1607 |
++ link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)) { |
1608 |
+ // Read DPCD 00010h - 0001Fh 16 bytes at one shot |
1609 |
+ core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES, |
1610 |
+ supported_link_rates, sizeof(supported_link_rates)); |
1611 |
+@@ -2612,6 +2612,9 @@ void detect_edp_sink_caps(struct dc_link *link) |
1612 |
+ link_rate = linkRateInKHzToLinkRateMultiplier(link_rate_in_khz); |
1613 |
+ link->dpcd_caps.edp_supported_link_rates[link->dpcd_caps.edp_supported_link_rates_count] = link_rate; |
1614 |
+ link->dpcd_caps.edp_supported_link_rates_count++; |
1615 |
++ |
1616 |
++ if (link->reported_link_cap.link_rate < link_rate) |
1617 |
++ link->reported_link_cap.link_rate = link_rate; |
1618 |
+ } |
1619 |
+ } |
1620 |
+ } |
1621 |
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c |
1622 |
+index da96229db53a..2959c3c9390b 100644 |
1623 |
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c |
1624 |
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c |
1625 |
+@@ -473,6 +473,8 @@ void dce_abm_destroy(struct abm **abm) |
1626 |
+ { |
1627 |
+ struct dce_abm *abm_dce = TO_DCE_ABM(*abm); |
1628 |
+ |
1629 |
++ abm_dce->base.funcs->set_abm_immediate_disable(*abm); |
1630 |
++ |
1631 |
+ kfree(abm_dce); |
1632 |
+ *abm = NULL; |
1633 |
+ } |
1634 |
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c |
1635 |
+index 818536eea00a..c6a607cd0e4b 100644 |
1636 |
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c |
1637 |
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c |
1638 |
+@@ -388,6 +388,9 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu) |
1639 |
+ /* Set initialized ramping boundary value */ |
1640 |
+ REG_WRITE(MASTER_COMM_DATA_REG1, 0xFFFF); |
1641 |
+ |
1642 |
++ /* Set backlight ramping stepsize */ |
1643 |
++ REG_WRITE(MASTER_COMM_DATA_REG2, abm_gain_stepsize); |
1644 |
++ |
1645 |
+ /* Set command to initialize microcontroller */ |
1646 |
+ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, |
1647 |
+ MCP_INIT_DMCU); |
1648 |
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h |
1649 |
+index 60ce56f60ae3..5bd0df55aa5d 100644 |
1650 |
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h |
1651 |
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h |
1652 |
+@@ -263,4 +263,6 @@ struct dmcu *dcn10_dmcu_create( |
1653 |
+ |
1654 |
+ void dce_dmcu_destroy(struct dmcu **dmcu); |
1655 |
+ |
1656 |
++static const uint32_t abm_gain_stepsize = 0x0060; |
1657 |
++ |
1658 |
+ #endif /* _DCE_ABM_H_ */ |
1659 |
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c |
1660 |
+index 7ac50ab1b762..7d7e93c87c28 100644 |
1661 |
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c |
1662 |
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c |
1663 |
+@@ -242,6 +242,9 @@ static void build_prescale_params(struct ipp_prescale_params *prescale_params, |
1664 |
+ prescale_params->mode = IPP_PRESCALE_MODE_FIXED_UNSIGNED; |
1665 |
+ |
1666 |
+ switch (plane_state->format) { |
1667 |
++ case SURFACE_PIXEL_FORMAT_GRPH_RGB565: |
1668 |
++ prescale_params->scale = 0x2082; |
1669 |
++ break; |
1670 |
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: |
1671 |
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: |
1672 |
+ prescale_params->scale = 0x2020; |
1673 |
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c |
1674 |
+index 33d311cea28c..9e4d70a0055e 100644 |
1675 |
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c |
1676 |
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c |
1677 |
+@@ -23,6 +23,7 @@ |
1678 |
+ * |
1679 |
+ */ |
1680 |
+ |
1681 |
++#include <linux/delay.h> |
1682 |
+ #include "dm_services.h" |
1683 |
+ #include "core_types.h" |
1684 |
+ #include "resource.h" |
1685 |
+diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c |
1686 |
+index a1055413bade..31f867bb5afe 100644 |
1687 |
+--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c |
1688 |
++++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c |
1689 |
+@@ -1564,7 +1564,8 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, |
1690 |
+ |
1691 |
+ output_tf->type = TF_TYPE_DISTRIBUTED_POINTS; |
1692 |
+ |
1693 |
+- if (ramp && (mapUserRamp || ramp->type != GAMMA_RGB_256)) { |
1694 |
++ if (ramp && ramp->type != GAMMA_CS_TFM_1D && |
1695 |
++ (mapUserRamp || ramp->type != GAMMA_RGB_256)) { |
1696 |
+ rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS, |
1697 |
+ sizeof(*rgb_user), |
1698 |
+ GFP_KERNEL); |
1699 |
+diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c |
1700 |
+index b86cc705138c..d8b945596b09 100644 |
1701 |
+--- a/drivers/gpu/drm/bochs/bochs_drv.c |
1702 |
++++ b/drivers/gpu/drm/bochs/bochs_drv.c |
1703 |
+@@ -7,6 +7,7 @@ |
1704 |
+ #include <linux/slab.h> |
1705 |
+ #include <drm/drm_fb_helper.h> |
1706 |
+ #include <drm/drm_probe_helper.h> |
1707 |
++#include <drm/drm_atomic_helper.h> |
1708 |
+ |
1709 |
+ #include "bochs.h" |
1710 |
+ |
1711 |
+@@ -171,6 +172,7 @@ static void bochs_pci_remove(struct pci_dev *pdev) |
1712 |
+ { |
1713 |
+ struct drm_device *dev = pci_get_drvdata(pdev); |
1714 |
+ |
1715 |
++ drm_atomic_helper_shutdown(dev); |
1716 |
+ drm_dev_unregister(dev); |
1717 |
+ bochs_unload(dev); |
1718 |
+ drm_dev_put(dev); |
1719 |
+diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c |
1720 |
+index 1211b5379df1..8e3c5e599eba 100644 |
1721 |
+--- a/drivers/gpu/drm/bridge/sii902x.c |
1722 |
++++ b/drivers/gpu/drm/bridge/sii902x.c |
1723 |
+@@ -229,10 +229,11 @@ static void sii902x_bridge_mode_set(struct drm_bridge *bridge, |
1724 |
+ struct regmap *regmap = sii902x->regmap; |
1725 |
+ u8 buf[HDMI_INFOFRAME_SIZE(AVI)]; |
1726 |
+ struct hdmi_avi_infoframe frame; |
1727 |
++ u16 pixel_clock_10kHz = adj->clock / 10; |
1728 |
+ int ret; |
1729 |
+ |
1730 |
+- buf[0] = adj->clock; |
1731 |
+- buf[1] = adj->clock >> 8; |
1732 |
++ buf[0] = pixel_clock_10kHz & 0xff; |
1733 |
++ buf[1] = pixel_clock_10kHz >> 8; |
1734 |
+ buf[2] = adj->vrefresh; |
1735 |
+ buf[3] = 0x00; |
1736 |
+ buf[4] = adj->hdisplay; |
1737 |
+diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c |
1738 |
+index 4655bb1eb88f..f59a51e19dab 100644 |
1739 |
+--- a/drivers/gpu/drm/bridge/tc358767.c |
1740 |
++++ b/drivers/gpu/drm/bridge/tc358767.c |
1741 |
+@@ -1141,6 +1141,13 @@ static int tc_connector_get_modes(struct drm_connector *connector) |
1742 |
+ struct tc_data *tc = connector_to_tc(connector); |
1743 |
+ struct edid *edid; |
1744 |
+ unsigned int count; |
1745 |
++ int ret; |
1746 |
++ |
1747 |
++ ret = tc_get_display_props(tc); |
1748 |
++ if (ret < 0) { |
1749 |
++ dev_err(tc->dev, "failed to read display props: %d\n", ret); |
1750 |
++ return 0; |
1751 |
++ } |
1752 |
+ |
1753 |
+ if (tc->panel && tc->panel->funcs && tc->panel->funcs->get_modes) { |
1754 |
+ count = tc->panel->funcs->get_modes(tc->panel); |
1755 |
+diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c |
1756 |
+index a879aac21246..3a8af9978ebd 100644 |
1757 |
+--- a/drivers/gpu/drm/bridge/ti-tfp410.c |
1758 |
++++ b/drivers/gpu/drm/bridge/ti-tfp410.c |
1759 |
+@@ -372,7 +372,8 @@ static int tfp410_fini(struct device *dev) |
1760 |
+ { |
1761 |
+ struct tfp410 *dvi = dev_get_drvdata(dev); |
1762 |
+ |
1763 |
+- cancel_delayed_work_sync(&dvi->hpd_work); |
1764 |
++ if (dvi->hpd_irq >= 0) |
1765 |
++ cancel_delayed_work_sync(&dvi->hpd_work); |
1766 |
+ |
1767 |
+ drm_bridge_remove(&dvi->bridge); |
1768 |
+ |
1769 |
+diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c |
1770 |
+index 00e743153e94..fde298d9f510 100644 |
1771 |
+--- a/drivers/gpu/drm/drm_debugfs_crc.c |
1772 |
++++ b/drivers/gpu/drm/drm_debugfs_crc.c |
1773 |
+@@ -389,12 +389,13 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame, |
1774 |
+ struct drm_crtc_crc *crc = &crtc->crc; |
1775 |
+ struct drm_crtc_crc_entry *entry; |
1776 |
+ int head, tail; |
1777 |
++ unsigned long flags; |
1778 |
+ |
1779 |
+- spin_lock(&crc->lock); |
1780 |
++ spin_lock_irqsave(&crc->lock, flags); |
1781 |
+ |
1782 |
+ /* Caller may not have noticed yet that userspace has stopped reading */ |
1783 |
+ if (!crc->entries) { |
1784 |
+- spin_unlock(&crc->lock); |
1785 |
++ spin_unlock_irqrestore(&crc->lock, flags); |
1786 |
+ return -EINVAL; |
1787 |
+ } |
1788 |
+ |
1789 |
+@@ -405,7 +406,7 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame, |
1790 |
+ bool was_overflow = crc->overflow; |
1791 |
+ |
1792 |
+ crc->overflow = true; |
1793 |
+- spin_unlock(&crc->lock); |
1794 |
++ spin_unlock_irqrestore(&crc->lock, flags); |
1795 |
+ |
1796 |
+ if (!was_overflow) |
1797 |
+ DRM_ERROR("Overflow of CRC buffer, userspace reads too slow.\n"); |
1798 |
+@@ -421,7 +422,7 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame, |
1799 |
+ head = (head + 1) & (DRM_CRC_ENTRIES_NR - 1); |
1800 |
+ crc->head = head; |
1801 |
+ |
1802 |
+- spin_unlock(&crc->lock); |
1803 |
++ spin_unlock_irqrestore(&crc->lock, flags); |
1804 |
+ |
1805 |
+ wake_up_interruptible(&crc->wq); |
1806 |
+ |
1807 |
+diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c |
1808 |
+index 1e5593575d23..6192b7b20d84 100644 |
1809 |
+--- a/drivers/gpu/drm/drm_edid_load.c |
1810 |
++++ b/drivers/gpu/drm/drm_edid_load.c |
1811 |
+@@ -278,6 +278,8 @@ struct edid *drm_load_edid_firmware(struct drm_connector *connector) |
1812 |
+ * the last one found one as a fallback. |
1813 |
+ */ |
1814 |
+ fwstr = kstrdup(edid_firmware, GFP_KERNEL); |
1815 |
++ if (!fwstr) |
1816 |
++ return ERR_PTR(-ENOMEM); |
1817 |
+ edidstr = fwstr; |
1818 |
+ |
1819 |
+ while ((edidname = strsep(&edidstr, ","))) { |
1820 |
+diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c |
1821 |
+index c88e538b2ef4..81b48e273cbd 100644 |
1822 |
+--- a/drivers/gpu/drm/i915/i915_request.c |
1823 |
++++ b/drivers/gpu/drm/i915/i915_request.c |
1824 |
+@@ -443,7 +443,7 @@ void __i915_request_submit(struct i915_request *request) |
1825 |
+ */ |
1826 |
+ if (request->sched.semaphores && |
1827 |
+ i915_sw_fence_signaled(&request->semaphore)) |
1828 |
+- request->hw_context->saturated |= request->sched.semaphores; |
1829 |
++ engine->saturated |= request->sched.semaphores; |
1830 |
+ |
1831 |
+ /* We may be recursing from the signal callback of another i915 fence */ |
1832 |
+ spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING); |
1833 |
+@@ -829,7 +829,7 @@ already_busywaiting(struct i915_request *rq) |
1834 |
+ * |
1835 |
+ * See the are-we-too-late? check in __i915_request_submit(). |
1836 |
+ */ |
1837 |
+- return rq->sched.semaphores | rq->hw_context->saturated; |
1838 |
++ return rq->sched.semaphores | rq->engine->saturated; |
1839 |
+ } |
1840 |
+ |
1841 |
+ static int |
1842 |
+diff --git a/drivers/gpu/drm/i915/intel_context.c b/drivers/gpu/drm/i915/intel_context.c |
1843 |
+index 924cc556223a..8931e0fee873 100644 |
1844 |
+--- a/drivers/gpu/drm/i915/intel_context.c |
1845 |
++++ b/drivers/gpu/drm/i915/intel_context.c |
1846 |
+@@ -230,7 +230,6 @@ intel_context_init(struct intel_context *ce, |
1847 |
+ ce->gem_context = ctx; |
1848 |
+ ce->engine = engine; |
1849 |
+ ce->ops = engine->cops; |
1850 |
+- ce->saturated = 0; |
1851 |
+ |
1852 |
+ INIT_LIST_HEAD(&ce->signal_link); |
1853 |
+ INIT_LIST_HEAD(&ce->signals); |
1854 |
+diff --git a/drivers/gpu/drm/i915/intel_context_types.h b/drivers/gpu/drm/i915/intel_context_types.h |
1855 |
+index 339c7437fe82..fd47b9d49e09 100644 |
1856 |
+--- a/drivers/gpu/drm/i915/intel_context_types.h |
1857 |
++++ b/drivers/gpu/drm/i915/intel_context_types.h |
1858 |
+@@ -59,8 +59,6 @@ struct intel_context { |
1859 |
+ atomic_t pin_count; |
1860 |
+ struct mutex pin_mutex; /* guards pinning and associated on-gpuing */ |
1861 |
+ |
1862 |
+- intel_engine_mask_t saturated; /* submitting semaphores too late? */ |
1863 |
+- |
1864 |
+ /** |
1865 |
+ * active_tracker: Active tracker for the external rq activity |
1866 |
+ * on this intel_context object. |
1867 |
+diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c |
1868 |
+index eea9bec04f1b..9d4f12e982c3 100644 |
1869 |
+--- a/drivers/gpu/drm/i915/intel_engine_cs.c |
1870 |
++++ b/drivers/gpu/drm/i915/intel_engine_cs.c |
1871 |
+@@ -1200,6 +1200,7 @@ void intel_engines_park(struct drm_i915_private *i915) |
1872 |
+ |
1873 |
+ i915_gem_batch_pool_fini(&engine->batch_pool); |
1874 |
+ engine->execlists.no_priolist = false; |
1875 |
++ engine->saturated = 0; |
1876 |
+ } |
1877 |
+ |
1878 |
+ i915->gt.active_engines = 0; |
1879 |
+diff --git a/drivers/gpu/drm/i915/intel_engine_types.h b/drivers/gpu/drm/i915/intel_engine_types.h |
1880 |
+index 1f970c76b6a6..4270ddb45f41 100644 |
1881 |
+--- a/drivers/gpu/drm/i915/intel_engine_types.h |
1882 |
++++ b/drivers/gpu/drm/i915/intel_engine_types.h |
1883 |
+@@ -285,6 +285,8 @@ struct intel_engine_cs { |
1884 |
+ struct intel_context *kernel_context; /* pinned */ |
1885 |
+ struct intel_context *preempt_context; /* pinned; optional */ |
1886 |
+ |
1887 |
++ intel_engine_mask_t saturated; /* submitting semaphores too late? */ |
1888 |
++ |
1889 |
+ struct drm_i915_gem_object *default_state; |
1890 |
+ void *pinned_default_state; |
1891 |
+ |
1892 |
+diff --git a/drivers/gpu/drm/lima/lima_pp.c b/drivers/gpu/drm/lima/lima_pp.c |
1893 |
+index d29721e177bf..8fef224b93c8 100644 |
1894 |
+--- a/drivers/gpu/drm/lima/lima_pp.c |
1895 |
++++ b/drivers/gpu/drm/lima/lima_pp.c |
1896 |
+@@ -64,7 +64,13 @@ static irqreturn_t lima_pp_bcast_irq_handler(int irq, void *data) |
1897 |
+ struct lima_ip *pp_bcast = data; |
1898 |
+ struct lima_device *dev = pp_bcast->dev; |
1899 |
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp; |
1900 |
+- struct drm_lima_m450_pp_frame *frame = pipe->current_task->frame; |
1901 |
++ struct drm_lima_m450_pp_frame *frame; |
1902 |
++ |
1903 |
++ /* for shared irq case */ |
1904 |
++ if (!pipe->current_task) |
1905 |
++ return IRQ_NONE; |
1906 |
++ |
1907 |
++ frame = pipe->current_task->frame; |
1908 |
+ |
1909 |
+ for (i = 0; i < frame->num_pp; i++) { |
1910 |
+ struct lima_ip *ip = pipe->processor[i]; |
1911 |
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c |
1912 |
+index 38e2cfa9cec7..6910d0468e3c 100644 |
1913 |
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c |
1914 |
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c |
1915 |
+@@ -74,7 +74,7 @@ bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu) |
1916 |
+ u32 val; |
1917 |
+ |
1918 |
+ /* This can be called from gpu state code so make sure GMU is valid */ |
1919 |
+- if (IS_ERR_OR_NULL(gmu->mmio)) |
1920 |
++ if (!gmu->initialized) |
1921 |
+ return false; |
1922 |
+ |
1923 |
+ val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS); |
1924 |
+@@ -90,7 +90,7 @@ bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu) |
1925 |
+ u32 val; |
1926 |
+ |
1927 |
+ /* This can be called from gpu state code so make sure GMU is valid */ |
1928 |
+- if (IS_ERR_OR_NULL(gmu->mmio)) |
1929 |
++ if (!gmu->initialized) |
1930 |
+ return false; |
1931 |
+ |
1932 |
+ val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS); |
1933 |
+@@ -504,8 +504,10 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu) |
1934 |
+ wmb(); |
1935 |
+ |
1936 |
+ err: |
1937 |
+- devm_iounmap(gmu->dev, pdcptr); |
1938 |
+- devm_iounmap(gmu->dev, seqptr); |
1939 |
++ if (!IS_ERR_OR_NULL(pdcptr)) |
1940 |
++ devm_iounmap(gmu->dev, pdcptr); |
1941 |
++ if (!IS_ERR_OR_NULL(seqptr)) |
1942 |
++ devm_iounmap(gmu->dev, seqptr); |
1943 |
+ } |
1944 |
+ |
1945 |
+ /* |
1946 |
+@@ -695,7 +697,7 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) |
1947 |
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu; |
1948 |
+ int status, ret; |
1949 |
+ |
1950 |
+- if (WARN(!gmu->mmio, "The GMU is not set up yet\n")) |
1951 |
++ if (WARN(!gmu->initialized, "The GMU is not set up yet\n")) |
1952 |
+ return 0; |
1953 |
+ |
1954 |
+ gmu->hung = false; |
1955 |
+@@ -765,7 +767,7 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu) |
1956 |
+ { |
1957 |
+ u32 reg; |
1958 |
+ |
1959 |
+- if (!gmu->mmio) |
1960 |
++ if (!gmu->initialized) |
1961 |
+ return true; |
1962 |
+ |
1963 |
+ reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS); |
1964 |
+@@ -1227,7 +1229,7 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu) |
1965 |
+ { |
1966 |
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu; |
1967 |
+ |
1968 |
+- if (IS_ERR_OR_NULL(gmu->mmio)) |
1969 |
++ if (!gmu->initialized) |
1970 |
+ return; |
1971 |
+ |
1972 |
+ a6xx_gmu_stop(a6xx_gpu); |
1973 |
+@@ -1245,6 +1247,8 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu) |
1974 |
+ iommu_detach_device(gmu->domain, gmu->dev); |
1975 |
+ |
1976 |
+ iommu_domain_free(gmu->domain); |
1977 |
++ |
1978 |
++ gmu->initialized = false; |
1979 |
+ } |
1980 |
+ |
1981 |
+ int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node) |
1982 |
+@@ -1309,6 +1313,8 @@ int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node) |
1983 |
+ /* Set up the HFI queues */ |
1984 |
+ a6xx_hfi_init(gmu); |
1985 |
+ |
1986 |
++ gmu->initialized = true; |
1987 |
++ |
1988 |
+ return 0; |
1989 |
+ err: |
1990 |
+ a6xx_gmu_memory_free(gmu, gmu->hfi); |
1991 |
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h |
1992 |
+index bedd8e6a63aa..39a26dd63674 100644 |
1993 |
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h |
1994 |
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h |
1995 |
+@@ -75,6 +75,7 @@ struct a6xx_gmu { |
1996 |
+ |
1997 |
+ struct a6xx_hfi_queue queues[2]; |
1998 |
+ |
1999 |
++ bool initialized; |
2000 |
+ bool hung; |
2001 |
+ }; |
2002 |
+ |
2003 |
+diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c |
2004 |
+index a9c0ac937b00..9acbbc0f3232 100644 |
2005 |
+--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c |
2006 |
++++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c |
2007 |
+@@ -56,7 +56,6 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname, |
2008 |
+ return ret; |
2009 |
+ |
2010 |
+ mem_phys = r.start; |
2011 |
+- mem_size = resource_size(&r); |
2012 |
+ |
2013 |
+ /* Request the MDT file for the firmware */ |
2014 |
+ fw = adreno_request_fw(to_adreno_gpu(gpu), fwname); |
2015 |
+@@ -72,6 +71,13 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname, |
2016 |
+ goto out; |
2017 |
+ } |
2018 |
+ |
2019 |
++ if (mem_size > resource_size(&r)) { |
2020 |
++ DRM_DEV_ERROR(dev, |
2021 |
++ "memory region is too small to load the MDT\n"); |
2022 |
++ ret = -E2BIG; |
2023 |
++ goto out; |
2024 |
++ } |
2025 |
++ |
2026 |
+ /* Allocate memory for the firmware image */ |
2027 |
+ mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC); |
2028 |
+ if (!mem_region) { |
2029 |
+diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c |
2030 |
+index f38d7367bd3b..4a0fe8a25ad7 100644 |
2031 |
+--- a/drivers/gpu/drm/msm/msm_drv.c |
2032 |
++++ b/drivers/gpu/drm/msm/msm_drv.c |
2033 |
+@@ -1306,16 +1306,24 @@ static int msm_pdev_probe(struct platform_device *pdev) |
2034 |
+ |
2035 |
+ ret = add_gpu_components(&pdev->dev, &match); |
2036 |
+ if (ret) |
2037 |
+- return ret; |
2038 |
++ goto fail; |
2039 |
+ |
2040 |
+ /* on all devices that I am aware of, iommu's which can map |
2041 |
+ * any address the cpu can see are used: |
2042 |
+ */ |
2043 |
+ ret = dma_set_mask_and_coherent(&pdev->dev, ~0); |
2044 |
+ if (ret) |
2045 |
+- return ret; |
2046 |
++ goto fail; |
2047 |
++ |
2048 |
++ ret = component_master_add_with_match(&pdev->dev, &msm_drm_ops, match); |
2049 |
++ if (ret) |
2050 |
++ goto fail; |
2051 |
+ |
2052 |
+- return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match); |
2053 |
++ return 0; |
2054 |
++ |
2055 |
++fail: |
2056 |
++ of_platform_depopulate(&pdev->dev); |
2057 |
++ return ret; |
2058 |
+ } |
2059 |
+ |
2060 |
+ static int msm_pdev_remove(struct platform_device *pdev) |
2061 |
+diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c |
2062 |
+index 8712af79a49c..4c43dd282acc 100644 |
2063 |
+--- a/drivers/gpu/drm/omapdrm/omap_crtc.c |
2064 |
++++ b/drivers/gpu/drm/omapdrm/omap_crtc.c |
2065 |
+@@ -384,10 +384,20 @@ static enum drm_mode_status omap_crtc_mode_valid(struct drm_crtc *crtc, |
2066 |
+ int r; |
2067 |
+ |
2068 |
+ drm_display_mode_to_videomode(mode, &vm); |
2069 |
+- r = priv->dispc_ops->mgr_check_timings(priv->dispc, omap_crtc->channel, |
2070 |
+- &vm); |
2071 |
+- if (r) |
2072 |
+- return r; |
2073 |
++ |
2074 |
++ /* |
2075 |
++ * DSI might not call this, since the supplied mode is not a |
2076 |
++ * valid DISPC mode. DSI will calculate and configure the |
2077 |
++ * proper DISPC mode later. |
2078 |
++ */ |
2079 |
++ if (omap_crtc->pipe->output->next == NULL || |
2080 |
++ omap_crtc->pipe->output->next->type != OMAP_DISPLAY_TYPE_DSI) { |
2081 |
++ r = priv->dispc_ops->mgr_check_timings(priv->dispc, |
2082 |
++ omap_crtc->channel, |
2083 |
++ &vm); |
2084 |
++ if (r) |
2085 |
++ return r; |
2086 |
++ } |
2087 |
+ |
2088 |
+ /* Check for bandwidth limit */ |
2089 |
+ if (priv->max_bandwidth) { |
2090 |
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c |
2091 |
+index 569be4efd8d1..397a3086eac8 100644 |
2092 |
+--- a/drivers/gpu/drm/panel/panel-simple.c |
2093 |
++++ b/drivers/gpu/drm/panel/panel-simple.c |
2094 |
+@@ -446,6 +446,32 @@ static const struct panel_desc ampire_am800480r3tmqwa1h = { |
2095 |
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18, |
2096 |
+ }; |
2097 |
+ |
2098 |
++static const struct display_timing santek_st0700i5y_rbslw_f_timing = { |
2099 |
++ .pixelclock = { 26400000, 33300000, 46800000 }, |
2100 |
++ .hactive = { 800, 800, 800 }, |
2101 |
++ .hfront_porch = { 16, 210, 354 }, |
2102 |
++ .hback_porch = { 45, 36, 6 }, |
2103 |
++ .hsync_len = { 1, 10, 40 }, |
2104 |
++ .vactive = { 480, 480, 480 }, |
2105 |
++ .vfront_porch = { 7, 22, 147 }, |
2106 |
++ .vback_porch = { 22, 13, 3 }, |
2107 |
++ .vsync_len = { 1, 10, 20 }, |
2108 |
++ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW | |
2109 |
++ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
2110 |
++}; |
2111 |
++ |
2112 |
++static const struct panel_desc armadeus_st0700_adapt = { |
2113 |
++ .timings = &santek_st0700i5y_rbslw_f_timing, |
2114 |
++ .num_timings = 1, |
2115 |
++ .bpc = 6, |
2116 |
++ .size = { |
2117 |
++ .width = 154, |
2118 |
++ .height = 86, |
2119 |
++ }, |
2120 |
++ .bus_format = MEDIA_BUS_FMT_RGB666_1X18, |
2121 |
++ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE, |
2122 |
++}; |
2123 |
++ |
2124 |
+ static const struct drm_display_mode auo_b101aw03_mode = { |
2125 |
+ .clock = 51450, |
2126 |
+ .hdisplay = 1024, |
2127 |
+@@ -2570,6 +2596,9 @@ static const struct of_device_id platform_of_match[] = { |
2128 |
+ }, { |
2129 |
+ .compatible = "arm,rtsm-display", |
2130 |
+ .data = &arm_rtsm, |
2131 |
++ }, { |
2132 |
++ .compatible = "armadeus,st0700-adapt", |
2133 |
++ .data = &armadeus_st0700_adapt, |
2134 |
+ }, { |
2135 |
+ .compatible = "auo,b101aw03", |
2136 |
+ .data = &auo_b101aw03, |
2137 |
+@@ -3098,7 +3127,14 @@ static int panel_simple_dsi_probe(struct mipi_dsi_device *dsi) |
2138 |
+ dsi->format = desc->format; |
2139 |
+ dsi->lanes = desc->lanes; |
2140 |
+ |
2141 |
+- return mipi_dsi_attach(dsi); |
2142 |
++ err = mipi_dsi_attach(dsi); |
2143 |
++ if (err) { |
2144 |
++ struct panel_simple *panel = dev_get_drvdata(&dsi->dev); |
2145 |
++ |
2146 |
++ drm_panel_remove(&panel->base); |
2147 |
++ } |
2148 |
++ |
2149 |
++ return err; |
2150 |
+ } |
2151 |
+ |
2152 |
+ static int panel_simple_dsi_remove(struct mipi_dsi_device *dsi) |
2153 |
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c |
2154 |
+index 12ed5265a90b..09046135e720 100644 |
2155 |
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c |
2156 |
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c |
2157 |
+@@ -1011,7 +1011,8 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc, |
2158 |
+ struct vop *vop = to_vop(crtc); |
2159 |
+ |
2160 |
+ adjusted_mode->clock = |
2161 |
+- clk_round_rate(vop->dclk, mode->clock * 1000) / 1000; |
2162 |
++ DIV_ROUND_UP(clk_round_rate(vop->dclk, mode->clock * 1000), |
2163 |
++ 1000); |
2164 |
+ |
2165 |
+ return true; |
2166 |
+ } |
2167 |
+diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h |
2168 |
+index b69ae10ca238..d724fb3de44e 100644 |
2169 |
+--- a/drivers/gpu/drm/virtio/virtgpu_drv.h |
2170 |
++++ b/drivers/gpu/drm/virtio/virtgpu_drv.h |
2171 |
+@@ -102,7 +102,6 @@ struct virtio_gpu_fence { |
2172 |
+ struct dma_fence f; |
2173 |
+ struct virtio_gpu_fence_driver *drv; |
2174 |
+ struct list_head node; |
2175 |
+- uint64_t seq; |
2176 |
+ }; |
2177 |
+ #define to_virtio_fence(x) \ |
2178 |
+ container_of(x, struct virtio_gpu_fence, f) |
2179 |
+diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c |
2180 |
+index 87d1966192f4..72b4f7561432 100644 |
2181 |
+--- a/drivers/gpu/drm/virtio/virtgpu_fence.c |
2182 |
++++ b/drivers/gpu/drm/virtio/virtgpu_fence.c |
2183 |
+@@ -40,16 +40,14 @@ bool virtio_fence_signaled(struct dma_fence *f) |
2184 |
+ { |
2185 |
+ struct virtio_gpu_fence *fence = to_virtio_fence(f); |
2186 |
+ |
2187 |
+- if (atomic64_read(&fence->drv->last_seq) >= fence->seq) |
2188 |
++ if (atomic64_read(&fence->drv->last_seq) >= fence->f.seqno) |
2189 |
+ return true; |
2190 |
+ return false; |
2191 |
+ } |
2192 |
+ |
2193 |
+ static void virtio_fence_value_str(struct dma_fence *f, char *str, int size) |
2194 |
+ { |
2195 |
+- struct virtio_gpu_fence *fence = to_virtio_fence(f); |
2196 |
+- |
2197 |
+- snprintf(str, size, "%llu", fence->seq); |
2198 |
++ snprintf(str, size, "%llu", f->seqno); |
2199 |
+ } |
2200 |
+ |
2201 |
+ static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size) |
2202 |
+@@ -76,6 +74,11 @@ struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev) |
2203 |
+ return fence; |
2204 |
+ |
2205 |
+ fence->drv = drv; |
2206 |
++ |
2207 |
++ /* This only partially initializes the fence because the seqno is |
2208 |
++ * unknown yet. The fence must not be used outside of the driver |
2209 |
++ * until virtio_gpu_fence_emit is called. |
2210 |
++ */ |
2211 |
+ dma_fence_init(&fence->f, &virtio_fence_ops, &drv->lock, drv->context, 0); |
2212 |
+ |
2213 |
+ return fence; |
2214 |
+@@ -89,13 +92,13 @@ int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev, |
2215 |
+ unsigned long irq_flags; |
2216 |
+ |
2217 |
+ spin_lock_irqsave(&drv->lock, irq_flags); |
2218 |
+- fence->seq = ++drv->sync_seq; |
2219 |
++ fence->f.seqno = ++drv->sync_seq; |
2220 |
+ dma_fence_get(&fence->f); |
2221 |
+ list_add_tail(&fence->node, &drv->fences); |
2222 |
+ spin_unlock_irqrestore(&drv->lock, irq_flags); |
2223 |
+ |
2224 |
+ cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE); |
2225 |
+- cmd_hdr->fence_id = cpu_to_le64(fence->seq); |
2226 |
++ cmd_hdr->fence_id = cpu_to_le64(fence->f.seqno); |
2227 |
+ return 0; |
2228 |
+ } |
2229 |
+ |
2230 |
+@@ -109,7 +112,7 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev, |
2231 |
+ spin_lock_irqsave(&drv->lock, irq_flags); |
2232 |
+ atomic64_set(&vgdev->fence_drv.last_seq, last_seq); |
2233 |
+ list_for_each_entry_safe(fence, tmp, &drv->fences, node) { |
2234 |
+- if (last_seq < fence->seq) |
2235 |
++ if (last_seq < fence->f.seqno) |
2236 |
+ continue; |
2237 |
+ dma_fence_signal_locked(&fence->f); |
2238 |
+ list_del(&fence->node); |
2239 |
+diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c |
2240 |
+index 949a264985fc..19fbffd0f7a3 100644 |
2241 |
+--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c |
2242 |
++++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c |
2243 |
+@@ -542,6 +542,9 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev, |
2244 |
+ if (!ret) |
2245 |
+ return -EBUSY; |
2246 |
+ |
2247 |
++ /* is_valid check must precede the copy of the cache entry. */ |
2248 |
++ smp_rmb(); |
2249 |
++ |
2250 |
+ ptr = cache_ent->caps_cache; |
2251 |
+ |
2252 |
+ copy_exit: |
2253 |
+diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c |
2254 |
+index 5bb0f0a084e9..a7684f9c80db 100644 |
2255 |
+--- a/drivers/gpu/drm/virtio/virtgpu_vq.c |
2256 |
++++ b/drivers/gpu/drm/virtio/virtgpu_vq.c |
2257 |
+@@ -583,6 +583,8 @@ static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev, |
2258 |
+ cache_ent->id == le32_to_cpu(cmd->capset_id)) { |
2259 |
+ memcpy(cache_ent->caps_cache, resp->capset_data, |
2260 |
+ cache_ent->size); |
2261 |
++ /* Copy must occur before is_valid is signalled. */ |
2262 |
++ smp_wmb(); |
2263 |
+ atomic_set(&cache_ent->is_valid, 1); |
2264 |
+ break; |
2265 |
+ } |
2266 |
+diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c |
2267 |
+index bb66dbcd5e3f..e447b7588d06 100644 |
2268 |
+--- a/drivers/gpu/drm/vkms/vkms_crtc.c |
2269 |
++++ b/drivers/gpu/drm/vkms/vkms_crtc.c |
2270 |
+@@ -15,6 +15,10 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer) |
2271 |
+ |
2272 |
+ spin_lock(&output->lock); |
2273 |
+ |
2274 |
++ ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer, |
2275 |
++ output->period_ns); |
2276 |
++ WARN_ON(ret_overrun != 1); |
2277 |
++ |
2278 |
+ ret = drm_crtc_handle_vblank(crtc); |
2279 |
+ if (!ret) |
2280 |
+ DRM_ERROR("vkms failure on handling vblank"); |
2281 |
+@@ -35,10 +39,6 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer) |
2282 |
+ DRM_WARN("failed to queue vkms_crc_work_handle"); |
2283 |
+ } |
2284 |
+ |
2285 |
+- ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer, |
2286 |
+- output->period_ns); |
2287 |
+- WARN_ON(ret_overrun != 1); |
2288 |
+- |
2289 |
+ spin_unlock(&output->lock); |
2290 |
+ |
2291 |
+ return HRTIMER_RESTART; |
2292 |
+@@ -74,11 +74,21 @@ bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe, |
2293 |
+ { |
2294 |
+ struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev); |
2295 |
+ struct vkms_output *output = &vkmsdev->output; |
2296 |
++ struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; |
2297 |
+ |
2298 |
+ *vblank_time = output->vblank_hrtimer.node.expires; |
2299 |
+ |
2300 |
+- if (!in_vblank_irq) |
2301 |
+- *vblank_time -= output->period_ns; |
2302 |
++ if (WARN_ON(*vblank_time == vblank->time)) |
2303 |
++ return true; |
2304 |
++ |
2305 |
++ /* |
2306 |
++ * To prevent races we roll the hrtimer forward before we do any |
2307 |
++ * interrupt processing - this is how real hw works (the interrupt is |
2308 |
++ * only generated after all the vblank registers are updated) and what |
2309 |
++ * the vblank core expects. Therefore we need to always correct the |
2310 |
++ * timestamp by one frame. |
2311 |
++ */ |
2312 |
++ *vblank_time -= output->period_ns; |
2313 |
+ |
2314 |
+ return true; |
2315 |
+ } |
2316 |
+diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c |
2317 |
+index 9797ccb0a073..6387302c1245 100644 |
2318 |
+--- a/drivers/gpu/host1x/bus.c |
2319 |
++++ b/drivers/gpu/host1x/bus.c |
2320 |
+@@ -414,6 +414,9 @@ static int host1x_device_add(struct host1x *host1x, |
2321 |
+ |
2322 |
+ of_dma_configure(&device->dev, host1x->dev->of_node, true); |
2323 |
+ |
2324 |
++ device->dev.dma_parms = &device->dma_parms; |
2325 |
++ dma_set_max_seg_size(&device->dev, SZ_4M); |
2326 |
++ |
2327 |
+ err = host1x_device_parse_dt(device, driver); |
2328 |
+ if (err < 0) { |
2329 |
+ kfree(device); |
2330 |
+diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c |
2331 |
+index 1c8f708f212b..ee2412b7459c 100644 |
2332 |
+--- a/drivers/i2c/busses/i2c-nvidia-gpu.c |
2333 |
++++ b/drivers/i2c/busses/i2c-nvidia-gpu.c |
2334 |
+@@ -51,6 +51,7 @@ struct gpu_i2c_dev { |
2335 |
+ void __iomem *regs; |
2336 |
+ struct i2c_adapter adapter; |
2337 |
+ struct i2c_board_info *gpu_ccgx_ucsi; |
2338 |
++ struct i2c_client *ccgx_client; |
2339 |
+ }; |
2340 |
+ |
2341 |
+ static void gpu_enable_i2c_bus(struct gpu_i2c_dev *i2cd) |
2342 |
+@@ -261,8 +262,6 @@ static const struct property_entry ccgx_props[] = { |
2343 |
+ |
2344 |
+ static int gpu_populate_client(struct gpu_i2c_dev *i2cd, int irq) |
2345 |
+ { |
2346 |
+- struct i2c_client *ccgx_client; |
2347 |
+- |
2348 |
+ i2cd->gpu_ccgx_ucsi = devm_kzalloc(i2cd->dev, |
2349 |
+ sizeof(*i2cd->gpu_ccgx_ucsi), |
2350 |
+ GFP_KERNEL); |
2351 |
+@@ -274,8 +273,8 @@ static int gpu_populate_client(struct gpu_i2c_dev *i2cd, int irq) |
2352 |
+ i2cd->gpu_ccgx_ucsi->addr = 0x8; |
2353 |
+ i2cd->gpu_ccgx_ucsi->irq = irq; |
2354 |
+ i2cd->gpu_ccgx_ucsi->properties = ccgx_props; |
2355 |
+- ccgx_client = i2c_new_device(&i2cd->adapter, i2cd->gpu_ccgx_ucsi); |
2356 |
+- if (!ccgx_client) |
2357 |
++ i2cd->ccgx_client = i2c_new_device(&i2cd->adapter, i2cd->gpu_ccgx_ucsi); |
2358 |
++ if (!i2cd->ccgx_client) |
2359 |
+ return -ENODEV; |
2360 |
+ |
2361 |
+ return 0; |
2362 |
+@@ -354,6 +353,13 @@ static __maybe_unused int gpu_i2c_resume(struct device *dev) |
2363 |
+ struct gpu_i2c_dev *i2cd = dev_get_drvdata(dev); |
2364 |
+ |
2365 |
+ gpu_enable_i2c_bus(i2cd); |
2366 |
++ /* |
2367 |
++ * Runtime resume the ccgx client so that it can notice any |
2368 |
++ * connector change event. Old ccg firmware has a known issue |
2369 |
++ * of not triggering an interrupt when a device is connected |
2370 |
++ * during runtime resume of the controller. |
2371 |
++ */ |
2372 |
++ pm_request_resume(&i2cd->ccgx_client->dev); |
2373 |
+ return 0; |
2374 |
+ } |
2375 |
+ |
2376 |
+diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c |
2377 |
+index 48337bef5b87..3d90c0bb049e 100644 |
2378 |
+--- a/drivers/i2c/busses/i2c-stm32f7.c |
2379 |
++++ b/drivers/i2c/busses/i2c-stm32f7.c |
2380 |
+@@ -25,7 +25,6 @@ |
2381 |
+ #include <linux/module.h> |
2382 |
+ #include <linux/of.h> |
2383 |
+ #include <linux/of_address.h> |
2384 |
+-#include <linux/of_irq.h> |
2385 |
+ #include <linux/of_platform.h> |
2386 |
+ #include <linux/platform_device.h> |
2387 |
+ #include <linux/pinctrl/consumer.h> |
2388 |
+@@ -1816,15 +1815,14 @@ static struct i2c_algorithm stm32f7_i2c_algo = { |
2389 |
+ |
2390 |
+ static int stm32f7_i2c_probe(struct platform_device *pdev) |
2391 |
+ { |
2392 |
+- struct device_node *np = pdev->dev.of_node; |
2393 |
+ struct stm32f7_i2c_dev *i2c_dev; |
2394 |
+ const struct stm32f7_i2c_setup *setup; |
2395 |
+ struct resource *res; |
2396 |
+- u32 irq_error, irq_event, clk_rate, rise_time, fall_time; |
2397 |
++ u32 clk_rate, rise_time, fall_time; |
2398 |
+ struct i2c_adapter *adap; |
2399 |
+ struct reset_control *rst; |
2400 |
+ dma_addr_t phy_addr; |
2401 |
+- int ret; |
2402 |
++ int irq_error, irq_event, ret; |
2403 |
+ |
2404 |
+ i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*i2c_dev), GFP_KERNEL); |
2405 |
+ if (!i2c_dev) |
2406 |
+@@ -1836,16 +1834,20 @@ static int stm32f7_i2c_probe(struct platform_device *pdev) |
2407 |
+ return PTR_ERR(i2c_dev->base); |
2408 |
+ phy_addr = (dma_addr_t)res->start; |
2409 |
+ |
2410 |
+- irq_event = irq_of_parse_and_map(np, 0); |
2411 |
+- if (!irq_event) { |
2412 |
+- dev_err(&pdev->dev, "IRQ event missing or invalid\n"); |
2413 |
+- return -EINVAL; |
2414 |
++ irq_event = platform_get_irq(pdev, 0); |
2415 |
++ if (irq_event <= 0) { |
2416 |
++ if (irq_event != -EPROBE_DEFER) |
2417 |
++ dev_err(&pdev->dev, "Failed to get IRQ event: %d\n", |
2418 |
++ irq_event); |
2419 |
++ return irq_event ? : -ENOENT; |
2420 |
+ } |
2421 |
+ |
2422 |
+- irq_error = irq_of_parse_and_map(np, 1); |
2423 |
+- if (!irq_error) { |
2424 |
+- dev_err(&pdev->dev, "IRQ error missing or invalid\n"); |
2425 |
+- return -EINVAL; |
2426 |
++ irq_error = platform_get_irq(pdev, 1); |
2427 |
++ if (irq_error <= 0) { |
2428 |
++ if (irq_error != -EPROBE_DEFER) |
2429 |
++ dev_err(&pdev->dev, "Failed to get IRQ error: %d\n", |
2430 |
++ irq_error); |
2431 |
++ return irq_error ? : -ENOENT; |
2432 |
+ } |
2433 |
+ |
2434 |
+ i2c_dev->clk = devm_clk_get(&pdev->dev, NULL); |
2435 |
+diff --git a/drivers/iio/accel/adxl372.c b/drivers/iio/accel/adxl372.c |
2436 |
+index 3b84cb243a87..055227cb3d43 100644 |
2437 |
+--- a/drivers/iio/accel/adxl372.c |
2438 |
++++ b/drivers/iio/accel/adxl372.c |
2439 |
+@@ -782,10 +782,14 @@ static int adxl372_buffer_postenable(struct iio_dev *indio_dev) |
2440 |
+ unsigned int mask; |
2441 |
+ int i, ret; |
2442 |
+ |
2443 |
+- ret = adxl372_set_interrupts(st, ADXL372_INT1_MAP_FIFO_FULL_MSK, 0); |
2444 |
++ ret = iio_triggered_buffer_postenable(indio_dev); |
2445 |
+ if (ret < 0) |
2446 |
+ return ret; |
2447 |
+ |
2448 |
++ ret = adxl372_set_interrupts(st, ADXL372_INT1_MAP_FIFO_FULL_MSK, 0); |
2449 |
++ if (ret < 0) |
2450 |
++ goto err; |
2451 |
++ |
2452 |
+ mask = *indio_dev->active_scan_mask; |
2453 |
+ |
2454 |
+ for (i = 0; i < ARRAY_SIZE(adxl372_axis_lookup_table); i++) { |
2455 |
+@@ -793,8 +797,10 @@ static int adxl372_buffer_postenable(struct iio_dev *indio_dev) |
2456 |
+ break; |
2457 |
+ } |
2458 |
+ |
2459 |
+- if (i == ARRAY_SIZE(adxl372_axis_lookup_table)) |
2460 |
+- return -EINVAL; |
2461 |
++ if (i == ARRAY_SIZE(adxl372_axis_lookup_table)) { |
2462 |
++ ret = -EINVAL; |
2463 |
++ goto err; |
2464 |
++ } |
2465 |
+ |
2466 |
+ st->fifo_format = adxl372_axis_lookup_table[i].fifo_format; |
2467 |
+ st->fifo_set_size = bitmap_weight(indio_dev->active_scan_mask, |
2468 |
+@@ -814,26 +820,25 @@ static int adxl372_buffer_postenable(struct iio_dev *indio_dev) |
2469 |
+ if (ret < 0) { |
2470 |
+ st->fifo_mode = ADXL372_FIFO_BYPASSED; |
2471 |
+ adxl372_set_interrupts(st, 0, 0); |
2472 |
+- return ret; |
2473 |
++ goto err; |
2474 |
+ } |
2475 |
+ |
2476 |
+- return iio_triggered_buffer_postenable(indio_dev); |
2477 |
++ return 0; |
2478 |
++ |
2479 |
++err: |
2480 |
++ iio_triggered_buffer_predisable(indio_dev); |
2481 |
++ return ret; |
2482 |
+ } |
2483 |
+ |
2484 |
+ static int adxl372_buffer_predisable(struct iio_dev *indio_dev) |
2485 |
+ { |
2486 |
+ struct adxl372_state *st = iio_priv(indio_dev); |
2487 |
+- int ret; |
2488 |
+- |
2489 |
+- ret = iio_triggered_buffer_predisable(indio_dev); |
2490 |
+- if (ret < 0) |
2491 |
+- return ret; |
2492 |
+ |
2493 |
+ adxl372_set_interrupts(st, 0, 0); |
2494 |
+ st->fifo_mode = ADXL372_FIFO_BYPASSED; |
2495 |
+ adxl372_configure_fifo(st); |
2496 |
+ |
2497 |
+- return 0; |
2498 |
++ return iio_triggered_buffer_predisable(indio_dev); |
2499 |
+ } |
2500 |
+ |
2501 |
+ static const struct iio_buffer_setup_ops adxl372_buffer_ops = { |
2502 |
+diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c |
2503 |
+index 19adc2b23472..588907cc3b6b 100644 |
2504 |
+--- a/drivers/iio/adc/stm32-dfsdm-adc.c
++++ b/drivers/iio/adc/stm32-dfsdm-adc.c
+@@ -1456,6 +1456,12 @@ static int stm32_dfsdm_adc_probe(struct platform_device *pdev)
+ * So IRQ associated to filter instance 0 is dedicated to the Filter 0.
+ */
+ irq = platform_get_irq(pdev, 0);
++ if (irq < 0) {
++ if (irq != -EPROBE_DEFER)
++ dev_err(dev, "Failed to get IRQ: %d\n", irq);
++ return irq;
++ }
++
+ ret = devm_request_irq(dev, irq, stm32_dfsdm_irq,
+ 0, pdev->name, adc);
+ if (ret < 0) {
+diff --git a/drivers/iio/adc/stm32-dfsdm-core.c b/drivers/iio/adc/stm32-dfsdm-core.c
+index 0a4d3746d21c..26e2011c5868 100644
+--- a/drivers/iio/adc/stm32-dfsdm-core.c
++++ b/drivers/iio/adc/stm32-dfsdm-core.c
+@@ -233,6 +233,8 @@ static int stm32_dfsdm_parse_of(struct platform_device *pdev,
+ }
+ priv->dfsdm.phys_base = res->start;
+ priv->dfsdm.base = devm_ioremap_resource(&pdev->dev, res);
++ if (IS_ERR(priv->dfsdm.base))
++ return PTR_ERR(priv->dfsdm.base);
+
+ /*
+ * "dfsdm" clock is mandatory for DFSDM peripheral clocking.
+@@ -242,8 +244,10 @@ static int stm32_dfsdm_parse_of(struct platform_device *pdev,
+ */
+ priv->clk = devm_clk_get(&pdev->dev, "dfsdm");
+ if (IS_ERR(priv->clk)) {
+- dev_err(&pdev->dev, "No stm32_dfsdm_clk clock found\n");
+- return -EINVAL;
++ ret = PTR_ERR(priv->clk);
++ if (ret != -EPROBE_DEFER)
++ dev_err(&pdev->dev, "Failed to get clock (%d)\n", ret);
++ return ret;
+ }
+
+ priv->aclk = devm_clk_get(&pdev->dev, "audio");
+diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
+index 2f7d14159841..9b76a8fcdd24 100644
+--- a/drivers/infiniband/core/addr.c
++++ b/drivers/infiniband/core/addr.c
+@@ -337,7 +337,7 @@ static int dst_fetch_ha(const struct dst_entry *dst,
+ neigh_event_send(n, NULL);
+ ret = -ENODATA;
+ } else {
+- memcpy(dev_addr->dst_dev_addr, n->ha, MAX_ADDR_LEN);
++ neigh_ha_snapshot(dev_addr->dst_dev_addr, n, dst->dev);
+ }
+
+ neigh_release(n);
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+index 5689d742bafb..4c88d6f72574 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+@@ -772,6 +772,8 @@ static int i40iw_query_qp(struct ib_qp *ibqp,
+ struct i40iw_qp *iwqp = to_iwqp(ibqp);
+ struct i40iw_sc_qp *qp = &iwqp->sc_qp;
+
++ attr->qp_state = iwqp->ibqp_state;
++ attr->cur_qp_state = attr->qp_state;
+ attr->qp_access_flags = 0;
+ attr->cap.max_send_wr = qp->qp_uk.sq_size;
+ attr->cap.max_recv_wr = qp->qp_uk.rq_size;
+diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
+index 6c529e6f3a01..348c1df69cdc 100644
+--- a/drivers/infiniband/hw/mlx5/mad.c
++++ b/drivers/infiniband/hw/mlx5/mad.c
+@@ -200,19 +200,33 @@ static void pma_cnt_assign(struct ib_pma_portcounters *pma_cnt,
+ vl_15_dropped);
+ }
+
+-static int process_pma_cmd(struct mlx5_core_dev *mdev, u8 port_num,
++static int process_pma_cmd(struct mlx5_ib_dev *dev, u8 port_num,
+ const struct ib_mad *in_mad, struct ib_mad *out_mad)
+ {
+- int err;
++ struct mlx5_core_dev *mdev;
++ bool native_port = true;
++ u8 mdev_port_num;
+ void *out_cnt;
++ int err;
+
++ mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
++ if (!mdev) {
2592 |
++ /* Failed to get the native port, likely because the 2nd port is
++ * still unaffiliated. In such a case, default to the 1st port and
++ * the attached PF device.
++ */
++ native_port = false;
++ mdev = dev->mdev;
++ mdev_port_num = 1;
++ }
+ /* Declaring support of extended counters */
+ if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
+ struct ib_class_port_info cpi = {};
+
+ cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
+ memcpy((out_mad->data + 40), &cpi, sizeof(cpi));
+- return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
++ err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
++ goto done;
+ }
+
+ if (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT) {
+@@ -221,11 +235,13 @@ static int process_pma_cmd(struct mlx5_core_dev *mdev, u8 port_num,
+ int sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
+
+ out_cnt = kvzalloc(sz, GFP_KERNEL);
+- if (!out_cnt)
+- return IB_MAD_RESULT_FAILURE;
++ if (!out_cnt) {
++ err = IB_MAD_RESULT_FAILURE;
++ goto done;
++ }
+
+ err = mlx5_core_query_vport_counter(mdev, 0, 0,
+- port_num, out_cnt, sz);
++ mdev_port_num, out_cnt, sz);
+ if (!err)
+ pma_cnt_ext_assign(pma_cnt_ext, out_cnt);
+ } else {
+@@ -234,20 +250,23 @@ static int process_pma_cmd(struct mlx5_core_dev *mdev, u8 port_num,
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+
+ out_cnt = kvzalloc(sz, GFP_KERNEL);
+- if (!out_cnt)
+- return IB_MAD_RESULT_FAILURE;
++ if (!out_cnt) {
++ err = IB_MAD_RESULT_FAILURE;
++ goto done;
++ }
+
+- err = mlx5_core_query_ib_ppcnt(mdev, port_num,
++ err = mlx5_core_query_ib_ppcnt(mdev, mdev_port_num,
+ out_cnt, sz);
+ if (!err)
+ pma_cnt_assign(pma_cnt, out_cnt);
+- }
+-
++ }
+ kvfree(out_cnt);
+- if (err)
+- return IB_MAD_RESULT_FAILURE;
+-
+- return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
++ err = err ? IB_MAD_RESULT_FAILURE :
++ IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
++done:
++ if (native_port)
++ mlx5_ib_put_native_port_mdev(dev, port_num);
++ return err;
+ }
+
+ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+@@ -259,8 +278,6 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ const struct ib_mad *in_mad = (const struct ib_mad *)in;
+ struct ib_mad *out_mad = (struct ib_mad *)out;
+- struct mlx5_core_dev *mdev;
+- u8 mdev_port_num;
+ int ret;
+
+ if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+@@ -269,19 +286,14 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+
+ memset(out_mad->data, 0, sizeof(out_mad->data));
+
+- mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
+- if (!mdev)
+- return IB_MAD_RESULT_FAILURE;
+-
+- if (MLX5_CAP_GEN(mdev, vport_counters) &&
++ if (MLX5_CAP_GEN(dev->mdev, vport_counters) &&
+ in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
+ in_mad->mad_hdr.method == IB_MGMT_METHOD_GET) {
+- ret = process_pma_cmd(mdev, mdev_port_num, in_mad, out_mad);
++ ret = process_pma_cmd(dev, port_num, in_mad, out_mad);
+ } else {
+ ret = process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
+ in_mad, out_mad);
+ }
+- mlx5_ib_put_native_port_mdev(dev, port_num);
+ return ret;
+ }
+
+diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
+index aca9f60f9b21..1cbfbd98eb22 100644
+--- a/drivers/infiniband/sw/rxe/rxe_resp.c
++++ b/drivers/infiniband/sw/rxe/rxe_resp.c
+@@ -431,6 +431,7 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
+ qp->resp.va = reth_va(pkt);
+ qp->resp.rkey = reth_rkey(pkt);
+ qp->resp.resid = reth_len(pkt);
++ qp->resp.length = reth_len(pkt);
+ }
+ access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ
+ : IB_ACCESS_REMOTE_WRITE;
+@@ -856,7 +857,9 @@ static enum resp_states do_complete(struct rxe_qp *qp,
+ pkt->mask & RXE_WRITE_MASK) ?
+ IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
+ wc->vendor_err = 0;
+- wc->byte_len = wqe->dma.length - wqe->dma.resid;
++ wc->byte_len = (pkt->mask & RXE_IMMDT_MASK &&
++ pkt->mask & RXE_WRITE_MASK) ?
++ qp->resp.length : wqe->dma.length - wqe->dma.resid;
+
+ /* fields after byte_len are different between kernel and user
+ * space
+diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
+index e8be7f44e3be..28bfb3ece104 100644
+--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
++++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
+@@ -213,6 +213,7 @@ struct rxe_resp_info {
+ struct rxe_mem *mr;
+ u32 resid;
+ u32 rkey;
++ u32 length;
+ u64 atomic_orig;
+
+ /* SRQ only */
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+index 04ea7db08e87..ac0583ff280d 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+@@ -1893,12 +1893,6 @@ static void ipoib_child_init(struct net_device *ndev)
+ struct ipoib_dev_priv *priv = ipoib_priv(ndev);
+ struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
+
+- dev_hold(priv->parent);
+-
+- down_write(&ppriv->vlan_rwsem);
+- list_add_tail(&priv->list, &ppriv->child_intfs);
+- up_write(&ppriv->vlan_rwsem);
+-
+ priv->max_ib_mtu = ppriv->max_ib_mtu;
+ set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
+ memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN);
+@@ -1941,6 +1935,17 @@ static int ipoib_ndo_init(struct net_device *ndev)
+ if (rc) {
+ pr_warn("%s: failed to initialize device: %s port %d (ret = %d)\n",
+ priv->ca->name, priv->dev->name, priv->port, rc);
++ return rc;
++ }
++
++ if (priv->parent) {
++ struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
++
++ dev_hold(priv->parent);
++
++ down_write(&ppriv->vlan_rwsem);
++ list_add_tail(&priv->list, &ppriv->child_intfs);
++ up_write(&ppriv->vlan_rwsem);
+ }
+
+ return 0;
+@@ -1958,6 +1963,14 @@ static void ipoib_ndo_uninit(struct net_device *dev)
+ */
+ WARN_ON(!list_empty(&priv->child_intfs));
+
++ if (priv->parent) {
++ struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
++
++ down_write(&ppriv->vlan_rwsem);
++ list_del(&priv->list);
++ up_write(&ppriv->vlan_rwsem);
++ }
++
+ ipoib_neigh_hash_uninit(dev);
+
+ ipoib_ib_dev_cleanup(dev);
+@@ -1969,15 +1982,8 @@ static void ipoib_ndo_uninit(struct net_device *dev)
+ priv->wq = NULL;
+ }
+
+- if (priv->parent) {
+- struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
+-
+- down_write(&ppriv->vlan_rwsem);
+- list_del(&priv->list);
+- up_write(&ppriv->vlan_rwsem);
+-
++ if (priv->parent)
+ dev_put(priv->parent);
+- }
+ }
+
+ static int ipoib_set_vf_link_state(struct net_device *dev, int vf, int link_state)
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 162b3236e72c..2101601adf57 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -3752,7 +3752,8 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
+
+ freelist = domain_unmap(domain, start_pfn, last_pfn);
+
+- if (intel_iommu_strict || (pdev && pdev->untrusted)) {
++ if (intel_iommu_strict || (pdev && pdev->untrusted) ||
++ !has_iova_flush_queue(&domain->iovad)) {
+ iommu_flush_iotlb_psi(iommu, domain, start_pfn,
+ nrpages, !freelist, 0);
+ /* free iova */
+diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
+index d499b2621239..3e1a8a675572 100644
+--- a/drivers/iommu/iova.c
++++ b/drivers/iommu/iova.c
+@@ -54,9 +54,14 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
+ }
+ EXPORT_SYMBOL_GPL(init_iova_domain);
+
++bool has_iova_flush_queue(struct iova_domain *iovad)
++{
++ return !!iovad->fq;
++}
++
+ static void free_iova_flush_queue(struct iova_domain *iovad)
+ {
+- if (!iovad->fq)
++ if (!has_iova_flush_queue(iovad))
+ return;
+
+ if (timer_pending(&iovad->fq_timer))
+@@ -74,13 +79,14 @@ static void free_iova_flush_queue(struct iova_domain *iovad)
+ int init_iova_flush_queue(struct iova_domain *iovad,
+ iova_flush_cb flush_cb, iova_entry_dtor entry_dtor)
+ {
++ struct iova_fq __percpu *queue;
+ int cpu;
+
+ atomic64_set(&iovad->fq_flush_start_cnt, 0);
+ atomic64_set(&iovad->fq_flush_finish_cnt, 0);
+
+- iovad->fq = alloc_percpu(struct iova_fq);
+- if (!iovad->fq)
++ queue = alloc_percpu(struct iova_fq);
++ if (!queue)
+ return -ENOMEM;
+
+ iovad->flush_cb = flush_cb;
+@@ -89,13 +95,17 @@ int init_iova_flush_queue(struct iova_domain *iovad,
+ for_each_possible_cpu(cpu) {
+ struct iova_fq *fq;
+
+- fq = per_cpu_ptr(iovad->fq, cpu);
++ fq = per_cpu_ptr(queue, cpu);
+ fq->head = 0;
+ fq->tail = 0;
+
+ spin_lock_init(&fq->lock);
+ }
+
++ smp_wmb();
++
++ iovad->fq = queue;
++
+ timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
+ atomic_set(&iovad->fq_timer_on, 0);
+
+@@ -127,8 +137,9 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
+ struct iova *cached_iova;
+
+ cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
+- if (free->pfn_hi < iovad->dma_32bit_pfn &&
+- free->pfn_lo >= cached_iova->pfn_lo) {
++ if (free == cached_iova ||
++ (free->pfn_hi < iovad->dma_32bit_pfn &&
++ free->pfn_lo >= cached_iova->pfn_lo)) {
+ iovad->cached32_node = rb_next(&free->node);
+ iovad->max32_alloc_size = iovad->dma_32bit_pfn;
+ }
+diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
+index f4b1950d35f3..0b821a5b2db8 100644
+--- a/drivers/mailbox/mailbox.c
++++ b/drivers/mailbox/mailbox.c
+@@ -418,11 +418,13 @@ struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
+
+ of_property_for_each_string(np, "mbox-names", prop, mbox_name) {
+ if (!strncmp(name, mbox_name, strlen(name)))
+- break;
++ return mbox_request_channel(cl, index);
+ index++;
+ }
+
+- return mbox_request_channel(cl, index);
++ dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
++ __func__, name);
++ return ERR_PTR(-EINVAL);
+ }
+ EXPORT_SYMBOL_GPL(mbox_request_channel_byname);
+
+diff --git a/drivers/media/platform/coda/Makefile b/drivers/media/platform/coda/Makefile
+index f13adacd924e..cfe3ef8fad8a 100644
+--- a/drivers/media/platform/coda/Makefile
++++ b/drivers/media/platform/coda/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ ccflags-y += -I$(src)
+
+-coda-objs := coda-common.o coda-bit.o coda-gdi.o coda-h264.o coda-jpeg.o
++coda-vpu-objs := coda-common.o coda-bit.o coda-gdi.o coda-h264.o coda-jpeg.o
+
+-obj-$(CONFIG_VIDEO_CODA) += coda.o
++obj-$(CONFIG_VIDEO_CODA) += coda-vpu.o
+ obj-$(CONFIG_VIDEO_IMX_VDOA) += imx-vdoa.o
+diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
+index 6cfb293396f2..693ee73eb291 100644
+--- a/drivers/memstick/core/memstick.c
++++ b/drivers/memstick/core/memstick.c
+@@ -625,13 +625,18 @@ static int __init memstick_init(void)
+ return -ENOMEM;
+
+ rc = bus_register(&memstick_bus_type);
+- if (!rc)
+- rc = class_register(&memstick_host_class);
++ if (rc)
++ goto error_destroy_workqueue;
+
+- if (!rc)
+- return 0;
++ rc = class_register(&memstick_host_class);
++ if (rc)
++ goto error_bus_unregister;
++
++ return 0;
+
++error_bus_unregister:
+ bus_unregister(&memstick_bus_type);
++error_destroy_workqueue:
+ destroy_workqueue(workqueue);
+
+ return rc;
+diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
+index 2bdc7b02157a..4a31907a4525 100644
+--- a/drivers/mfd/arizona-core.c
++++ b/drivers/mfd/arizona-core.c
+@@ -993,7 +993,7 @@ int arizona_dev_init(struct arizona *arizona)
+ unsigned int reg, val;
+ int (*apply_patch)(struct arizona *) = NULL;
+ const struct mfd_cell *subdevs = NULL;
+- int n_subdevs, ret, i;
++ int n_subdevs = 0, ret, i;
+
+ dev_set_drvdata(arizona->dev, arizona);
+ mutex_init(&arizona->clk_lock);
+diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
+index a5391f96eafd..607383b67cf1 100644
+--- a/drivers/mfd/cros_ec_dev.c
++++ b/drivers/mfd/cros_ec_dev.c
+@@ -285,13 +285,15 @@ static void cros_ec_sensors_register(struct cros_ec_dev *ec)
+
+ resp = (struct ec_response_motion_sense *)msg->data;
+ sensor_num = resp->dump.sensor_count;
+- /* Allocate 1 extra sensors in FIFO are needed */
+- sensor_cells = kcalloc(sensor_num + 1, sizeof(struct mfd_cell),
++ /*
++ * Allocate 2 extra sensors if lid angle sensor and/or FIFO are needed.
++ */
++ sensor_cells = kcalloc(sensor_num + 2, sizeof(struct mfd_cell),
+ GFP_KERNEL);
+ if (sensor_cells == NULL)
+ goto error;
+
+- sensor_platforms = kcalloc(sensor_num + 1,
++ sensor_platforms = kcalloc(sensor_num,
+ sizeof(struct cros_ec_sensor_platform),
+ GFP_KERNEL);
+ if (sensor_platforms == NULL)
+@@ -351,6 +353,11 @@ static void cros_ec_sensors_register(struct cros_ec_dev *ec)
+ sensor_cells[id].name = "cros-ec-ring";
+ id++;
+ }
++ if (cros_ec_check_features(ec,
++ EC_FEATURE_REFINED_TABLET_MODE_HYSTERESIS)) {
++ sensor_cells[id].name = "cros-ec-lid-angle";
++ id++;
++ }
+
+ ret = mfd_add_devices(ec->dev, 0, sensor_cells, id,
+ NULL, 0, NULL);
+diff --git a/drivers/mfd/hi655x-pmic.c b/drivers/mfd/hi655x-pmic.c
+index f1c51ce309fa..7e3959aaa285 100644
+--- a/drivers/mfd/hi655x-pmic.c
++++ b/drivers/mfd/hi655x-pmic.c
+@@ -109,6 +109,8 @@ static int hi655x_pmic_probe(struct platform_device *pdev)
+
+ pmic->regmap = devm_regmap_init_mmio_clk(dev, NULL, base,
+ &hi655x_regmap_config);
++ if (IS_ERR(pmic->regmap))
++ return PTR_ERR(pmic->regmap);
+
+ regmap_read(pmic->regmap, HI655X_BUS_ADDR(HI655X_VER_REG), &pmic->ver);
+ if ((pmic->ver < PMU_VER_START) || (pmic->ver > PMU_VER_END)) {
+diff --git a/drivers/mfd/madera-core.c b/drivers/mfd/madera-core.c
+index 2a77988d0462..826b971ccb86 100644
+--- a/drivers/mfd/madera-core.c
++++ b/drivers/mfd/madera-core.c
+@@ -286,6 +286,7 @@ const struct of_device_id madera_of_match[] = {
+ { .compatible = "cirrus,wm1840", .data = (void *)WM1840 },
+ {}
+ };
++MODULE_DEVICE_TABLE(of, madera_of_match);
+ EXPORT_SYMBOL_GPL(madera_of_match);
+
+ static int madera_get_reset_gpio(struct madera *madera)
+diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
+index dbf684c4ebfb..23276a80e3b4 100644
+--- a/drivers/mfd/mfd-core.c
++++ b/drivers/mfd/mfd-core.c
+@@ -175,6 +175,7 @@ static int mfd_add_device(struct device *parent, int id,
+ for_each_child_of_node(parent->of_node, np) {
+ if (of_device_is_compatible(np, cell->of_compatible)) {
+ pdev->dev.of_node = np;
++ pdev->dev.fwnode = &np->fwnode;
+ break;
+ }
+ }
+diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
+index f88094719552..f2abe27010ef 100644
+--- a/drivers/misc/eeprom/Kconfig
++++ b/drivers/misc/eeprom/Kconfig
+@@ -5,6 +5,7 @@ config EEPROM_AT24
+ tristate "I2C EEPROMs / RAMs / ROMs from most vendors"
+ depends on I2C && SYSFS
+ select NVMEM
++ select NVMEM_SYSFS
+ select REGMAP_I2C
+ help
+ Enable this driver to get read/write support to most I2C EEPROMs
+@@ -34,6 +35,7 @@ config EEPROM_AT25
+ tristate "SPI EEPROMs from most vendors"
+ depends on SPI && SYSFS
+ select NVMEM
++ select NVMEM_SYSFS
+ help
+ Enable this driver to get read/write support to most SPI EEPROMs,
+ after you configure the board init code to know about each eeprom
+@@ -80,6 +82,7 @@ config EEPROM_93XX46
+ depends on SPI && SYSFS
+ select REGMAP
+ select NVMEM
++ select NVMEM_SYSFS
+ help
+ Driver for the microwire EEPROM chipsets 93xx46x. The driver
+ supports both read and write commands and also the command to
+diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
+index d74b182e19f3..6c0173772162 100644
+--- a/drivers/misc/mei/hw-me-regs.h
++++ b/drivers/misc/mei/hw-me-regs.h
+@@ -81,6 +81,9 @@
+
+ #define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */
+
++#define MEI_DEV_ID_MCC 0x4B70 /* Mule Creek Canyon (EHL) */
++#define MEI_DEV_ID_MCC_4 0x4B75 /* Mule Creek Canyon 4 (EHL) */
++
+ /*
+ * MEI HW Section
+ */
+diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
+index 7a2b3545a7f9..57cb68f5cc64 100644
+--- a/drivers/misc/mei/pci-me.c
++++ b/drivers/misc/mei/pci-me.c
+@@ -98,6 +98,9 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
+
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
+
++ {MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH12_CFG)},
++ {MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)},
++
+ /* required last entry */
+ {0, }
+ };
+diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
+index dd21315922c8..9dc4548271b4 100644
+--- a/drivers/mmc/host/sdhci-pci-o2micro.c
++++ b/drivers/mmc/host/sdhci-pci-o2micro.c
+@@ -395,11 +395,21 @@ int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
+ {
+ struct sdhci_pci_chip *chip;
+ struct sdhci_host *host;
+- u32 reg;
++ u32 reg, caps;
+ int ret;
+
+ chip = slot->chip;
+ host = slot->host;
++
++ caps = sdhci_readl(host, SDHCI_CAPABILITIES);
++
++ /*
++ * mmc_select_bus_width() will test the bus to determine the actual bus
++ * width.
++ */
++ if (caps & SDHCI_CAN_DO_8BIT)
++ host->mmc->caps |= MMC_CAP_8_BIT_DATA;
++
+ switch (chip->pdev->device) {
+ case PCI_DEVICE_ID_O2_SDS0:
+ case PCI_DEVICE_ID_O2_SEABIRD0:
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+index a76529a7662d..c2e92786608b 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+@@ -1054,14 +1054,12 @@ static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
+ }
+ }
+
+-static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
+- struct cudbg_buffer *dbg_buff,
+- struct cudbg_error *cudbg_err,
+- u8 mem_type)
++static unsigned long cudbg_mem_region_size(struct cudbg_init *pdbg_init,
++ struct cudbg_error *cudbg_err,
++ u8 mem_type)
+ {
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_meminfo mem_info;
+- unsigned long size;
+ u8 mc_idx;
+ int rc;
+
+@@ -1075,7 +1073,16 @@ static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
+ if (rc)
+ return rc;
+
+- size = mem_info.avail[mc_idx].limit - mem_info.avail[mc_idx].base;
++ return mem_info.avail[mc_idx].limit - mem_info.avail[mc_idx].base;
++}
++
++static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
++ struct cudbg_buffer *dbg_buff,
++ struct cudbg_error *cudbg_err,
++ u8 mem_type)
++{
++ unsigned long size = cudbg_mem_region_size(pdbg_init, cudbg_err, mem_type);
++
+ return cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, size,
+ cudbg_err);
+ }
+diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
+index 2dca3034fee0..dfb93228d6a7 100644
+--- a/drivers/nvdimm/bus.c
++++ b/drivers/nvdimm/bus.c
+@@ -73,7 +73,7 @@ static void nvdimm_bus_probe_end(struct nvdimm_bus *nvdimm_bus)
+ {
+ nvdimm_bus_lock(&nvdimm_bus->dev);
+ if (--nvdimm_bus->probe_active == 0)
+- wake_up(&nvdimm_bus->probe_wait);
++ wake_up(&nvdimm_bus->wait);
+ nvdimm_bus_unlock(&nvdimm_bus->dev);
+ }
+
+@@ -341,7 +341,7 @@ struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
+ return NULL;
+ INIT_LIST_HEAD(&nvdimm_bus->list);
+ INIT_LIST_HEAD(&nvdimm_bus->mapping_list);
+- init_waitqueue_head(&nvdimm_bus->probe_wait);
++ init_waitqueue_head(&nvdimm_bus->wait);
+ nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL);
+ if (nvdimm_bus->id < 0) {
+ kfree(nvdimm_bus);
+@@ -426,6 +426,9 @@ static int nd_bus_remove(struct device *dev)
+ list_del_init(&nvdimm_bus->list);
+ mutex_unlock(&nvdimm_bus_list_mutex);
+
++ wait_event(nvdimm_bus->wait,
++ atomic_read(&nvdimm_bus->ioctl_active) == 0);
++
+ nd_synchronize();
+ device_for_each_child(&nvdimm_bus->dev, NULL, child_unregister);
+
+@@ -547,13 +550,38 @@ EXPORT_SYMBOL(nd_device_register);
+
+ void nd_device_unregister(struct device *dev, enum nd_async_mode mode)
+ {
++ bool killed;
++
+ switch (mode) {
+ case ND_ASYNC:
++ /*
++ * In the async case this is being triggered with the
++ * device lock held and the unregistration work needs to
++ * be moved out of line iff this thread has won the
++ * race to schedule the deletion.
++ */
++ if (!kill_device(dev))
++ return;
++
+ get_device(dev);
+ async_schedule_domain(nd_async_device_unregister, dev,
+ &nd_async_domain);
+ break;
+ case ND_SYNC:
++ /*
++ * In the sync case the device is being unregistered due
++ * to a state change of the parent. Claim the kill state
++ * to synchronize against other unregistration requests,
++ * or otherwise let the async path handle it if the
++ * unregistration was already queued.
++ */
++ device_lock(dev);
++ killed = kill_device(dev);
++ device_unlock(dev);
++
++ if (!killed)
++ return;
++
+ nd_synchronize();
+ device_unregister(dev);
+ break;
+@@ -860,7 +888,7 @@ void wait_nvdimm_bus_probe_idle(struct device *dev)
+ if (nvdimm_bus->probe_active == 0)
+ break;
+ nvdimm_bus_unlock(&nvdimm_bus->dev);
+- wait_event(nvdimm_bus->probe_wait,
++ wait_event(nvdimm_bus->wait,
+ nvdimm_bus->probe_active == 0);
+ nvdimm_bus_lock(&nvdimm_bus->dev);
+ } while (true);
+@@ -1090,24 +1118,10 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
+ return rc;
+ }
+
+-static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+-{
+- long id = (long) file->private_data;
+- int rc = -ENXIO, ro;
+- struct nvdimm_bus *nvdimm_bus;
+-
+- ro = ((file->f_flags & O_ACCMODE) == O_RDONLY);
+- mutex_lock(&nvdimm_bus_list_mutex);
+- list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) {
+- if (nvdimm_bus->id == id) {
+- rc = __nd_ioctl(nvdimm_bus, NULL, ro, cmd, arg);
+- break;
+- }
+- }
+- mutex_unlock(&nvdimm_bus_list_mutex);
+-
+- return rc;
+-}
++enum nd_ioctl_mode {
++ BUS_IOCTL,
++ DIMM_IOCTL,
++};
+
+ static int match_dimm(struct device *dev, void *data)
+ {
+@@ -1122,31 +1136,62 @@ static int match_dimm(struct device *dev, void *data)
+ return 0;
+ }
+
+-static long nvdimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
++ enum nd_ioctl_mode mode)
++
+ {
+- int rc = -ENXIO, ro;
+- struct nvdimm_bus *nvdimm_bus;
++ struct nvdimm_bus *nvdimm_bus, *found = NULL;
++ long id = (long) file->private_data;
++ struct nvdimm *nvdimm = NULL;
++ int rc, ro;
+
+ ro = ((file->f_flags & O_ACCMODE) == O_RDONLY);
+ mutex_lock(&nvdimm_bus_list_mutex);
+ list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) {
+- struct device *dev = device_find_child(&nvdimm_bus->dev,
+- file->private_data, match_dimm);
+- struct nvdimm *nvdimm;
+-
+- if (!dev)
+- continue;
++ if (mode == DIMM_IOCTL) {
++ struct device *dev;
++
++ dev = device_find_child(&nvdimm_bus->dev,
++ file->private_data, match_dimm);
++ if (!dev)
++ continue;
++ nvdimm = to_nvdimm(dev);
++ found = nvdimm_bus;
++ } else if (nvdimm_bus->id == id) {
++ found = nvdimm_bus;
++ }
+
+- nvdimm = to_nvdimm(dev);
+- rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg);
+- put_device(dev);
+- break;
++ if (found) {
++ atomic_inc(&nvdimm_bus->ioctl_active);
++ break;
++ }
+ }
+ mutex_unlock(&nvdimm_bus_list_mutex);
+
++ if (!found)
++ return -ENXIO;
++
++ nvdimm_bus = found;
++ rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg);
++
++ if (nvdimm)
++ put_device(&nvdimm->dev);
++ if (atomic_dec_and_test(&nvdimm_bus->ioctl_active))
++ wake_up(&nvdimm_bus->wait);
++
+ return rc;
+ }
+
++static long bus_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++ return nd_ioctl(file, cmd, arg, BUS_IOCTL);
++}
++
++static long dimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++ return nd_ioctl(file, cmd, arg, DIMM_IOCTL);
++}
++
+ static int nd_open(struct inode *inode, struct file *file)
+ {
+ long minor = iminor(inode);
+@@ -1158,16 +1203,16 @@ static int nd_open(struct inode *inode, struct file *file)
+ static const struct file_operations nvdimm_bus_fops = {
+ .owner = THIS_MODULE,
+ .open = nd_open,
+- .unlocked_ioctl = nd_ioctl,
+- .compat_ioctl = nd_ioctl,
++ .unlocked_ioctl = bus_ioctl,
++ .compat_ioctl = bus_ioctl,
+ .llseek = noop_llseek,
+ };
+
+ static const struct file_operations nvdimm_fops = {
+ .owner = THIS_MODULE,
+ .open = nd_open,
+- .unlocked_ioctl = nvdimm_ioctl,
+- .compat_ioctl = nvdimm_ioctl,
++ .unlocked_ioctl = dimm_ioctl,
++ .compat_ioctl = dimm_ioctl,
+ .llseek = noop_llseek,
+ };
+
+diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
+index 391e88de3a29..6cd470547106 100644
+--- a/drivers/nvdimm/nd-core.h
++++ b/drivers/nvdimm/nd-core.h
+@@ -17,10 +17,11 @@ extern struct workqueue_struct *nvdimm_wq;
+
+ struct nvdimm_bus {
+ struct nvdimm_bus_descriptor *nd_desc;
+- wait_queue_head_t probe_wait;
++ wait_queue_head_t wait;
+ struct list_head list;
+ struct device dev;
+ int id, probe_active;
++ atomic_t ioctl_active;
+ struct list_head mapping_list;
+ struct mutex reconfig_mutex;
+ struct badrange badrange;
+diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
+index ef46cc3a71ae..488c47ac4c4a 100644
+--- a/drivers/nvdimm/region.c
++++ b/drivers/nvdimm/region.c
+@@ -34,17 +34,6 @@ static int nd_region_probe(struct device *dev)
+ if (rc)
+ return rc;
+
+- rc = nd_region_register_namespaces(nd_region, &err);
+- if (rc < 0)
+- return rc;
+-
+- ndrd = dev_get_drvdata(dev);
+- ndrd->ns_active = rc;
+- ndrd->ns_count = rc + err;
+-
+- if (rc && err && rc == err)
+- return -ENODEV;
+-
+ if (is_nd_pmem(&nd_region->dev)) {
+ struct resource ndr_res;
+
+@@ -60,6 +49,17 @@ static int nd_region_probe(struct device *dev)
+ nvdimm_badblocks_populate(nd_region, &nd_region->bb, &ndr_res);
+ }
+
++ rc = nd_region_register_namespaces(nd_region, &err);
++ if (rc < 0)
++ return rc;
++
++ ndrd = dev_get_drvdata(dev);
++ ndrd->ns_active = rc;
++ ndrd->ns_count = rc + err;
++
++ if (rc && err && rc == err)
++ return -ENODEV;
++
+ nd_region->btt_seed = nd_btt_create(nd_region);
+ nd_region->pfn_seed = nd_pfn_create(nd_region);
+ nd_region->dax_seed = nd_dax_create(nd_region);
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 22c68e3b71d5..4a1d2ab4d161 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -11,6 +11,7 @@
+ #include <linux/hdreg.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
++#include <linux/backing-dev.h>
+ #include <linux/list_sort.h>
+ #include <linux/slab.h>
+ #include <linux/types.h>
+@@ -3256,6 +3257,10 @@ static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+ goto out_free_ns;
+ }
+
++ if (ctrl->opts && ctrl->opts->data_digest)
++ ns->queue->backing_dev_info->capabilities
++ |= BDI_CAP_STABLE_WRITES;
++
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
+ if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
+ blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index f5bc1c30cef5..7fbcd72c438f 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -1456,11 +1456,15 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
+
+ if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
+ nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(depth));
+- nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
+- nvmeq->sq_cmds);
+- if (nvmeq->sq_dma_addr) {
+- set_bit(NVMEQ_SQ_CMB, &nvmeq->flags);
+- return 0;
++ if (nvmeq->sq_cmds) {
++ nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
++ nvmeq->sq_cmds);
++ if (nvmeq->sq_dma_addr) {
++ set_bit(NVMEQ_SQ_CMB, &nvmeq->flags);
++ return 0;
++ }
++
++ pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(depth));
+ }
+ }
+
+@@ -2517,7 +2521,8 @@ static void nvme_reset_work(struct work_struct *work)
+ * Limit the max command size to prevent iod->sg allocations going
+ * over a single page.
+ */
+- dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1;
++ dev->ctrl.max_hw_sectors = min_t(u32,
++ NVME_MAX_KB_SZ << 1, dma_max_mapping_size(dev->dev) >> 9);
+ dev->ctrl.max_segments = NVME_MAX_SEGS;
+
+ /*
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 08a2501b9357..606b13d35d16 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -860,7 +860,14 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
+ else
+ flags |= MSG_MORE;
+
+- ret = kernel_sendpage(queue->sock, page, offset, len, flags);
++ /* can't zcopy slab pages */
++ if (unlikely(PageSlab(page))) {
++ ret = sock_no_sendpage(queue->sock, page, offset, len,
++ flags);
++ } else {
++ ret = kernel_sendpage(queue->sock, page, offset, len,
++ flags);
++ }
+ if (ret <= 0)
+ return ret;
+
+diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
+index 419451efd58c..4234ddb4722f 100644
+--- a/drivers/pci/controller/dwc/pci-dra7xx.c
++++ b/drivers/pci/controller/dwc/pci-dra7xx.c
+@@ -26,6 +26,7 @@
+ #include <linux/types.h>
+ #include <linux/mfd/syscon.h>
+ #include <linux/regmap.h>
++#include <linux/gpio/consumer.h>
+
+ #include "../../pci.h"
+ #include "pcie-designware.h"
+diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/pcie-mobiveil.c
+index 77052a0712d0..387a20f3c240 100644
+--- a/drivers/pci/controller/pcie-mobiveil.c
++++ b/drivers/pci/controller/pcie-mobiveil.c
+@@ -501,6 +501,12 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
+ return err;
+ }
+
++ /* setup bus numbers */
++ value = csr_readl(pcie, PCI_PRIMARY_BUS);
++ value &= 0xff000000;
++ value |= 0x00ff0100;
++ csr_writel(pcie, value, PCI_PRIMARY_BUS);
++
+ /*
+ * program Bus Master Enable Bit in Command Register in PAB Config
+ * Space
+@@ -540,7 +546,7 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
+ resource_size(pcie->ob_io_res));
+
+ /* memory inbound translation window */
+- program_ib_windows(pcie, WIN_NUM_1, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
++ program_ib_windows(pcie, WIN_NUM_0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
+
+ /* Get the I/O and memory ranges from DT */
+ resource_list_for_each_entry_safe(win, tmp, &pcie->resources) {
+@@ -552,11 +558,18 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
+ if (type) {
+ /* configure outbound translation window */
+ program_ob_windows(pcie, pcie->ob_wins_configured,
+- win->res->start, 0, type,
+- resource_size(win->res));
++ win->res->start,
++ win->res->start - win->offset,
++ type, resource_size(win->res));
+ }
+ }
+
++ /* fixup for PCIe class register */
++ value = csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
++ value &= 0xff;
++ value |= (PCI_CLASS_BRIDGE_PCI << 16);
++ csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
++
+ /* setup MSI hardware registers */
+ mobiveil_pcie_enable_msi(pcie);
+
+@@ -797,9 +810,6 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
+ goto error;
+ }
+
+- /* fixup for PCIe class register */
+- csr_writel(pcie, 0x060402ab, PAB_INTP_AXI_PIO_CLASS);
+-
+ /* initialize the IRQ domains */
+ ret = mobiveil_pcie_init_irq_domain(pcie);
+ if (ret) {
+diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
+index 3b031f00a94a..45c0f344ccd1 100644
+--- a/drivers/pci/controller/pcie-xilinx-nwl.c
++++ b/drivers/pci/controller/pcie-xilinx-nwl.c
+@@ -482,15 +482,13 @@ static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ int i;
+
+ mutex_lock(&msi->lock);
+- bit = bitmap_find_next_zero_area(msi->bitmap, INT_PCI_MSI_NR, 0,
+- nr_irqs, 0);
+- if (bit >= INT_PCI_MSI_NR) {
++ bit = bitmap_find_free_region(msi->bitmap, INT_PCI_MSI_NR,
++ get_count_order(nr_irqs));
++ if (bit < 0) {
+ mutex_unlock(&msi->lock);
+ return -ENOSPC;
+ }
+
+- bitmap_set(msi->bitmap, bit, nr_irqs);
+-
+ for (i = 0; i < nr_irqs; i++) {
+ irq_domain_set_info(domain, virq + i, bit + i, &nwl_irq_chip,
+ domain->host_data, handle_simple_irq,
+@@ -508,7 +506,8 @@ static void nwl_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ struct nwl_msi *msi = &pcie->msi;
+
+ mutex_lock(&msi->lock);
+- bitmap_clear(msi->bitmap, data->hwirq, nr_irqs);
++ bitmap_release_region(msi->bitmap, data->hwirq,
++ get_count_order(nr_irqs));
+ mutex_unlock(&msi->lock);
+ }
+
+diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
+index 27806987e93b..7d41e6684b87 100644
+--- a/drivers/pci/endpoint/functions/pci-epf-test.c
++++ b/drivers/pci/endpoint/functions/pci-epf-test.c
+@@ -434,10 +434,16 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
+ int bar;
+ enum pci_barno test_reg_bar = epf_test->test_reg_bar;
+ const struct pci_epc_features *epc_features;
++ size_t test_reg_size;
+
+ epc_features = epf_test->epc_features;
+
+- base = pci_epf_alloc_space(epf, sizeof(struct pci_epf_test_reg),
++ if (epc_features->bar_fixed_size[test_reg_bar])
++ test_reg_size = bar_size[test_reg_bar];
++ else
++ test_reg_size = sizeof(struct pci_epf_test_reg);
++
++ base = pci_epf_alloc_space(epf, test_reg_size,
+ test_reg_bar, epc_features->align);
+ if (!base) {
+ dev_err(dev, "Failed to allocated register space\n");
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+index ca3793002e2f..74c3df250d9c 100644
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -414,6 +414,9 @@ static int pci_device_probe(struct device *dev)
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct pci_driver *drv = to_pci_driver(dev->driver);
+
++ if (!pci_device_can_probe(pci_dev))
++ return -ENODEV;
++
+ pci_assign_irq(pci_dev);
+
+ error = pcibios_alloc_irq(pci_dev);
+@@ -421,12 +424,10 @@ static int pci_device_probe(struct device *dev)
+ return error;
+
+ pci_dev_get(pci_dev);
+- if (pci_device_can_probe(pci_dev)) {
+- error = __pci_device_probe(drv, pci_dev);
+- if (error) {
+- pcibios_free_irq(pci_dev);
+- pci_dev_put(pci_dev);
+- }
++ error = __pci_device_probe(drv, pci_dev);
++ if (error) {
++ pcibios_free_irq(pci_dev);
++ pci_dev_put(pci_dev);
+ }
+
+ return error;
+diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
+index 6d27475e39b2..4e83c347de5d 100644
+--- a/drivers/pci/pci-sysfs.c
++++ b/drivers/pci/pci-sysfs.c
+@@ -477,7 +477,7 @@ static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
+ pci_stop_and_remove_bus_device_locked(to_pci_dev(dev));
+ return count;
+ }
+-static struct device_attribute dev_remove_attr = __ATTR(remove,
++static struct device_attribute dev_remove_attr = __ATTR_IGNORE_LOCKDEP(remove,
+ (S_IWUSR|S_IWGRP),
+ NULL, remove_store);
+
+diff --git a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
+index 6233a7979a93..ac322d643c7a 100644
+--- a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
++++ b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
+@@ -188,7 +188,7 @@ static const struct regmap_config phy_g12a_usb3_pcie_cr_regmap_conf = {
+ .reg_read = phy_g12a_usb3_pcie_cr_bus_read,
+ .reg_write = phy_g12a_usb3_pcie_cr_bus_write,
+ .max_register = 0xffff,
+- .fast_io = true,
++ .disable_locking = true,
+ };
+
+ static int phy_g12a_usb3_init(struct phy *phy)
+diff --git a/drivers/phy/renesas/phy-rcar-gen2.c b/drivers/phy/renesas/phy-rcar-gen2.c
+index 8dc5710d9c98..2926e4937301 100644
+--- a/drivers/phy/renesas/phy-rcar-gen2.c
++++ b/drivers/phy/renesas/phy-rcar-gen2.c
+@@ -391,6 +391,7 @@ static int rcar_gen2_phy_probe(struct platform_device *pdev)
+ error = of_property_read_u32(np, "reg", &channel_num);
+ if (error || channel_num > 2) {
+ dev_err(dev, "Invalid \"reg\" property\n");
++ of_node_put(np);
+ return error;
+ }
+ channel->select_mask = select_mask[channel_num];
+@@ -406,6 +407,7 @@ static int rcar_gen2_phy_probe(struct platform_device *pdev)
+ data->gen2_phy_ops);
+ if (IS_ERR(phy->phy)) {
+ dev_err(dev, "Failed to create PHY\n");
++ of_node_put(np);
+ return PTR_ERR(phy->phy);
+ }
+ phy_set_drvdata(phy->phy, phy);
+diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+index 1322185a00a2..8ffba67568ec 100644
+--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
++++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+@@ -13,6 +13,7 @@
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
+ #include <linux/module.h>
++#include <linux/mutex.h>
+ #include <linux/of.h>
+ #include <linux/of_address.h>
+ #include <linux/of_device.h>
+@@ -106,6 +107,7 @@ struct rcar_gen3_chan {
+ struct rcar_gen3_phy rphys[NUM_OF_PHYS];
+ struct regulator *vbus;
+ struct work_struct work;
++ struct mutex lock; /* protects rphys[...].powered */
+ enum usb_dr_mode dr_mode;
+ bool extcon_host;
+ bool is_otg_channel;
+@@ -437,15 +439,16 @@ static int rcar_gen3_phy_usb2_power_on(struct phy *p)
+ struct rcar_gen3_chan *channel = rphy->ch;
+ void __iomem *usb2_base = channel->base;
+ u32 val;
+- int ret;
++ int ret = 0;
+
++ mutex_lock(&channel->lock);
+ if (!rcar_gen3_are_all_rphys_power_off(channel))
+- return 0;
++ goto out;
+
+ if (channel->vbus) {
+ ret = regulator_enable(channel->vbus);
+ if (ret)
+- return ret;
++ goto out;
+ }
+
+ val = readl(usb2_base + USB2_USBCTR);
+@@ -454,7 +457,10 @@ static int rcar_gen3_phy_usb2_power_on(struct phy *p)
+ val &= ~USB2_USBCTR_PLL_RST;
+ writel(val, usb2_base + USB2_USBCTR);
+
++out:
++ /* The powered flag should be set for any other phys anyway */
++ rphy->powered = true;
++ mutex_unlock(&channel->lock);
+
+ return 0;
+ }
+@@ -465,14 +471,18 @@ static int rcar_gen3_phy_usb2_power_off(struct phy *p)
+ struct rcar_gen3_chan *channel = rphy->ch;
+ int ret = 0;
+
++ mutex_lock(&channel->lock);
+ rphy->powered = false;
+
+ if (!rcar_gen3_are_all_rphys_power_off(channel))
+- return 0;
++ goto out;
+
+ if (channel->vbus)
+ ret = regulator_disable(channel->vbus);
+
++out:
++ mutex_unlock(&channel->lock);
++
+ return ret;
+ }
+
+@@ -639,6 +649,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
+ if (!phy_usb2_ops)
+ return -EINVAL;
+
++ mutex_init(&channel->lock);
+ for (i = 0; i < NUM_OF_PHYS; i++) {
+ channel->rphys[i].phy = devm_phy_create(dev, NULL,
+ phy_usb2_ops);
+diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
+index 807a3263d849..62a622159006 100644
+--- a/drivers/pinctrl/pinctrl-rockchip.c
++++ b/drivers/pinctrl/pinctrl-rockchip.c
+@@ -3204,6 +3204,7 @@ static int rockchip_get_bank_data(struct rockchip_pin_bank *bank,
+ base,
+ &rockchip_regmap_config);
+ }
++ of_node_put(node);
+ }
+
+ bank->irq = irq_of_parse_and_map(bank->of_node, 0);
+diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
+index 5d5cc6111081..7c2fd1d72e18 100644
+--- a/drivers/platform/x86/Kconfig
++++ b/drivers/platform/x86/Kconfig
+@@ -1317,7 +1317,7 @@ config HUAWEI_WMI
+
+ config PCENGINES_APU2
+ tristate "PC Engines APUv2/3 front button and LEDs driver"
+- depends on INPUT && INPUT_KEYBOARD
++ depends on INPUT && INPUT_KEYBOARD && GPIOLIB
+ depends on LEDS_CLASS
+ select GPIO_AMD_FCH
+ select KEYBOARD_GPIO_POLLED
+diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
+index 9b18a184e0aa..abfa99d18fea 100644
+--- a/drivers/platform/x86/asus-wmi.c
++++ b/drivers/platform/x86/asus-wmi.c
+@@ -85,6 +85,7 @@ static bool ashs_present(void)
+ struct bios_args {
+ u32 arg0;
+ u32 arg1;
++ u32 arg2; /* At least TUF Gaming series uses 3 dword input buffer. */
+ } __packed;
+
+ /*
+@@ -211,11 +212,13 @@ static void asus_wmi_input_exit(struct asus_wmi *asus)
+ asus->inputdev = NULL;
+ }
+
+-int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1, u32 *retval)
++static int asus_wmi_evaluate_method3(u32 method_id,
++ u32 arg0, u32 arg1, u32 arg2, u32 *retval)
+ {
+ struct bios_args args = {
+ .arg0 = arg0,
+ .arg1 = arg1,
++ .arg2 = arg2,
+ };
+ struct acpi_buffer input = { (acpi_size) sizeof(args), &args };
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+@@ -247,6 +250,11 @@ exit:
+
+ return 0;
+ }
++
++int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1, u32 *retval)
++{
++ return asus_wmi_evaluate_method3(method_id, arg0, arg1, 0, retval);
++}
+ EXPORT_SYMBOL_GPL(asus_wmi_evaluate_method);
+
+ static int asus_wmi_evaluate_method_agfn(const struct acpi_buffer args)
+diff --git a/drivers/regulator/88pm800-regulator.c b/drivers/regulator/88pm800-regulator.c
+new file mode 100644
+index 000000000000..69ae25886181
+--- /dev/null
++++ b/drivers/regulator/88pm800-regulator.c
+@@ -0,0 +1,286 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Regulators driver for Marvell 88PM800
++ *
++ * Copyright (C) 2012 Marvell International Ltd.
++ * Joseph(Yossi) Hanin <yhanin@×××××××.com>
++ * Yi Zhang <yizhang@×××××××.com>
++ */
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/init.h>
++#include <linux/err.h>
++#include <linux/regmap.h>
++#include <linux/regulator/driver.h>
++#include <linux/regulator/machine.h>
++#include <linux/mfd/88pm80x.h>
++#include <linux/delay.h>
++#include <linux/io.h>
++#include <linux/of.h>
++#include <linux/regulator/of_regulator.h>
++
++/* LDO1 with DVC[0..3] */
++#define PM800_LDO1_VOUT (0x08) /* VOUT1 */
++#define PM800_LDO1_VOUT_2 (0x09)
++#define PM800_LDO1_VOUT_3 (0x0A)
++#define PM800_LDO2_VOUT (0x0B)
++#define PM800_LDO3_VOUT (0x0C)
++#define PM800_LDO4_VOUT (0x0D)
++#define PM800_LDO5_VOUT (0x0E)
++#define PM800_LDO6_VOUT (0x0F)
++#define PM800_LDO7_VOUT (0x10)
++#define PM800_LDO8_VOUT (0x11)
++#define PM800_LDO9_VOUT (0x12)
++#define PM800_LDO10_VOUT (0x13)
++#define PM800_LDO11_VOUT (0x14)
++#define PM800_LDO12_VOUT (0x15)
++#define PM800_LDO13_VOUT (0x16)
++#define PM800_LDO14_VOUT (0x17)
++#define PM800_LDO15_VOUT (0x18)
++#define PM800_LDO16_VOUT (0x19)
++#define PM800_LDO17_VOUT (0x1A)
++#define PM800_LDO18_VOUT (0x1B)
++#define PM800_LDO19_VOUT (0x1C)
++
++/* BUCK1 with DVC[0..3] */
++#define PM800_BUCK1 (0x3C)
++#define PM800_BUCK1_1 (0x3D)
++#define PM800_BUCK1_2 (0x3E)
++#define PM800_BUCK1_3 (0x3F)
++#define PM800_BUCK2 (0x40)
++#define PM800_BUCK3 (0x41)
++#define PM800_BUCK4 (0x42)
++#define PM800_BUCK4_1 (0x43)
++#define PM800_BUCK4_2 (0x44)
++#define PM800_BUCK4_3 (0x45)
++#define PM800_BUCK5 (0x46)
++
++#define PM800_BUCK_ENA (0x50)
++#define PM800_LDO_ENA1_1 (0x51)
++#define PM800_LDO_ENA1_2 (0x52)
++#define PM800_LDO_ENA1_3 (0x53)
++
++#define PM800_LDO_ENA2_1 (0x56)
++#define PM800_LDO_ENA2_2 (0x57)
++#define PM800_LDO_ENA2_3 (0x58)
++
++#define PM800_BUCK1_MISC1 (0x78)
++#define PM800_BUCK3_MISC1 (0x7E)
++#define PM800_BUCK4_MISC1 (0x81)
++#define PM800_BUCK5_MISC1 (0x84)
++
++struct pm800_regulator_info {
++ struct regulator_desc desc;
++ int max_ua;
++};
++
++/*
++ * vreg - the buck regs string.
++ * ereg - the string for the enable register.
++ * ebit - the bit number in the enable register.
++ * amax - the current
++ * Buck has 2 kinds of voltage steps. It is easy to find voltage by ranges,
++ * not the constant voltage table.
++ * n_volt - Number of available selectors
++ */
++#define PM800_BUCK(match, vreg, ereg, ebit, amax, volt_ranges, n_volt) \
++{ \
++ .desc = { \
++ .name = #vreg, \
++ .of_match = of_match_ptr(#match), \
++ .regulators_node = of_match_ptr("regulators"), \
++ .ops = &pm800_volt_range_ops, \
++ .type = REGULATOR_VOLTAGE, \
++ .id = PM800_ID_##vreg, \
++ .owner = THIS_MODULE, \
++ .n_voltages = n_volt, \
++ .linear_ranges = volt_ranges, \
++ .n_linear_ranges = ARRAY_SIZE(volt_ranges), \
++ .vsel_reg = PM800_##vreg, \
++ .vsel_mask = 0x7f, \
++ .enable_reg = PM800_##ereg, \
++ .enable_mask = 1 << (ebit), \
++ }, \
++ .max_ua = (amax), \
++}
++
++/*
++ * vreg - the LDO regs string
++ * ereg - the string for the enable register.
++ * ebit - the bit number in the enable register.
++ * amax - the current
++ * volt_table - the LDO voltage table
++ * For all the LDOes, there are too many ranges. Using volt_table will be
++ * simpler and faster.
++ */
++#define PM800_LDO(match, vreg, ereg, ebit, amax, ldo_volt_table) \
++{ \
++ .desc = { \
++ .name = #vreg, \
++ .of_match = of_match_ptr(#match), \
++ .regulators_node = of_match_ptr("regulators"), \
++ .ops = &pm800_volt_table_ops, \
++ .type = REGULATOR_VOLTAGE, \
++ .id = PM800_ID_##vreg, \
++ .owner = THIS_MODULE, \
++ .n_voltages = ARRAY_SIZE(ldo_volt_table), \
++ .vsel_reg = PM800_##vreg##_VOUT, \
++ .vsel_mask = 0xf, \
++ .enable_reg = PM800_##ereg, \
++ .enable_mask = 1 << (ebit), \
++ .volt_table = ldo_volt_table, \
++ }, \
++ .max_ua = (amax), \
++}
++
++/* Ranges are sorted in ascending order. */
++static const struct regulator_linear_range buck1_volt_range[] = {
++ REGULATOR_LINEAR_RANGE(600000, 0, 0x4f, 12500),
++ REGULATOR_LINEAR_RANGE(1600000, 0x50, 0x54, 50000),
++};
++
++/* BUCK 2~5 have same ranges. */
++static const struct regulator_linear_range buck2_5_volt_range[] = {
++ REGULATOR_LINEAR_RANGE(600000, 0, 0x4f, 12500),
++ REGULATOR_LINEAR_RANGE(1600000, 0x50, 0x72, 50000),
++};
++
++static const unsigned int ldo1_volt_table[] = {
++ 600000, 650000, 700000, 750000, 800000, 850000, 900000, 950000,
++ 1000000, 1050000, 1100000, 1150000, 1200000, 1300000, 1400000, 1500000,
++};
++
++static const unsigned int ldo2_volt_table[] = {
++ 1700000, 1800000, 1900000, 2000000, 2100000, 2500000, 2700000, 2800000,
++};
++
++/* LDO 3~17 have same voltage table. */
++static const unsigned int ldo3_17_volt_table[] = {
++ 1200000, 1250000, 1700000, 1800000, 1850000, 1900000, 2500000, 2600000,
++ 2700000, 2750000, 2800000, 2850000, 2900000, 3000000, 3100000, 3300000,
++};
++
++/* LDO 18~19 have same voltage table. */
++static const unsigned int ldo18_19_volt_table[] = {
++ 1700000, 1800000, 1900000, 2500000, 2800000, 2900000, 3100000, 3300000,
++};
++
++static int pm800_get_current_limit(struct regulator_dev *rdev)
++{
++ struct pm800_regulator_info *info = rdev_get_drvdata(rdev);
++
++ return info->max_ua;
++}
++
++static const struct regulator_ops pm800_volt_range_ops = {
++ .list_voltage = regulator_list_voltage_linear_range,
++ .map_voltage = regulator_map_voltage_linear_range,
++ .set_voltage_sel = regulator_set_voltage_sel_regmap,
++ .get_voltage_sel = regulator_get_voltage_sel_regmap,
++ .enable = regulator_enable_regmap,
++ .disable = regulator_disable_regmap,
++ .is_enabled = regulator_is_enabled_regmap,
++ .get_current_limit = pm800_get_current_limit,
++};
++
++static const struct regulator_ops pm800_volt_table_ops = {
++ .list_voltage = regulator_list_voltage_table,
++ .map_voltage = regulator_map_voltage_iterate,
++ .set_voltage_sel = regulator_set_voltage_sel_regmap,
++ .get_voltage_sel = regulator_get_voltage_sel_regmap,
++ .enable = regulator_enable_regmap,
++ .disable = regulator_disable_regmap,
++ .is_enabled = regulator_is_enabled_regmap,
++ .get_current_limit = pm800_get_current_limit,
++};
++
++/* The array is indexed by id(PM800_ID_XXX) */
++static struct pm800_regulator_info pm800_regulator_info[] = {
++ PM800_BUCK(buck1, BUCK1, BUCK_ENA, 0, 3000000, buck1_volt_range, 0x55),
++ PM800_BUCK(buck2, BUCK2, BUCK_ENA, 1, 1200000, buck2_5_volt_range, 0x73),
++ PM800_BUCK(buck3, BUCK3, BUCK_ENA, 2, 1200000, buck2_5_volt_range, 0x73),
++ PM800_BUCK(buck4, BUCK4, BUCK_ENA, 3, 1200000, buck2_5_volt_range, 0x73),
++ PM800_BUCK(buck5, BUCK5, BUCK_ENA, 4, 1200000, buck2_5_volt_range, 0x73),
++
++ PM800_LDO(ldo1, LDO1, LDO_ENA1_1, 0, 200000, ldo1_volt_table),
++ PM800_LDO(ldo2, LDO2, LDO_ENA1_1, 1, 10000, ldo2_volt_table),
++ PM800_LDO(ldo3, LDO3, LDO_ENA1_1, 2, 300000, ldo3_17_volt_table),
++ PM800_LDO(ldo4, LDO4, LDO_ENA1_1, 3, 300000, ldo3_17_volt_table),
++ PM800_LDO(ldo5, LDO5, LDO_ENA1_1, 4, 300000, ldo3_17_volt_table),
++ PM800_LDO(ldo6, LDO6, LDO_ENA1_1, 5, 300000, ldo3_17_volt_table),
4047 |
++ PM800_LDO(ldo7, LDO7, LDO_ENA1_1, 6, 300000, ldo3_17_volt_table), |
4048 |
++ PM800_LDO(ldo8, LDO8, LDO_ENA1_1, 7, 300000, ldo3_17_volt_table), |
4049 |
++ PM800_LDO(ldo9, LDO9, LDO_ENA1_2, 0, 300000, ldo3_17_volt_table), |
4050 |
++ PM800_LDO(ldo10, LDO10, LDO_ENA1_2, 1, 300000, ldo3_17_volt_table), |
4051 |
++ PM800_LDO(ldo11, LDO11, LDO_ENA1_2, 2, 300000, ldo3_17_volt_table), |
4052 |
++ PM800_LDO(ldo12, LDO12, LDO_ENA1_2, 3, 300000, ldo3_17_volt_table), |
4053 |
++ PM800_LDO(ldo13, LDO13, LDO_ENA1_2, 4, 300000, ldo3_17_volt_table), |
4054 |
++ PM800_LDO(ldo14, LDO14, LDO_ENA1_2, 5, 300000, ldo3_17_volt_table), |
4055 |
++ PM800_LDO(ldo15, LDO15, LDO_ENA1_2, 6, 300000, ldo3_17_volt_table), |
4056 |
++ PM800_LDO(ldo16, LDO16, LDO_ENA1_2, 7, 300000, ldo3_17_volt_table), |
4057 |
++ PM800_LDO(ldo17, LDO17, LDO_ENA1_3, 0, 300000, ldo3_17_volt_table), |
4058 |
++ PM800_LDO(ldo18, LDO18, LDO_ENA1_3, 1, 200000, ldo18_19_volt_table), |
4059 |
++ PM800_LDO(ldo19, LDO19, LDO_ENA1_3, 2, 200000, ldo18_19_volt_table), |
4060 |
++}; |
4061 |
++ |
4062 |
++static int pm800_regulator_probe(struct platform_device *pdev) |
4063 |
++{ |
4064 |
++ struct pm80x_chip *chip = dev_get_drvdata(pdev->dev.parent); |
4065 |
++ struct pm80x_platform_data *pdata = dev_get_platdata(pdev->dev.parent); |
4066 |
++ struct regulator_config config = { }; |
4067 |
++ struct regulator_init_data *init_data; |
4068 |
++ int i, ret; |
4069 |
++ |
4070 |
++ if (pdata && pdata->num_regulators) { |
4071 |
++ unsigned int count = 0; |
4072 |
++ |
4073 |
++ /* Check whether num_regulator is valid. */ |
4074 |
++ for (i = 0; i < ARRAY_SIZE(pdata->regulators); i++) { |
4075 |
++ if (pdata->regulators[i]) |
4076 |
++ count++; |
4077 |
++ } |
4078 |
++ if (count != pdata->num_regulators) |
4079 |
++ return -EINVAL; |
4080 |
++ } |
4081 |
++ |
4082 |
++ config.dev = chip->dev; |
4083 |
++ config.regmap = chip->subchip->regmap_power; |
4084 |
++ for (i = 0; i < PM800_ID_RG_MAX; i++) { |
4085 |
++ struct regulator_dev *regulator; |
4086 |
++ |
4087 |
++ if (pdata && pdata->num_regulators) { |
4088 |
++ init_data = pdata->regulators[i]; |
4089 |
++ if (!init_data) |
4090 |
++ continue; |
4091 |
++ |
4092 |
++ config.init_data = init_data; |
4093 |
++ } |
4094 |
++ |
4095 |
++ config.driver_data = &pm800_regulator_info[i]; |
4096 |
++ |
4097 |
++ regulator = devm_regulator_register(&pdev->dev, |
4098 |
++ &pm800_regulator_info[i].desc, &config); |
4099 |
++ if (IS_ERR(regulator)) { |
4100 |
++ ret = PTR_ERR(regulator); |
4101 |
++ dev_err(&pdev->dev, "Failed to register %s\n", |
4102 |
++ pm800_regulator_info[i].desc.name); |
4103 |
++ return ret; |
4104 |
++ } |
4105 |
++ } |
4106 |
++ |
4107 |
++ return 0; |
4108 |
++} |
4109 |
++ |
4110 |
++static struct platform_driver pm800_regulator_driver = { |
4111 |
++ .driver = { |
4112 |
++ .name = "88pm80x-regulator", |
4113 |
++ }, |
4114 |
++ .probe = pm800_regulator_probe, |
4115 |
++}; |
4116 |
++ |
4117 |
++module_platform_driver(pm800_regulator_driver); |
4118 |
++ |
4119 |
++MODULE_LICENSE("GPL"); |
4120 |
++MODULE_AUTHOR("Joseph(Yossi) Hanin <yhanin@×××××××.com>"); |
4121 |
++MODULE_DESCRIPTION("Regulator Driver for Marvell 88PM800 PMIC"); |
4122 |
++MODULE_ALIAS("platform:88pm800-regulator"); |
4123 |
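
The two-range buck tables in the file above are what make the PM800_BUCK() descriptors work: regulator_list_voltage_linear_range() walks the ranges and turns a 7-bit selector into microvolts. A minimal userspace sketch of that mapping for buck1 (helper names are illustrative, not the kernel API):

#include <stdio.h>

/* Mirrors buck1_volt_range above: min_uV, min_sel, max_sel, step_uV. */
struct linear_range { unsigned int min_uv, min_sel, max_sel, step_uv; };

static const struct linear_range buck1_ranges[] = {
	{  600000, 0x00, 0x4f, 12500 },
	{ 1600000, 0x50, 0x54, 50000 },
};

/* Illustrative equivalent of regulator_list_voltage_linear_range(). */
static int buck1_sel_to_uv(unsigned int sel)
{
	for (unsigned int i = 0; i < 2; i++) {
		const struct linear_range *r = &buck1_ranges[i];

		if (sel >= r->min_sel && sel <= r->max_sel)
			return r->min_uv + (sel - r->min_sel) * r->step_uv;
	}
	return -1;	/* past n_voltages (0x55 selectors for buck1) */
}

int main(void)
{
	/* 0x4f -> 600000 + 79 * 12500 = 1587500 uV; 0x50 starts the
	 * coarser 50 mV range at 1600000 uV. */
	printf("%d %d\n", buck1_sel_to_uv(0x4f), buck1_sel_to_uv(0x50));
	return 0;
}
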
+diff --git a/drivers/regulator/88pm800.c b/drivers/regulator/88pm800.c |
4124 |
+deleted file mode 100644 |
4125 |
+index 69ae25886181..000000000000 |
4126 |
+--- a/drivers/regulator/88pm800.c |
4127 |
++++ /dev/null |
4128 |
+@@ -1,286 +0,0 @@ |
4129 |
+-// SPDX-License-Identifier: GPL-2.0-only |
4130 |
+-/* |
4131 |
+- * Regulators driver for Marvell 88PM800 |
4132 |
+- * |
4133 |
+- * Copyright (C) 2012 Marvell International Ltd. |
4134 |
+- * Joseph(Yossi) Hanin <yhanin@marvell.com>
4135 |
+- * Yi Zhang <yizhang@marvell.com>
4136 |
+- */ |
4137 |
+-#include <linux/module.h> |
4138 |
+-#include <linux/moduleparam.h> |
4139 |
+-#include <linux/init.h> |
4140 |
+-#include <linux/err.h> |
4141 |
+-#include <linux/regmap.h> |
4142 |
+-#include <linux/regulator/driver.h> |
4143 |
+-#include <linux/regulator/machine.h> |
4144 |
+-#include <linux/mfd/88pm80x.h> |
4145 |
+-#include <linux/delay.h> |
4146 |
+-#include <linux/io.h> |
4147 |
+-#include <linux/of.h> |
4148 |
+-#include <linux/regulator/of_regulator.h> |
4149 |
+- |
4150 |
+-/* LDO1 with DVC[0..3] */ |
4151 |
+-#define PM800_LDO1_VOUT (0x08) /* VOUT1 */ |
4152 |
+-#define PM800_LDO1_VOUT_2 (0x09) |
4153 |
+-#define PM800_LDO1_VOUT_3 (0x0A) |
4154 |
+-#define PM800_LDO2_VOUT (0x0B) |
4155 |
+-#define PM800_LDO3_VOUT (0x0C) |
4156 |
+-#define PM800_LDO4_VOUT (0x0D) |
4157 |
+-#define PM800_LDO5_VOUT (0x0E) |
4158 |
+-#define PM800_LDO6_VOUT (0x0F) |
4159 |
+-#define PM800_LDO7_VOUT (0x10) |
4160 |
+-#define PM800_LDO8_VOUT (0x11) |
4161 |
+-#define PM800_LDO9_VOUT (0x12) |
4162 |
+-#define PM800_LDO10_VOUT (0x13) |
4163 |
+-#define PM800_LDO11_VOUT (0x14) |
4164 |
+-#define PM800_LDO12_VOUT (0x15) |
4165 |
+-#define PM800_LDO13_VOUT (0x16) |
4166 |
+-#define PM800_LDO14_VOUT (0x17) |
4167 |
+-#define PM800_LDO15_VOUT (0x18) |
4168 |
+-#define PM800_LDO16_VOUT (0x19) |
4169 |
+-#define PM800_LDO17_VOUT (0x1A) |
4170 |
+-#define PM800_LDO18_VOUT (0x1B) |
4171 |
+-#define PM800_LDO19_VOUT (0x1C) |
4172 |
+- |
4173 |
+-/* BUCK1 with DVC[0..3] */ |
4174 |
+-#define PM800_BUCK1 (0x3C) |
4175 |
+-#define PM800_BUCK1_1 (0x3D) |
4176 |
+-#define PM800_BUCK1_2 (0x3E) |
4177 |
+-#define PM800_BUCK1_3 (0x3F) |
4178 |
+-#define PM800_BUCK2 (0x40) |
4179 |
+-#define PM800_BUCK3 (0x41) |
4180 |
+-#define PM800_BUCK4 (0x42) |
4181 |
+-#define PM800_BUCK4_1 (0x43) |
4182 |
+-#define PM800_BUCK4_2 (0x44) |
4183 |
+-#define PM800_BUCK4_3 (0x45) |
4184 |
+-#define PM800_BUCK5 (0x46) |
4185 |
+- |
4186 |
+-#define PM800_BUCK_ENA (0x50) |
4187 |
+-#define PM800_LDO_ENA1_1 (0x51) |
4188 |
+-#define PM800_LDO_ENA1_2 (0x52) |
4189 |
+-#define PM800_LDO_ENA1_3 (0x53) |
4190 |
+- |
4191 |
+-#define PM800_LDO_ENA2_1 (0x56) |
4192 |
+-#define PM800_LDO_ENA2_2 (0x57) |
4193 |
+-#define PM800_LDO_ENA2_3 (0x58) |
4194 |
+- |
4195 |
+-#define PM800_BUCK1_MISC1 (0x78) |
4196 |
+-#define PM800_BUCK3_MISC1 (0x7E) |
4197 |
+-#define PM800_BUCK4_MISC1 (0x81) |
4198 |
+-#define PM800_BUCK5_MISC1 (0x84) |
4199 |
+- |
4200 |
+-struct pm800_regulator_info { |
4201 |
+- struct regulator_desc desc; |
4202 |
+- int max_ua; |
4203 |
+-}; |
4204 |
+- |
4205 |
+-/* |
4206 |
+- * vreg - the buck regs string. |
4207 |
+- * ereg - the string for the enable register. |
4208 |
+- * ebit - the bit number in the enable register. |
4209 |
+- * amax - the current |
4210 |
+- * Buck has 2 kinds of voltage steps. It is easy to find voltage by ranges, |
4211 |
+- * not the constant voltage table. |
4212 |
+- * n_volt - Number of available selectors |
4213 |
+- */ |
4214 |
+-#define PM800_BUCK(match, vreg, ereg, ebit, amax, volt_ranges, n_volt) \ |
4215 |
+-{ \ |
4216 |
+- .desc = { \ |
4217 |
+- .name = #vreg, \ |
4218 |
+- .of_match = of_match_ptr(#match), \ |
4219 |
+- .regulators_node = of_match_ptr("regulators"), \ |
4220 |
+- .ops = &pm800_volt_range_ops, \ |
4221 |
+- .type = REGULATOR_VOLTAGE, \ |
4222 |
+- .id = PM800_ID_##vreg, \ |
4223 |
+- .owner = THIS_MODULE, \ |
4224 |
+- .n_voltages = n_volt, \ |
4225 |
+- .linear_ranges = volt_ranges, \ |
4226 |
+- .n_linear_ranges = ARRAY_SIZE(volt_ranges), \ |
4227 |
+- .vsel_reg = PM800_##vreg, \ |
4228 |
+- .vsel_mask = 0x7f, \ |
4229 |
+- .enable_reg = PM800_##ereg, \ |
4230 |
+- .enable_mask = 1 << (ebit), \ |
4231 |
+- }, \ |
4232 |
+- .max_ua = (amax), \ |
4233 |
+-} |
4234 |
+- |
4235 |
+-/* |
4236 |
+- * vreg - the LDO regs string |
4237 |
+- * ereg - the string for the enable register. |
4238 |
+- * ebit - the bit number in the enable register. |
4239 |
+- * amax - the current |
4240 |
+- * volt_table - the LDO voltage table |
4241 |
+- * For all the LDOes, there are too many ranges. Using volt_table will be |
4242 |
+- * simpler and faster. |
4243 |
+- */ |
4244 |
+-#define PM800_LDO(match, vreg, ereg, ebit, amax, ldo_volt_table) \ |
4245 |
+-{ \ |
4246 |
+- .desc = { \ |
4247 |
+- .name = #vreg, \ |
4248 |
+- .of_match = of_match_ptr(#match), \ |
4249 |
+- .regulators_node = of_match_ptr("regulators"), \ |
4250 |
+- .ops = &pm800_volt_table_ops, \ |
4251 |
+- .type = REGULATOR_VOLTAGE, \ |
4252 |
+- .id = PM800_ID_##vreg, \ |
4253 |
+- .owner = THIS_MODULE, \ |
4254 |
+- .n_voltages = ARRAY_SIZE(ldo_volt_table), \ |
4255 |
+- .vsel_reg = PM800_##vreg##_VOUT, \ |
4256 |
+- .vsel_mask = 0xf, \ |
4257 |
+- .enable_reg = PM800_##ereg, \ |
4258 |
+- .enable_mask = 1 << (ebit), \ |
4259 |
+- .volt_table = ldo_volt_table, \ |
4260 |
+- }, \ |
4261 |
+- .max_ua = (amax), \ |
4262 |
+-} |
4263 |
+- |
4264 |
+-/* Ranges are sorted in ascending order. */ |
4265 |
+-static const struct regulator_linear_range buck1_volt_range[] = { |
4266 |
+- REGULATOR_LINEAR_RANGE(600000, 0, 0x4f, 12500), |
4267 |
+- REGULATOR_LINEAR_RANGE(1600000, 0x50, 0x54, 50000), |
4268 |
+-}; |
4269 |
+- |
4270 |
+-/* BUCK 2~5 have same ranges. */ |
4271 |
+-static const struct regulator_linear_range buck2_5_volt_range[] = { |
4272 |
+- REGULATOR_LINEAR_RANGE(600000, 0, 0x4f, 12500), |
4273 |
+- REGULATOR_LINEAR_RANGE(1600000, 0x50, 0x72, 50000), |
4274 |
+-}; |
4275 |
+- |
4276 |
+-static const unsigned int ldo1_volt_table[] = { |
4277 |
+- 600000, 650000, 700000, 750000, 800000, 850000, 900000, 950000, |
4278 |
+- 1000000, 1050000, 1100000, 1150000, 1200000, 1300000, 1400000, 1500000, |
4279 |
+-}; |
4280 |
+- |
4281 |
+-static const unsigned int ldo2_volt_table[] = { |
4282 |
+- 1700000, 1800000, 1900000, 2000000, 2100000, 2500000, 2700000, 2800000, |
4283 |
+-}; |
4284 |
+- |
4285 |
+-/* LDO 3~17 have same voltage table. */ |
4286 |
+-static const unsigned int ldo3_17_volt_table[] = { |
4287 |
+- 1200000, 1250000, 1700000, 1800000, 1850000, 1900000, 2500000, 2600000, |
4288 |
+- 2700000, 2750000, 2800000, 2850000, 2900000, 3000000, 3100000, 3300000, |
4289 |
+-}; |
4290 |
+- |
4291 |
+-/* LDO 18~19 have same voltage table. */ |
4292 |
+-static const unsigned int ldo18_19_volt_table[] = { |
4293 |
+- 1700000, 1800000, 1900000, 2500000, 2800000, 2900000, 3100000, 3300000, |
4294 |
+-}; |
4295 |
+- |
4296 |
+-static int pm800_get_current_limit(struct regulator_dev *rdev) |
4297 |
+-{ |
4298 |
+- struct pm800_regulator_info *info = rdev_get_drvdata(rdev); |
4299 |
+- |
4300 |
+- return info->max_ua; |
4301 |
+-} |
4302 |
+- |
4303 |
+-static const struct regulator_ops pm800_volt_range_ops = { |
4304 |
+- .list_voltage = regulator_list_voltage_linear_range, |
4305 |
+- .map_voltage = regulator_map_voltage_linear_range, |
4306 |
+- .set_voltage_sel = regulator_set_voltage_sel_regmap, |
4307 |
+- .get_voltage_sel = regulator_get_voltage_sel_regmap, |
4308 |
+- .enable = regulator_enable_regmap, |
4309 |
+- .disable = regulator_disable_regmap, |
4310 |
+- .is_enabled = regulator_is_enabled_regmap, |
4311 |
+- .get_current_limit = pm800_get_current_limit, |
4312 |
+-}; |
4313 |
+- |
4314 |
+-static const struct regulator_ops pm800_volt_table_ops = { |
4315 |
+- .list_voltage = regulator_list_voltage_table, |
4316 |
+- .map_voltage = regulator_map_voltage_iterate, |
4317 |
+- .set_voltage_sel = regulator_set_voltage_sel_regmap, |
4318 |
+- .get_voltage_sel = regulator_get_voltage_sel_regmap, |
4319 |
+- .enable = regulator_enable_regmap, |
4320 |
+- .disable = regulator_disable_regmap, |
4321 |
+- .is_enabled = regulator_is_enabled_regmap, |
4322 |
+- .get_current_limit = pm800_get_current_limit, |
4323 |
+-}; |
4324 |
+- |
4325 |
+-/* The array is indexed by id(PM800_ID_XXX) */ |
4326 |
+-static struct pm800_regulator_info pm800_regulator_info[] = { |
4327 |
+- PM800_BUCK(buck1, BUCK1, BUCK_ENA, 0, 3000000, buck1_volt_range, 0x55), |
4328 |
+- PM800_BUCK(buck2, BUCK2, BUCK_ENA, 1, 1200000, buck2_5_volt_range, 0x73), |
4329 |
+- PM800_BUCK(buck3, BUCK3, BUCK_ENA, 2, 1200000, buck2_5_volt_range, 0x73), |
4330 |
+- PM800_BUCK(buck4, BUCK4, BUCK_ENA, 3, 1200000, buck2_5_volt_range, 0x73), |
4331 |
+- PM800_BUCK(buck5, BUCK5, BUCK_ENA, 4, 1200000, buck2_5_volt_range, 0x73), |
4332 |
+- |
4333 |
+- PM800_LDO(ldo1, LDO1, LDO_ENA1_1, 0, 200000, ldo1_volt_table), |
4334 |
+- PM800_LDO(ldo2, LDO2, LDO_ENA1_1, 1, 10000, ldo2_volt_table), |
4335 |
+- PM800_LDO(ldo3, LDO3, LDO_ENA1_1, 2, 300000, ldo3_17_volt_table), |
4336 |
+- PM800_LDO(ldo4, LDO4, LDO_ENA1_1, 3, 300000, ldo3_17_volt_table), |
4337 |
+- PM800_LDO(ldo5, LDO5, LDO_ENA1_1, 4, 300000, ldo3_17_volt_table), |
4338 |
+- PM800_LDO(ldo6, LDO6, LDO_ENA1_1, 5, 300000, ldo3_17_volt_table), |
4339 |
+- PM800_LDO(ldo7, LDO7, LDO_ENA1_1, 6, 300000, ldo3_17_volt_table), |
4340 |
+- PM800_LDO(ldo8, LDO8, LDO_ENA1_1, 7, 300000, ldo3_17_volt_table), |
4341 |
+- PM800_LDO(ldo9, LDO9, LDO_ENA1_2, 0, 300000, ldo3_17_volt_table), |
4342 |
+- PM800_LDO(ldo10, LDO10, LDO_ENA1_2, 1, 300000, ldo3_17_volt_table), |
4343 |
+- PM800_LDO(ldo11, LDO11, LDO_ENA1_2, 2, 300000, ldo3_17_volt_table), |
4344 |
+- PM800_LDO(ldo12, LDO12, LDO_ENA1_2, 3, 300000, ldo3_17_volt_table), |
4345 |
+- PM800_LDO(ldo13, LDO13, LDO_ENA1_2, 4, 300000, ldo3_17_volt_table), |
4346 |
+- PM800_LDO(ldo14, LDO14, LDO_ENA1_2, 5, 300000, ldo3_17_volt_table), |
4347 |
+- PM800_LDO(ldo15, LDO15, LDO_ENA1_2, 6, 300000, ldo3_17_volt_table), |
4348 |
+- PM800_LDO(ldo16, LDO16, LDO_ENA1_2, 7, 300000, ldo3_17_volt_table), |
4349 |
+- PM800_LDO(ldo17, LDO17, LDO_ENA1_3, 0, 300000, ldo3_17_volt_table), |
4350 |
+- PM800_LDO(ldo18, LDO18, LDO_ENA1_3, 1, 200000, ldo18_19_volt_table), |
4351 |
+- PM800_LDO(ldo19, LDO19, LDO_ENA1_3, 2, 200000, ldo18_19_volt_table), |
4352 |
+-}; |
4353 |
+- |
4354 |
+-static int pm800_regulator_probe(struct platform_device *pdev) |
4355 |
+-{ |
4356 |
+- struct pm80x_chip *chip = dev_get_drvdata(pdev->dev.parent); |
4357 |
+- struct pm80x_platform_data *pdata = dev_get_platdata(pdev->dev.parent); |
4358 |
+- struct regulator_config config = { }; |
4359 |
+- struct regulator_init_data *init_data; |
4360 |
+- int i, ret; |
4361 |
+- |
4362 |
+- if (pdata && pdata->num_regulators) { |
4363 |
+- unsigned int count = 0; |
4364 |
+- |
4365 |
+- /* Check whether num_regulator is valid. */ |
4366 |
+- for (i = 0; i < ARRAY_SIZE(pdata->regulators); i++) { |
4367 |
+- if (pdata->regulators[i]) |
4368 |
+- count++; |
4369 |
+- } |
4370 |
+- if (count != pdata->num_regulators) |
4371 |
+- return -EINVAL; |
4372 |
+- } |
4373 |
+- |
4374 |
+- config.dev = chip->dev; |
4375 |
+- config.regmap = chip->subchip->regmap_power; |
4376 |
+- for (i = 0; i < PM800_ID_RG_MAX; i++) { |
4377 |
+- struct regulator_dev *regulator; |
4378 |
+- |
4379 |
+- if (pdata && pdata->num_regulators) { |
4380 |
+- init_data = pdata->regulators[i]; |
4381 |
+- if (!init_data) |
4382 |
+- continue; |
4383 |
+- |
4384 |
+- config.init_data = init_data; |
4385 |
+- } |
4386 |
+- |
4387 |
+- config.driver_data = &pm800_regulator_info[i]; |
4388 |
+- |
4389 |
+- regulator = devm_regulator_register(&pdev->dev, |
4390 |
+- &pm800_regulator_info[i].desc, &config); |
4391 |
+- if (IS_ERR(regulator)) { |
4392 |
+- ret = PTR_ERR(regulator); |
4393 |
+- dev_err(&pdev->dev, "Failed to register %s\n", |
4394 |
+- pm800_regulator_info[i].desc.name); |
4395 |
+- return ret; |
4396 |
+- } |
4397 |
+- } |
4398 |
+- |
4399 |
+- return 0; |
4400 |
+-} |
4401 |
+- |
4402 |
+-static struct platform_driver pm800_regulator_driver = { |
4403 |
+- .driver = { |
4404 |
+- .name = "88pm80x-regulator", |
4405 |
+- }, |
4406 |
+- .probe = pm800_regulator_probe, |
4407 |
+-}; |
4408 |
+- |
4409 |
+-module_platform_driver(pm800_regulator_driver); |
4410 |
+- |
4411 |
+-MODULE_LICENSE("GPL"); |
4412 |
+-MODULE_AUTHOR("Joseph(Yossi) Hanin <yhanin@×××××××.com>"); |
4413 |
+-MODULE_DESCRIPTION("Regulator Driver for Marvell 88PM800 PMIC"); |
4414 |
+-MODULE_ALIAS("platform:88pm800-regulator"); |
4415 |
+diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile |
4416 |
+index 93f53840e8f1..486edf784c13 100644 |
4417 |
+--- a/drivers/regulator/Makefile |
4418 |
++++ b/drivers/regulator/Makefile |
4419 |
+@@ -11,7 +11,7 @@ obj-$(CONFIG_REGULATOR_VIRTUAL_CONSUMER) += virtual.o |
4420 |
+ obj-$(CONFIG_REGULATOR_USERSPACE_CONSUMER) += userspace-consumer.o |
4421 |
+ |
4422 |
+ obj-$(CONFIG_REGULATOR_88PG86X) += 88pg86x.o |
4423 |
+-obj-$(CONFIG_REGULATOR_88PM800) += 88pm800.o |
4424 |
++obj-$(CONFIG_REGULATOR_88PM800) += 88pm800-regulator.o |
4425 |
+ obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o |
4426 |
+ obj-$(CONFIG_REGULATOR_CPCAP) += cpcap-regulator.o |
4427 |
+ obj-$(CONFIG_REGULATOR_AAT2870) += aat2870-regulator.o |
4428 |
+diff --git a/drivers/staging/kpc2000/TODO b/drivers/staging/kpc2000/TODO |
4429 |
+index 8c7af29fefae..ed951acc829a 100644 |
4430 |
+--- a/drivers/staging/kpc2000/TODO |
4431 |
++++ b/drivers/staging/kpc2000/TODO |
4432 |
+@@ -1,7 +1,6 @@ |
4433 |
+ - the kpc_spi driver doesn't seem to let multiple transactions (to different instances of the core) happen in parallel... |
4434 |
+ - The kpc_i2c driver is a hot mess, it should probably be cleaned up a ton. It functions against current hardware though. |
4435 |
+ - pcard->card_num in kp2000_pcie_probe() is a global variable and needs atomic / locking / something better. |
4436 |
+-- probe_core_uio() probably needs error handling |
4437 |
+ - the loop in kp2000_probe_cores() that uses probe_core_uio() also probably needs error handling |
4438 |
+ - would be nice if the AIO fileops in kpc_dma could be made to work |
4439 |
+ - probably want to add a CONFIG_ option to control compilation of the AIO functions |
4440 |
+diff --git a/drivers/staging/kpc2000/kpc2000/cell_probe.c b/drivers/staging/kpc2000/kpc2000/cell_probe.c |
4441 |
+index e0dba91e7fa8..d6b57f550876 100644 |
4442 |
+--- a/drivers/staging/kpc2000/kpc2000/cell_probe.c |
4443 |
++++ b/drivers/staging/kpc2000/kpc2000/cell_probe.c |
4444 |
+@@ -295,6 +295,7 @@ int probe_core_uio(unsigned int core_num, struct kp2000_device *pcard, char *na |
4445 |
+ kudev->dev = device_create(kpc_uio_class, &pcard->pdev->dev, MKDEV(0,0), kudev, "%s.%d.%d.%d", kudev->uioinfo.name, pcard->card_num, cte.type, kudev->core_num); |
4446 |
+ if (IS_ERR(kudev->dev)) { |
4447 |
+ dev_err(&pcard->pdev->dev, "probe_core_uio device_create failed!\n"); |
4448 |
++ kfree(kudev); |
4449 |
+ return -ENODEV; |
4450 |
+ } |
4451 |
+ dev_set_drvdata(kudev->dev, kudev); |
4452 |
+@@ -302,6 +303,8 @@ int probe_core_uio(unsigned int core_num, struct kp2000_device *pcard, char *na |
4453 |
+ rv = uio_register_device(kudev->dev, &kudev->uioinfo); |
4454 |
+ if (rv){ |
4455 |
+ dev_err(&pcard->pdev->dev, "probe_core_uio failed uio_register_device: %d\n", rv); |
4456 |
++ put_device(kudev->dev); |
4457 |
++ kfree(kudev); |
4458 |
+ return rv; |
4459 |
+ } |
4460 |
+ |
4461 |
+diff --git a/drivers/staging/kpc2000/kpc_spi/spi_driver.c b/drivers/staging/kpc2000/kpc_spi/spi_driver.c |
4462 |
+index 86df16547a92..2f535022dc03 100644 |
4463 |
+--- a/drivers/staging/kpc2000/kpc_spi/spi_driver.c |
4464 |
++++ b/drivers/staging/kpc2000/kpc_spi/spi_driver.c |
4465 |
+@@ -333,7 +333,7 @@ kp_spi_transfer_one_message(struct spi_master *master, struct spi_message *m) |
4466 |
+ list_for_each_entry(transfer, &m->transfers, transfer_list) { |
4467 |
+ if (transfer->tx_buf == NULL && transfer->rx_buf == NULL && transfer->len) { |
4468 |
+ status = -EINVAL; |
4469 |
+- break; |
4470 |
++ goto error; |
4471 |
+ } |
4472 |
+ |
4473 |
+ /* transfer */ |
4474 |
+@@ -371,7 +371,7 @@ kp_spi_transfer_one_message(struct spi_master *master, struct spi_message *m) |
4475 |
+ |
4476 |
+ if (count != transfer->len) { |
4477 |
+ status = -EIO; |
4478 |
+- break; |
4479 |
++ goto error; |
4480 |
+ } |
4481 |
+ } |
4482 |
+ |
4483 |
+@@ -389,6 +389,10 @@ kp_spi_transfer_one_message(struct spi_master *master, struct spi_message *m) |
4484 |
+ /* done work */ |
4485 |
+ spi_finalize_current_message(master); |
4486 |
+ return 0; |
4487 |
++ |
4488 |
++ error: |
4489 |
++ m->status = status; |
4490 |
++ return status; |
4491 |
+ } |
4492 |
+ |
4493 |
+ static void |
4494 |
+diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c |
4495 |
+index ccafcc2c87ac..70433f756d8e 100644 |
4496 |
+--- a/drivers/staging/vt6656/main_usb.c |
4497 |
++++ b/drivers/staging/vt6656/main_usb.c |
4498 |
+@@ -402,16 +402,19 @@ static void vnt_free_int_bufs(struct vnt_private *priv) |
4499 |
+ kfree(priv->int_buf.data_buf); |
4500 |
+ } |
4501 |
+ |
4502 |
+-static bool vnt_alloc_bufs(struct vnt_private *priv) |
4503 |
++static int vnt_alloc_bufs(struct vnt_private *priv) |
4504 |
+ { |
4505 |
++ int ret = 0; |
4506 |
+ struct vnt_usb_send_context *tx_context; |
4507 |
+ struct vnt_rcb *rcb; |
4508 |
+ int ii; |
4509 |
+ |
4510 |
+ for (ii = 0; ii < priv->num_tx_context; ii++) { |
4511 |
+ tx_context = kmalloc(sizeof(*tx_context), GFP_KERNEL); |
4512 |
+- if (!tx_context) |
4513 |
++ if (!tx_context) { |
4514 |
++ ret = -ENOMEM; |
4515 |
+ goto free_tx; |
4516 |
++ } |
4517 |
+ |
4518 |
+ priv->tx_context[ii] = tx_context; |
4519 |
+ tx_context->priv = priv; |
4520 |
+@@ -419,16 +422,20 @@ static bool vnt_alloc_bufs(struct vnt_private *priv) |
4521 |
+ |
4522 |
+ /* allocate URBs */ |
4523 |
+ tx_context->urb = usb_alloc_urb(0, GFP_KERNEL); |
4524 |
+- if (!tx_context->urb) |
4525 |
++ if (!tx_context->urb) { |
4526 |
++ ret = -ENOMEM; |
4527 |
+ goto free_tx; |
4528 |
++ } |
4529 |
+ |
4530 |
+ tx_context->in_use = false; |
4531 |
+ } |
4532 |
+ |
4533 |
+ for (ii = 0; ii < priv->num_rcb; ii++) { |
4534 |
+ priv->rcb[ii] = kzalloc(sizeof(*priv->rcb[ii]), GFP_KERNEL); |
4535 |
+- if (!priv->rcb[ii]) |
4536 |
++ if (!priv->rcb[ii]) { |
4537 |
++ ret = -ENOMEM; |
4538 |
+ goto free_rx_tx; |
4539 |
++ } |
4540 |
+ |
4541 |
+ rcb = priv->rcb[ii]; |
4542 |
+ |
4543 |
+@@ -436,39 +443,46 @@ static bool vnt_alloc_bufs(struct vnt_private *priv) |
4544 |
+ |
4545 |
+ /* allocate URBs */ |
4546 |
+ rcb->urb = usb_alloc_urb(0, GFP_KERNEL); |
4547 |
+- if (!rcb->urb) |
4548 |
++ if (!rcb->urb) { |
4549 |
++ ret = -ENOMEM; |
4550 |
+ goto free_rx_tx; |
4551 |
++ } |
4552 |
+ |
4553 |
+ rcb->skb = dev_alloc_skb(priv->rx_buf_sz); |
4554 |
+- if (!rcb->skb) |
4555 |
++ if (!rcb->skb) { |
4556 |
++ ret = -ENOMEM; |
4557 |
+ goto free_rx_tx; |
4558 |
++ } |
4559 |
+ |
4560 |
+ rcb->in_use = false; |
4561 |
+ |
4562 |
+ /* submit rx urb */ |
4563 |
+- if (vnt_submit_rx_urb(priv, rcb)) |
4564 |
++ ret = vnt_submit_rx_urb(priv, rcb); |
4565 |
++ if (ret) |
4566 |
+ goto free_rx_tx; |
4567 |
+ } |
4568 |
+ |
4569 |
+ priv->interrupt_urb = usb_alloc_urb(0, GFP_KERNEL); |
4570 |
+- if (!priv->interrupt_urb) |
4571 |
++ if (!priv->interrupt_urb) { |
4572 |
++ ret = -ENOMEM; |
4573 |
+ goto free_rx_tx; |
4574 |
++ } |
4575 |
+ |
4576 |
+ priv->int_buf.data_buf = kmalloc(MAX_INTERRUPT_SIZE, GFP_KERNEL); |
4577 |
+ if (!priv->int_buf.data_buf) { |
4578 |
+- usb_free_urb(priv->interrupt_urb); |
4579 |
+- goto free_rx_tx; |
4580 |
++ ret = -ENOMEM; |
4581 |
++ goto free_rx_tx_urb; |
4582 |
+ } |
4583 |
+ |
4584 |
+- return true; |
4585 |
++ return 0; |
4586 |
+ |
4587 |
++free_rx_tx_urb: |
4588 |
++ usb_free_urb(priv->interrupt_urb); |
4589 |
+ free_rx_tx: |
4590 |
+ vnt_free_rx_bufs(priv); |
4591 |
+- |
4592 |
+ free_tx: |
4593 |
+ vnt_free_tx_bufs(priv); |
4594 |
+- |
4595 |
+- return false; |
4596 |
++ return ret; |
4597 |
+ } |
4598 |
+ |
4599 |
+ static void vnt_tx_80211(struct ieee80211_hw *hw, |
4600 |
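
The vnt_alloc_bufs() conversion above is the standard kernel error-path shape: return 0 or a negative errno instead of a bool, and unwind partially built state through ordered goto labels, newest allocation first (free_rx_tx_urb before free_rx_tx before free_tx). A self-contained sketch of the pattern, with invented names:

#include <errno.h>
#include <stdlib.h>

struct bufs { void *tx, *rx, *irq; };

static int bufs_alloc(struct bufs *b)
{
	int ret = 0;

	b->tx = malloc(64);
	if (!b->tx) {
		ret = -ENOMEM;
		goto out;
	}

	b->rx = malloc(64);
	if (!b->rx) {
		ret = -ENOMEM;
		goto free_tx;
	}

	b->irq = malloc(64);
	if (!b->irq) {
		ret = -ENOMEM;
		goto free_rx;
	}

	return 0;

free_rx:	/* later allocations are released first */
	free(b->rx);
free_tx:
	free(b->tx);
out:
	return ret;
}

int main(void)
{
	struct bufs b;
	int ret = bufs_alloc(&b);

	if (!ret) {
		free(b.irq);
		free(b.rx);
		free(b.tx);
	}
	return ret ? 1 : 0;
}
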
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c |
4601 |
+index 682300713be4..eb2e2d141c01 100644 |
4602 |
+--- a/drivers/tty/serial/8250/8250_port.c |
4603 |
++++ b/drivers/tty/serial/8250/8250_port.c |
4604 |
+@@ -1874,7 +1874,8 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir) |
4605 |
+ status = serial8250_rx_chars(up, status); |
4606 |
+ } |
4607 |
+ serial8250_modem_status(up); |
4608 |
+- if ((!up->dma || up->dma->tx_err) && (status & UART_LSR_THRE)) |
4609 |
++ if ((!up->dma || up->dma->tx_err) && (status & UART_LSR_THRE) && |
4610 |
++ (up->ier & UART_IER_THRI)) |
4611 |
+ serial8250_tx_chars(up); |
4612 |
+ |
4613 |
+ uart_unlock_and_check_sysrq(port, flags); |
4614 |
+diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c |
4615 |
+index b929c7ae3a27..7bab9a3eda92 100644 |
4616 |
+--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c |
4617 |
++++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c |
4618 |
+@@ -407,7 +407,16 @@ static int cpm_uart_startup(struct uart_port *port) |
4619 |
+ clrbits16(&pinfo->sccp->scc_sccm, UART_SCCM_RX); |
4620 |
+ } |
4621 |
+ cpm_uart_initbd(pinfo); |
4622 |
+- cpm_line_cr_cmd(pinfo, CPM_CR_INIT_TRX); |
4623 |
++ if (IS_SMC(pinfo)) { |
4624 |
++ out_be32(&pinfo->smcup->smc_rstate, 0); |
4625 |
++ out_be32(&pinfo->smcup->smc_tstate, 0); |
4626 |
++ out_be16(&pinfo->smcup->smc_rbptr, |
4627 |
++ in_be16(&pinfo->smcup->smc_rbase)); |
4628 |
++ out_be16(&pinfo->smcup->smc_tbptr, |
4629 |
++ in_be16(&pinfo->smcup->smc_tbase)); |
4630 |
++ } else { |
4631 |
++ cpm_line_cr_cmd(pinfo, CPM_CR_INIT_TRX); |
4632 |
++ } |
4633 |
+ } |
4634 |
+ /* Install interrupt handler. */ |
4635 |
+ retval = request_irq(port->irq, cpm_uart_int, 0, "cpm_uart", port); |
4636 |
+@@ -861,16 +870,14 @@ static void cpm_uart_init_smc(struct uart_cpm_port *pinfo) |
4637 |
+ (u8 __iomem *)pinfo->tx_bd_base - DPRAM_BASE); |
4638 |
+ |
4639 |
+ /* |
4640 |
+- * In case SMC1 is being relocated... |
4641 |
++ * In case SMC is being relocated... |
4642 |
+ */ |
4643 |
+-#if defined (CONFIG_I2C_SPI_SMC1_UCODE_PATCH) |
4644 |
+ out_be16(&up->smc_rbptr, in_be16(&pinfo->smcup->smc_rbase)); |
4645 |
+ out_be16(&up->smc_tbptr, in_be16(&pinfo->smcup->smc_tbase)); |
4646 |
+ out_be32(&up->smc_rstate, 0); |
4647 |
+ out_be32(&up->smc_tstate, 0); |
4648 |
+ out_be16(&up->smc_brkcr, 1); /* number of break chars */ |
4649 |
+ out_be16(&up->smc_brkec, 0); |
4650 |
+-#endif |
4651 |
+ |
4652 |
+ /* Set up the uart parameters in the |
4653 |
+ * parameter ram. |
4654 |
+@@ -884,8 +891,6 @@ static void cpm_uart_init_smc(struct uart_cpm_port *pinfo) |
4655 |
+ out_be16(&up->smc_brkec, 0); |
4656 |
+ out_be16(&up->smc_brkcr, 1); |
4657 |
+ |
4658 |
+- cpm_line_cr_cmd(pinfo, CPM_CR_INIT_TRX); |
4659 |
+- |
4660 |
+ /* Set UART mode, 8 bit, no parity, one stop. |
4661 |
+ * Enable receive and transmit. |
4662 |
+ */ |
4663 |
+diff --git a/drivers/tty/serial/digicolor-usart.c b/drivers/tty/serial/digicolor-usart.c |
4664 |
+index f460cca139e2..13ac36e2da4f 100644 |
4665 |
+--- a/drivers/tty/serial/digicolor-usart.c |
4666 |
++++ b/drivers/tty/serial/digicolor-usart.c |
4667 |
+@@ -541,7 +541,11 @@ static int __init digicolor_uart_init(void) |
4668 |
+ if (ret) |
4669 |
+ return ret; |
4670 |
+ |
4671 |
+- return platform_driver_register(&digicolor_uart_platform); |
4672 |
++ ret = platform_driver_register(&digicolor_uart_platform); |
4673 |
++ if (ret) |
4674 |
++ uart_unregister_driver(&digicolor_uart); |
4675 |
++ |
4676 |
++ return ret; |
4677 |
+ } |
4678 |
+ module_init(digicolor_uart_init); |
4679 |
+ |
4680 |
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c |
4681 |
+index 8b752e895053..10db3e54ac9e 100644 |
4682 |
+--- a/drivers/tty/serial/imx.c |
4683 |
++++ b/drivers/tty/serial/imx.c |
4684 |
+@@ -383,6 +383,7 @@ static void imx_uart_ucrs_restore(struct imx_port *sport, |
4685 |
+ } |
4686 |
+ #endif |
4687 |
+ |
4688 |
++/* called with port.lock taken and irqs caller dependent */ |
4689 |
+ static void imx_uart_rts_active(struct imx_port *sport, u32 *ucr2) |
4690 |
+ { |
4691 |
+ *ucr2 &= ~(UCR2_CTSC | UCR2_CTS); |
4692 |
+@@ -391,6 +392,7 @@ static void imx_uart_rts_active(struct imx_port *sport, u32 *ucr2) |
4693 |
+ mctrl_gpio_set(sport->gpios, sport->port.mctrl); |
4694 |
+ } |
4695 |
+ |
4696 |
++/* called with port.lock taken and irqs caller dependent */ |
4697 |
+ static void imx_uart_rts_inactive(struct imx_port *sport, u32 *ucr2) |
4698 |
+ { |
4699 |
+ *ucr2 &= ~UCR2_CTSC; |
4700 |
+@@ -400,6 +402,7 @@ static void imx_uart_rts_inactive(struct imx_port *sport, u32 *ucr2) |
4701 |
+ mctrl_gpio_set(sport->gpios, sport->port.mctrl); |
4702 |
+ } |
4703 |
+ |
4704 |
++/* called with port.lock taken and irqs caller dependent */ |
4705 |
+ static void imx_uart_rts_auto(struct imx_port *sport, u32 *ucr2) |
4706 |
+ { |
4707 |
+ *ucr2 |= UCR2_CTSC; |
4708 |
+@@ -1549,6 +1552,16 @@ imx_uart_set_termios(struct uart_port *port, struct ktermios *termios, |
4709 |
+ old_csize = CS8; |
4710 |
+ } |
4711 |
+ |
4712 |
++ del_timer_sync(&sport->timer); |
4713 |
++ |
4714 |
++ /* |
4715 |
++ * Ask the core to calculate the divisor for us. |
4716 |
++ */ |
4717 |
++ baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16); |
4718 |
++ quot = uart_get_divisor(port, baud); |
4719 |
++ |
4720 |
++ spin_lock_irqsave(&sport->port.lock, flags); |
4721 |
++ |
4722 |
+ if ((termios->c_cflag & CSIZE) == CS8) |
4723 |
+ ucr2 = UCR2_WS | UCR2_SRST | UCR2_IRTS; |
4724 |
+ else |
4725 |
+@@ -1592,16 +1605,6 @@ imx_uart_set_termios(struct uart_port *port, struct ktermios *termios, |
4726 |
+ ucr2 |= UCR2_PROE; |
4727 |
+ } |
4728 |
+ |
4729 |
+- del_timer_sync(&sport->timer); |
4730 |
+- |
4731 |
+- /* |
4732 |
+- * Ask the core to calculate the divisor for us. |
4733 |
+- */ |
4734 |
+- baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16); |
4735 |
+- quot = uart_get_divisor(port, baud); |
4736 |
+- |
4737 |
+- spin_lock_irqsave(&sport->port.lock, flags); |
4738 |
+- |
4739 |
+ sport->port.read_status_mask = 0; |
4740 |
+ if (termios->c_iflag & INPCK) |
4741 |
+ sport->port.read_status_mask |= (URXD_FRMERR | URXD_PRERR); |
4742 |
+diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c |
4743 |
+index e5aebbf5f302..c3afd128b8fc 100644 |
4744 |
+--- a/drivers/tty/serial/max310x.c |
4745 |
++++ b/drivers/tty/serial/max310x.c |
4746 |
+@@ -496,37 +496,48 @@ static bool max310x_reg_precious(struct device *dev, unsigned int reg) |
4747 |
+ |
4748 |
+ static int max310x_set_baud(struct uart_port *port, int baud) |
4749 |
+ { |
4750 |
+- unsigned int mode = 0, clk = port->uartclk, div = clk / baud; |
4751 |
++ unsigned int mode = 0, div = 0, frac = 0, c = 0, F = 0; |
4752 |
+ |
4753 |
+- /* Check for minimal value for divider */ |
4754 |
+- if (div < 16) |
4755 |
+- div = 16; |
4756 |
+- |
4757 |
+- if (clk % baud && (div / 16) < 0x8000) { |
4758 |
++ /* |
4759 |
++ * Calculate the integer divisor first. Select a proper mode |
4760 |
++ * in case if the requested baud is too high for the pre-defined |
4761 |
++ * clocks frequency. |
4762 |
++ */ |
4763 |
++ div = port->uartclk / baud; |
4764 |
++ if (div < 8) { |
4765 |
++ /* Mode x4 */ |
4766 |
++ c = 4; |
4767 |
++ mode = MAX310X_BRGCFG_4XMODE_BIT; |
4768 |
++ } else if (div < 16) { |
4769 |
+ /* Mode x2 */ |
4770 |
++ c = 8; |
4771 |
+ mode = MAX310X_BRGCFG_2XMODE_BIT; |
4772 |
+- clk = port->uartclk * 2; |
4773 |
+- div = clk / baud; |
4774 |
+- |
4775 |
+- if (clk % baud && (div / 16) < 0x8000) { |
4776 |
+- /* Mode x4 */ |
4777 |
+- mode = MAX310X_BRGCFG_4XMODE_BIT; |
4778 |
+- clk = port->uartclk * 4; |
4779 |
+- div = clk / baud; |
4780 |
+- } |
4781 |
++ } else { |
4782 |
++ c = 16; |
4783 |
+ } |
4784 |
+ |
4785 |
+- max310x_port_write(port, MAX310X_BRGDIVMSB_REG, (div / 16) >> 8); |
4786 |
+- max310x_port_write(port, MAX310X_BRGDIVLSB_REG, div / 16); |
4787 |
+- max310x_port_write(port, MAX310X_BRGCFG_REG, (div % 16) | mode); |
4788 |
++ /* Calculate the divisor in accordance with the fraction coefficient */ |
4789 |
++ div /= c; |
4790 |
++ F = c*baud; |
4791 |
++ |
4792 |
++ /* Calculate the baud rate fraction */ |
4793 |
++ if (div > 0) |
4794 |
++ frac = (16*(port->uartclk % F)) / F; |
4795 |
++ else |
4796 |
++ div = 1; |
4797 |
++ |
4798 |
++ max310x_port_write(port, MAX310X_BRGDIVMSB_REG, div >> 8); |
4799 |
++ max310x_port_write(port, MAX310X_BRGDIVLSB_REG, div); |
4800 |
++ max310x_port_write(port, MAX310X_BRGCFG_REG, frac | mode); |
4801 |
+ |
4802 |
+- return DIV_ROUND_CLOSEST(clk, div); |
4803 |
++ /* Return the actual baud rate we just programmed */ |
4804 |
++ return (16*port->uartclk) / (c*(16*div + frac)); |
4805 |
+ } |
4806 |
+ |
4807 |
+ static int max310x_update_best_err(unsigned long f, long *besterr) |
4808 |
+ { |
4809 |
+ /* Use baudrate 115200 for calculate error */ |
4810 |
+- long err = f % (115200 * 16); |
4811 |
++ long err = f % (460800 * 16); |
4812 |
+ |
4813 |
+ if ((*besterr < 0) || (*besterr > err)) { |
4814 |
+ *besterr = err; |
4815 |
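
The rewritten max310x_set_baud() above reduces to integer arithmetic: pick a clock-multiplier mode (x4, x2 or x1), split uartclk/baud into a 16-bit integer divisor plus a 4-bit fraction, and report the rate actually programmed. A standalone sketch of just that arithmetic (the clock value below is an example, not a driver default):

#include <stdio.h>

/* Same math as the hunk above; div/frac are what would land in the
 * BRGDIVMSB/BRGDIVLSB and BRGCFG registers. */
static int baud_calc(unsigned int uartclk, unsigned int baud,
		     unsigned int *div, unsigned int *frac)
{
	unsigned int c, d = uartclk / baud;

	if (d < 8)
		c = 4;		/* x4 mode */
	else if (d < 16)
		c = 8;		/* x2 mode */
	else
		c = 16;		/* x1 mode */

	d /= c;
	*frac = 0;
	if (d > 0)
		*frac = (16 * (uartclk % (c * baud))) / (c * baud);
	else
		d = 1;
	*div = d;

	return (16 * uartclk) / (c * (16 * d + *frac));
}

int main(void)
{
	unsigned int div, frac;
	int actual = baud_calc(3686400, 115200, &div, &frac);

	/* 3686400 / 115200 = 32 -> x1 mode, div 2, frac 0, 115200 baud */
	printf("div=%u frac=%u actual=%d\n", div, frac, actual);
	return 0;
}
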
+diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c |
4816 |
+index 23833ad952ba..3657a24913fc 100644 |
4817 |
+--- a/drivers/tty/serial/msm_serial.c |
4818 |
++++ b/drivers/tty/serial/msm_serial.c |
4819 |
+@@ -383,10 +383,14 @@ no_rx: |
4820 |
+ |
4821 |
+ static inline void msm_wait_for_xmitr(struct uart_port *port) |
4822 |
+ { |
4823 |
++ unsigned int timeout = 500000; |
4824 |
++ |
4825 |
+ while (!(msm_read(port, UART_SR) & UART_SR_TX_EMPTY)) { |
4826 |
+ if (msm_read(port, UART_ISR) & UART_ISR_TX_READY) |
4827 |
+ break; |
4828 |
+ udelay(1); |
4829 |
++ if (!timeout--) |
4830 |
++ break; |
4831 |
+ } |
4832 |
+ msm_write(port, UART_CR_CMD_RESET_TX_READY, UART_CR); |
4833 |
+ } |
4834 |
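
The msm_wait_for_xmitr() change above turns an unbounded busy-wait on a hardware status bit into a bounded one: the loop still polls, but gives up after roughly 500000 one-microsecond steps (about half a second), so a wedged transmitter can no longer hang the CPU. The shape of the fix, reduced to portable C with a stand-in for the register read:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the UART_SR/UART_ISR status poll; worst case: never ready. */
static bool tx_ready(void)
{
	return false;
}

static void wait_for_xmitr(void)
{
	unsigned int timeout = 500000;

	while (!tx_ready()) {
		/* the driver does udelay(1) here */
		if (!timeout--)
			break;	/* bail out instead of spinning forever */
	}
}

int main(void)
{
	wait_for_xmitr();
	puts("returned");	/* reached even though tx_ready() never is */
	return 0;
}
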
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c |
4835 |
+index 83f4dd0bfd74..4223cb496764 100644 |
4836 |
+--- a/drivers/tty/serial/serial_core.c |
4837 |
++++ b/drivers/tty/serial/serial_core.c |
4838 |
+@@ -1777,6 +1777,7 @@ static int uart_port_activate(struct tty_port *port, struct tty_struct *tty) |
4839 |
+ { |
4840 |
+ struct uart_state *state = container_of(port, struct uart_state, port); |
4841 |
+ struct uart_port *uport; |
4842 |
++ int ret; |
4843 |
+ |
4844 |
+ uport = uart_port_check(state); |
4845 |
+ if (!uport || uport->flags & UPF_DEAD) |
4846 |
+@@ -1787,7 +1788,11 @@ static int uart_port_activate(struct tty_port *port, struct tty_struct *tty) |
4847 |
+ /* |
4848 |
+ * Start up the serial port. |
4849 |
+ */ |
4850 |
+- return uart_startup(tty, state, 0); |
4851 |
++ ret = uart_startup(tty, state, 0); |
4852 |
++ if (ret > 0) |
4853 |
++ tty_port_set_active(port, 1); |
4854 |
++ |
4855 |
++ return ret; |
4856 |
+ } |
4857 |
+ |
4858 |
+ static const char *uart_type(struct uart_port *port) |
4859 |
+diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c |
4860 |
+index 39ed56214cd3..2b400189be91 100644 |
4861 |
+--- a/drivers/tty/serial/serial_mctrl_gpio.c |
4862 |
++++ b/drivers/tty/serial/serial_mctrl_gpio.c |
4863 |
+@@ -12,6 +12,7 @@ |
4864 |
+ #include <linux/termios.h> |
4865 |
+ #include <linux/serial_core.h> |
4866 |
+ #include <linux/module.h> |
4867 |
++#include <linux/property.h> |
4868 |
+ |
4869 |
+ #include "serial_mctrl_gpio.h" |
4870 |
+ |
4871 |
+@@ -116,6 +117,19 @@ struct mctrl_gpios *mctrl_gpio_init_noauto(struct device *dev, unsigned int idx) |
4872 |
+ |
4873 |
+ for (i = 0; i < UART_GPIO_MAX; i++) { |
4874 |
+ enum gpiod_flags flags; |
4875 |
++ char *gpio_str; |
4876 |
++ bool present; |
4877 |
++ |
4878 |
++ /* Check if GPIO property exists and continue if not */ |
4879 |
++ gpio_str = kasprintf(GFP_KERNEL, "%s-gpios", |
4880 |
++ mctrl_gpios_desc[i].name); |
4881 |
++ if (!gpio_str) |
4882 |
++ continue; |
4883 |
++ |
4884 |
++ present = device_property_present(dev, gpio_str); |
4885 |
++ kfree(gpio_str); |
4886 |
++ if (!present) |
4887 |
++ continue; |
4888 |
+ |
4889 |
+ if (mctrl_gpios_desc[i].dir_out) |
4890 |
+ flags = GPIOD_OUT_LOW; |
4891 |
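
The serial_mctrl_gpio change above avoids requesting modem-control GPIOs the firmware never described: it formats the property name ("cts-gpios", "dsr-gpios", ...) and only proceeds if the device actually has that property. A toy equivalent with a hard-coded property set (asprintf stands in for kasprintf, and the lookup table is invented):

#define _GNU_SOURCE
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Pretend the device tree only describes a CTS line. */
static bool property_present(const char *name)
{
	return strcmp(name, "cts-gpios") == 0;
}

int main(void)
{
	const char *names[] = { "cts", "dsr", "rng" };

	for (int i = 0; i < 3; i++) {
		char *s;

		if (asprintf(&s, "%s-gpios", names[i]) < 0)
			continue;	/* mirrors the !gpio_str bail-out */
		if (property_present(s))
			printf("request %s\n", s);	/* only cts-gpios */
		free(s);
	}
	return 0;
}
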
+diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c |
4892 |
+index abc705716aa0..d18c680aa64b 100644 |
4893 |
+--- a/drivers/tty/serial/sh-sci.c |
4894 |
++++ b/drivers/tty/serial/sh-sci.c |
4895 |
+@@ -1398,6 +1398,7 @@ static void sci_dma_tx_work_fn(struct work_struct *work) |
4896 |
+ struct circ_buf *xmit = &port->state->xmit; |
4897 |
+ unsigned long flags; |
4898 |
+ dma_addr_t buf; |
4899 |
++ int head, tail; |
4900 |
+ |
4901 |
+ /* |
4902 |
+ * DMA is idle now. |
4903 |
+@@ -1407,16 +1408,23 @@ static void sci_dma_tx_work_fn(struct work_struct *work) |
4904 |
+ * consistent xmit buffer state. |
4905 |
+ */ |
4906 |
+ spin_lock_irq(&port->lock); |
4907 |
+- buf = s->tx_dma_addr + (xmit->tail & (UART_XMIT_SIZE - 1)); |
4908 |
++ head = xmit->head; |
4909 |
++ tail = xmit->tail; |
4910 |
++ buf = s->tx_dma_addr + (tail & (UART_XMIT_SIZE - 1)); |
4911 |
+ s->tx_dma_len = min_t(unsigned int, |
4912 |
+- CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE), |
4913 |
+- CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE)); |
4914 |
+- spin_unlock_irq(&port->lock); |
4915 |
++ CIRC_CNT(head, tail, UART_XMIT_SIZE), |
4916 |
++ CIRC_CNT_TO_END(head, tail, UART_XMIT_SIZE)); |
4917 |
++ if (!s->tx_dma_len) { |
4918 |
++ /* Transmit buffer has been flushed */ |
4919 |
++ spin_unlock_irq(&port->lock); |
4920 |
++ return; |
4921 |
++ } |
4922 |
+ |
4923 |
+ desc = dmaengine_prep_slave_single(chan, buf, s->tx_dma_len, |
4924 |
+ DMA_MEM_TO_DEV, |
4925 |
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
4926 |
+ if (!desc) { |
4927 |
++ spin_unlock_irq(&port->lock); |
4928 |
+ dev_warn(port->dev, "Failed preparing Tx DMA descriptor\n"); |
4929 |
+ goto switch_to_pio; |
4930 |
+ } |
4931 |
+@@ -1424,18 +1432,18 @@ static void sci_dma_tx_work_fn(struct work_struct *work) |
4932 |
+ dma_sync_single_for_device(chan->device->dev, buf, s->tx_dma_len, |
4933 |
+ DMA_TO_DEVICE); |
4934 |
+ |
4935 |
+- spin_lock_irq(&port->lock); |
4936 |
+ desc->callback = sci_dma_tx_complete; |
4937 |
+ desc->callback_param = s; |
4938 |
+- spin_unlock_irq(&port->lock); |
4939 |
+ s->cookie_tx = dmaengine_submit(desc); |
4940 |
+ if (dma_submit_error(s->cookie_tx)) { |
4941 |
++ spin_unlock_irq(&port->lock); |
4942 |
+ dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n"); |
4943 |
+ goto switch_to_pio; |
4944 |
+ } |
4945 |
+ |
4946 |
++ spin_unlock_irq(&port->lock); |
4947 |
+ dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n", |
4948 |
+- __func__, xmit->buf, xmit->tail, xmit->head, s->cookie_tx); |
4949 |
++ __func__, xmit->buf, tail, head, s->cookie_tx); |
4950 |
+ |
4951 |
+ dma_async_issue_pending(chan); |
4952 |
+ return; |
4953 |
+@@ -1648,11 +1656,18 @@ static void sci_free_dma(struct uart_port *port) |
4954 |
+ |
4955 |
+ static void sci_flush_buffer(struct uart_port *port) |
4956 |
+ { |
4957 |
++ struct sci_port *s = to_sci_port(port); |
4958 |
++ |
4959 |
+ /* |
4960 |
+ * In uart_flush_buffer(), the xmit circular buffer has just been |
4961 |
+- * cleared, so we have to reset tx_dma_len accordingly. |
4962 |
++ * cleared, so we have to reset tx_dma_len accordingly, and stop any |
4963 |
++ * pending transfers |
4964 |
+ */ |
4965 |
+- to_sci_port(port)->tx_dma_len = 0; |
4966 |
++ s->tx_dma_len = 0; |
4967 |
++ if (s->chan_tx) { |
4968 |
++ dmaengine_terminate_async(s->chan_tx); |
4969 |
++ s->cookie_tx = -EINVAL; |
4970 |
++ } |
4971 |
+ } |
4972 |
+ #else /* !CONFIG_SERIAL_SH_SCI_DMA */ |
4973 |
+ static inline void sci_request_dma(struct uart_port *port) |
4974 |
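
The sh-sci fix above snapshots xmit->head and xmit->tail once under the port lock and feeds the same pair to both macros, then refuses to start DMA when the count is zero (a flushed buffer). CIRC_CNT() and CIRC_CNT_TO_END() come from <linux/circ_buf.h>; reduced to their arithmetic for a power-of-two buffer they look like this (GCC statement expressions, as in the kernel header):

#include <stdio.h>

#define SIZE 16	/* power of two, like UART_XMIT_SIZE */

#define CIRC_CNT(head, tail)	(((head) - (tail)) & (SIZE - 1))
#define CIRC_CNT_TO_END(head, tail)			\
	({ int end = SIZE - (tail);			\
	   int n = ((head) + end) & (SIZE - 1);		\
	   n < end ? n : end; })

int main(void)
{
	int head = 3, tail = 12;

	/* 7 bytes pending, but only 4 before the wrap at SIZE; the
	 * driver DMAs min(cnt, cnt_to_end), one contiguous chunk. */
	printf("%d %d\n", CIRC_CNT(head, tail), CIRC_CNT_TO_END(head, tail));
	return 0;
}
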
+diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c |
4975 |
+index 63e34d868de8..f8503f8fc44e 100644 |
4976 |
+--- a/drivers/tty/serial/sunhv.c |
4977 |
++++ b/drivers/tty/serial/sunhv.c |
4978 |
+@@ -397,7 +397,7 @@ static const struct uart_ops sunhv_pops = { |
4979 |
+ static struct uart_driver sunhv_reg = { |
4980 |
+ .owner = THIS_MODULE, |
4981 |
+ .driver_name = "sunhv", |
4982 |
+- .dev_name = "ttyS", |
4983 |
++ .dev_name = "ttyHV", |
4984 |
+ .major = TTY_MAJOR, |
4985 |
+ }; |
4986 |
+ |
4987 |
+diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c |
4988 |
+index 605354fd60b1..9dcc4d855ddd 100644 |
4989 |
+--- a/drivers/tty/serial/xilinx_uartps.c |
4990 |
++++ b/drivers/tty/serial/xilinx_uartps.c |
4991 |
+@@ -29,12 +29,12 @@ |
4992 |
+ |
4993 |
+ #define CDNS_UART_TTY_NAME "ttyPS" |
4994 |
+ #define CDNS_UART_NAME "xuartps" |
4995 |
+-#define CDNS_UART_MAJOR 0 /* use dynamic node allocation */ |
4996 |
+ #define CDNS_UART_FIFO_SIZE 64 /* FIFO size */ |
4997 |
+ #define CDNS_UART_REGISTER_SPACE 0x1000 |
4998 |
+ |
4999 |
+ /* Rx Trigger level */ |
5000 |
+ static int rx_trigger_level = 56; |
5001 |
++static int uartps_major; |
5002 |
+ module_param(rx_trigger_level, uint, S_IRUGO); |
5003 |
+ MODULE_PARM_DESC(rx_trigger_level, "Rx trigger level, 1-63 bytes"); |
5004 |
+ |
5005 |
+@@ -1517,7 +1517,7 @@ static int cdns_uart_probe(struct platform_device *pdev) |
5006 |
+ cdns_uart_uart_driver->owner = THIS_MODULE; |
5007 |
+ cdns_uart_uart_driver->driver_name = driver_name; |
5008 |
+ cdns_uart_uart_driver->dev_name = CDNS_UART_TTY_NAME; |
5009 |
+- cdns_uart_uart_driver->major = CDNS_UART_MAJOR; |
5010 |
++ cdns_uart_uart_driver->major = uartps_major; |
5011 |
+ cdns_uart_uart_driver->minor = cdns_uart_data->id; |
5012 |
+ cdns_uart_uart_driver->nr = 1; |
5013 |
+ |
5014 |
+@@ -1546,6 +1546,7 @@ static int cdns_uart_probe(struct platform_device *pdev) |
5015 |
+ goto err_out_id; |
5016 |
+ } |
5017 |
+ |
5018 |
++ uartps_major = cdns_uart_uart_driver->tty_driver->major; |
5019 |
+ cdns_uart_data->cdns_uart_driver = cdns_uart_uart_driver; |
5020 |
+ |
5021 |
+ /* |
5022 |
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c |
5023 |
+index 2c8e60c7dbd8..2844366dc173 100644 |
5024 |
+--- a/drivers/usb/core/hub.c |
5025 |
++++ b/drivers/usb/core/hub.c |
5026 |
+@@ -4002,6 +4002,9 @@ static int usb_set_lpm_timeout(struct usb_device *udev, |
5027 |
+ * control transfers to set the hub timeout or enable device-initiated U1/U2 |
5028 |
+ * will be successful. |
5029 |
+ * |
5030 |
++ * If the control transfer to enable device-initiated U1/U2 entry fails, then |
5031 |
++ * hub-initiated U1/U2 will be disabled. |
5032 |
++ * |
5033 |
+ * If we cannot set the parent hub U1/U2 timeout, we attempt to let the xHCI |
5034 |
+ * driver know about it. If that call fails, it should be harmless, and just |
5035 |
+ * take up more slightly more bus bandwidth for unnecessary U1/U2 exit latency. |
5036 |
+@@ -4056,23 +4059,24 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev, |
5037 |
+ * host know that this link state won't be enabled. |
5038 |
+ */ |
5039 |
+ hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state); |
5040 |
+- } else { |
5041 |
+- /* Only a configured device will accept the Set Feature |
5042 |
+- * U1/U2_ENABLE |
5043 |
+- */ |
5044 |
+- if (udev->actconfig) |
5045 |
+- usb_set_device_initiated_lpm(udev, state, true); |
5046 |
++ return; |
5047 |
++ } |
5048 |
+ |
5049 |
+- /* As soon as usb_set_lpm_timeout(timeout) returns 0, the |
5050 |
+- * hub-initiated LPM is enabled. Thus, LPM is enabled no |
5051 |
+- * matter the result of usb_set_device_initiated_lpm(). |
5052 |
+- * The only difference is whether device is able to initiate |
5053 |
+- * LPM. |
5054 |
+- */ |
5055 |
++ /* Only a configured device will accept the Set Feature |
5056 |
++ * U1/U2_ENABLE |
5057 |
++ */ |
5058 |
++ if (udev->actconfig && |
5059 |
++ usb_set_device_initiated_lpm(udev, state, true) == 0) { |
5060 |
+ if (state == USB3_LPM_U1) |
5061 |
+ udev->usb3_lpm_u1_enabled = 1; |
5062 |
+ else if (state == USB3_LPM_U2) |
5063 |
+ udev->usb3_lpm_u2_enabled = 1; |
5064 |
++ } else { |
5065 |
++ /* Don't request U1/U2 entry if the device |
5066 |
++ * cannot transition to U1/U2. |
5067 |
++ */ |
5068 |
++ usb_set_lpm_timeout(udev, state, 0); |
5069 |
++ hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state); |
5070 |
+ } |
5071 |
+ } |
5072 |
+ |
5073 |
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c |
5074 |
+index 4aff1d8dbc4f..6e9e172010fc 100644 |
5075 |
+--- a/drivers/usb/dwc3/core.c |
5076 |
++++ b/drivers/usb/dwc3/core.c |
5077 |
+@@ -1423,11 +1423,6 @@ static int dwc3_probe(struct platform_device *pdev) |
5078 |
+ dwc->regs = regs; |
5079 |
+ dwc->regs_size = resource_size(&dwc_res); |
5080 |
+ |
5081 |
+- if (!dwc3_core_is_valid(dwc)) { |
5082 |
+- dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n"); |
5083 |
+- return -ENODEV; |
5084 |
+- } |
5085 |
+- |
5086 |
+ dwc3_get_properties(dwc); |
5087 |
+ |
5088 |
+ dwc->reset = devm_reset_control_get_optional_shared(dev, NULL); |
5089 |
+@@ -1460,6 +1455,12 @@ static int dwc3_probe(struct platform_device *pdev) |
5090 |
+ if (ret) |
5091 |
+ goto unprepare_clks; |
5092 |
+ |
5093 |
++ if (!dwc3_core_is_valid(dwc)) { |
5094 |
++ dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n"); |
5095 |
++ ret = -ENODEV; |
5096 |
++ goto disable_clks; |
5097 |
++ } |
5098 |
++ |
5099 |
+ platform_set_drvdata(pdev, dwc); |
5100 |
+ dwc3_cache_hwparams(dwc); |
5101 |
+ |
5102 |
+@@ -1525,6 +1526,7 @@ err1: |
5103 |
+ pm_runtime_put_sync(&pdev->dev); |
5104 |
+ pm_runtime_disable(&pdev->dev); |
5105 |
+ |
5106 |
++disable_clks: |
5107 |
+ clk_bulk_disable(dwc->num_clks, dwc->clks); |
5108 |
+ unprepare_clks: |
5109 |
+ clk_bulk_unprepare(dwc->num_clks, dwc->clks); |
5110 |
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c |
5111 |
+index c7ed90084d1a..213ff03c8a9f 100644 |
5112 |
+--- a/drivers/usb/gadget/function/f_fs.c |
5113 |
++++ b/drivers/usb/gadget/function/f_fs.c |
5114 |
+@@ -1183,11 +1183,12 @@ static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from) |
5115 |
+ ENTER(); |
5116 |
+ |
5117 |
+ if (!is_sync_kiocb(kiocb)) { |
5118 |
+- p = kmalloc(sizeof(io_data), GFP_KERNEL); |
5119 |
++ p = kzalloc(sizeof(io_data), GFP_KERNEL); |
5120 |
+ if (unlikely(!p)) |
5121 |
+ return -ENOMEM; |
5122 |
+ p->aio = true; |
5123 |
+ } else { |
5124 |
++ memset(p, 0, sizeof(*p)); |
5125 |
+ p->aio = false; |
5126 |
+ } |
5127 |
+ |
5128 |
+@@ -1219,11 +1220,12 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to) |
5129 |
+ ENTER(); |
5130 |
+ |
5131 |
+ if (!is_sync_kiocb(kiocb)) { |
5132 |
+- p = kmalloc(sizeof(io_data), GFP_KERNEL); |
5133 |
++ p = kzalloc(sizeof(io_data), GFP_KERNEL); |
5134 |
+ if (unlikely(!p)) |
5135 |
+ return -ENOMEM; |
5136 |
+ p->aio = true; |
5137 |
+ } else { |
5138 |
++ memset(p, 0, sizeof(*p)); |
5139 |
+ p->aio = false; |
5140 |
+ } |
5141 |
+ |
5142 |
+diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c |
5143 |
+index 09a8ebd95588..6968b9f2b76b 100644 |
5144 |
+--- a/drivers/usb/host/hwa-hc.c |
5145 |
++++ b/drivers/usb/host/hwa-hc.c |
5146 |
+@@ -159,7 +159,7 @@ out: |
5147 |
+ return result; |
5148 |
+ |
5149 |
+ error_set_cluster_id: |
5150 |
+- wusb_cluster_id_put(wusbhc->cluster_id); |
5151 |
++ wusb_cluster_id_put(addr); |
5152 |
+ error_cluster_id_get: |
5153 |
+ goto out; |
5154 |
+ |
5155 |
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c |
5156 |
+index 3ce71cbfbb58..ad05c27b3a7b 100644 |
5157 |
+--- a/drivers/usb/host/pci-quirks.c |
5158 |
++++ b/drivers/usb/host/pci-quirks.c |
5159 |
+@@ -205,7 +205,7 @@ int usb_amd_find_chipset_info(void) |
5160 |
+ { |
5161 |
+ unsigned long flags; |
5162 |
+ struct amd_chipset_info info; |
5163 |
+- int ret; |
5164 |
++ int need_pll_quirk = 0; |
5165 |
+ |
5166 |
+ spin_lock_irqsave(&amd_lock, flags); |
5167 |
+ |
5168 |
+@@ -219,21 +219,28 @@ int usb_amd_find_chipset_info(void) |
5169 |
+ spin_unlock_irqrestore(&amd_lock, flags); |
5170 |
+ |
5171 |
+ if (!amd_chipset_sb_type_init(&info)) { |
5172 |
+- ret = 0; |
5173 |
+ goto commit; |
5174 |
+ } |
5175 |
+ |
5176 |
+- /* Below chipset generations needn't enable AMD PLL quirk */ |
5177 |
+- if (info.sb_type.gen == AMD_CHIPSET_UNKNOWN || |
5178 |
+- info.sb_type.gen == AMD_CHIPSET_SB600 || |
5179 |
+- info.sb_type.gen == AMD_CHIPSET_YANGTZE || |
5180 |
+- (info.sb_type.gen == AMD_CHIPSET_SB700 && |
5181 |
+- info.sb_type.rev > 0x3b)) { |
5182 |
++ switch (info.sb_type.gen) { |
5183 |
++ case AMD_CHIPSET_SB700: |
5184 |
++ need_pll_quirk = info.sb_type.rev <= 0x3B; |
5185 |
++ break; |
5186 |
++ case AMD_CHIPSET_SB800: |
5187 |
++ case AMD_CHIPSET_HUDSON2: |
5188 |
++ case AMD_CHIPSET_BOLTON: |
5189 |
++ need_pll_quirk = 1; |
5190 |
++ break; |
5191 |
++ default: |
5192 |
++ need_pll_quirk = 0; |
5193 |
++ break; |
5194 |
++ } |
5195 |
++ |
5196 |
++ if (!need_pll_quirk) { |
5197 |
+ if (info.smbus_dev) { |
5198 |
+ pci_dev_put(info.smbus_dev); |
5199 |
+ info.smbus_dev = NULL; |
5200 |
+ } |
5201 |
+- ret = 0; |
5202 |
+ goto commit; |
5203 |
+ } |
5204 |
+ |
5205 |
+@@ -252,7 +259,7 @@ int usb_amd_find_chipset_info(void) |
5206 |
+ } |
5207 |
+ } |
5208 |
+ |
5209 |
+- ret = info.probe_result = 1; |
5210 |
++ need_pll_quirk = info.probe_result = 1; |
5211 |
+ printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n"); |
5212 |
+ |
5213 |
+ commit: |
5214 |
+@@ -263,7 +270,7 @@ commit: |
5215 |
+ |
5216 |
+ /* Mark that we where here */ |
5217 |
+ amd_chipset.probe_count++; |
5218 |
+- ret = amd_chipset.probe_result; |
5219 |
++ need_pll_quirk = amd_chipset.probe_result; |
5220 |
+ |
5221 |
+ spin_unlock_irqrestore(&amd_lock, flags); |
5222 |
+ |
5223 |
+@@ -277,7 +284,7 @@ commit: |
5224 |
+ spin_unlock_irqrestore(&amd_lock, flags); |
5225 |
+ } |
5226 |
+ |
5227 |
+- return ret; |
5228 |
++ return need_pll_quirk; |
5229 |
+ } |
5230 |
+ EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info); |
5231 |
+ |
5232 |
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h |
5233 |
+index 92e764c54154..fabbce1c542a 100644 |
5234 |
+--- a/drivers/usb/host/xhci.h |
5235 |
++++ b/drivers/usb/host/xhci.h |
5236 |
+@@ -2170,7 +2170,8 @@ static inline bool xhci_urb_suitable_for_idt(struct urb *urb) |
5237 |
+ if (!usb_endpoint_xfer_isoc(&urb->ep->desc) && usb_urb_dir_out(urb) && |
5238 |
+ usb_endpoint_maxp(&urb->ep->desc) >= TRB_IDT_MAX_SIZE && |
5239 |
+ urb->transfer_buffer_length <= TRB_IDT_MAX_SIZE && |
5240 |
+- !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) |
5241 |
++ !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) && |
5242 |
++ !urb->num_sgs) |
5243 |
+ return true; |
5244 |
+ |
5245 |
+ return false; |
5246 |
+diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c |
5247 |
+index 4d6ae3795a88..6ca9111d150a 100644 |
5248 |
+--- a/drivers/usb/misc/usb251xb.c |
5249 |
++++ b/drivers/usb/misc/usb251xb.c |
5250 |
+@@ -375,7 +375,8 @@ out_err: |
5251 |
+ |
5252 |
+ #ifdef CONFIG_OF |
5253 |
+ static void usb251xb_get_ports_field(struct usb251xb *hub, |
5254 |
+- const char *prop_name, u8 port_cnt, u8 *fld) |
5255 |
++ const char *prop_name, u8 port_cnt, |
5256 |
++ bool ds_only, u8 *fld) |
5257 |
+ { |
5258 |
+ struct device *dev = hub->dev; |
5259 |
+ struct property *prop; |
5260 |
+@@ -383,7 +384,7 @@ static void usb251xb_get_ports_field(struct usb251xb *hub, |
5261 |
+ u32 port; |
5262 |
+ |
5263 |
+ of_property_for_each_u32(dev->of_node, prop_name, prop, p, port) { |
5264 |
+- if ((port >= 1) && (port <= port_cnt)) |
5265 |
++ if ((port >= ds_only ? 1 : 0) && (port <= port_cnt)) |
5266 |
+ *fld |= BIT(port); |
5267 |
+ else |
5268 |
+ dev_warn(dev, "port %u doesn't exist\n", port); |
5269 |
+@@ -501,15 +502,15 @@ static int usb251xb_get_ofdata(struct usb251xb *hub, |
5270 |
+ |
5271 |
+ hub->non_rem_dev = USB251XB_DEF_NON_REMOVABLE_DEVICES; |
5272 |
+ usb251xb_get_ports_field(hub, "non-removable-ports", data->port_cnt, |
5273 |
+- &hub->non_rem_dev); |
5274 |
++ true, &hub->non_rem_dev); |
5275 |
+ |
5276 |
+ hub->port_disable_sp = USB251XB_DEF_PORT_DISABLE_SELF; |
5277 |
+ usb251xb_get_ports_field(hub, "sp-disabled-ports", data->port_cnt, |
5278 |
+- &hub->port_disable_sp); |
5279 |
++ true, &hub->port_disable_sp); |
5280 |
+ |
5281 |
+ hub->port_disable_bp = USB251XB_DEF_PORT_DISABLE_BUS; |
5282 |
+ usb251xb_get_ports_field(hub, "bp-disabled-ports", data->port_cnt, |
5283 |
+- &hub->port_disable_bp); |
5284 |
++ true, &hub->port_disable_bp); |
5285 |
+ |
5286 |
+ hub->max_power_sp = USB251XB_DEF_MAX_POWER_SELF; |
5287 |
+ if (!of_property_read_u32(np, "sp-max-total-current-microamp", |
5288 |
+@@ -573,9 +574,7 @@ static int usb251xb_get_ofdata(struct usb251xb *hub, |
5289 |
+ */ |
5290 |
+ hub->port_swap = USB251XB_DEF_PORT_SWAP; |
5291 |
+ usb251xb_get_ports_field(hub, "swap-dx-lanes", data->port_cnt, |
5292 |
+- &hub->port_swap); |
5293 |
+- if (of_get_property(np, "swap-us-lanes", NULL)) |
5294 |
+- hub->port_swap |= BIT(0); |
5295 |
++ false, &hub->port_swap); |
5296 |
+ |
5297 |
+ /* The following parameters are currently not exposed to devicetree, but |
5298 |
+ * may be as soon as needed. |
+diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
+index 59190d88fa9f..556bb4fa0bee 100644
+--- a/drivers/usb/storage/scsiglue.c
++++ b/drivers/usb/storage/scsiglue.c
+@@ -28,6 +28,8 @@
+  * status of a command.
+  */
+
++#include <linux/blkdev.h>
++#include <linux/dma-mapping.h>
+ #include <linux/module.h>
+ #include <linux/mutex.h>
+
+@@ -99,6 +101,7 @@ static int slave_alloc (struct scsi_device *sdev)
+ static int slave_configure(struct scsi_device *sdev)
+ {
+ 	struct us_data *us = host_to_us(sdev->host);
++	struct device *dev = us->pusb_dev->bus->sysdev;
+
+ 	/*
+ 	 * Many devices have trouble transferring more than 32KB at a time,
+@@ -128,6 +131,14 @@ static int slave_configure(struct scsi_device *sdev)
+ 		blk_queue_max_hw_sectors(sdev->request_queue, 2048);
+ 	}
+
++	/*
++	 * The max_hw_sectors should be up to maximum size of a mapping for
++	 * the device. Otherwise, a DMA API might fail on swiotlb environment.
++	 */
++	blk_queue_max_hw_sectors(sdev->request_queue,
++		min_t(size_t, queue_max_hw_sectors(sdev->request_queue),
++		      dma_max_mapping_size(dev) >> SECTOR_SHIFT));
++
+ 	/*
+ 	 * Some USB host controllers can't do DMA; they have to use PIO.
+ 	 * They indicate this by setting their dma_mask to NULL. For
+diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
+index bc57ae9e2963..cce9ace651a2 100644
+--- a/fs/9p/vfs_addr.c
++++ b/fs/9p/vfs_addr.c
+@@ -35,8 +35,9 @@
+  * @page: structure to page
+  *
+  */
+-static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page)
++static int v9fs_fid_readpage(void *data, struct page *page)
+ {
++	struct p9_fid *fid = data;
+ 	struct inode *inode = page->mapping->host;
+ 	struct bio_vec bvec = {.bv_page = page, .bv_len = PAGE_SIZE};
+ 	struct iov_iter to;
+@@ -107,7 +108,8 @@ static int v9fs_vfs_readpages(struct file *filp, struct address_space *mapping,
+ 	if (ret == 0)
+ 		return ret;
+
+-	ret = read_cache_pages(mapping, pages, (void *)v9fs_vfs_readpage, filp);
++	ret = read_cache_pages(mapping, pages, v9fs_fid_readpage,
++			filp->private_data);
+ 	p9_debug(P9_DEBUG_VFS, " = %d\n", ret);
+ 	return ret;
+ }
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index a2aabdb85226..8c9c7d76c900 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -394,10 +394,31 @@ static noinline int add_async_extent(struct async_chunk *cow,
+ 	return 0;
+ }
+
++/*
++ * Check if the inode has flags compatible with compression
++ */
++static inline bool inode_can_compress(struct inode *inode)
++{
++	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW ||
++	    BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
++		return false;
++	return true;
++}
++
++/*
++ * Check if the inode needs to be submitted to compression, based on mount
++ * options, defragmentation, properties or heuristics.
++ */
+ static inline int inode_need_compress(struct inode *inode, u64 start, u64 end)
+ {
+ 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+
++	if (!inode_can_compress(inode)) {
++		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
++			KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
++			btrfs_ino(BTRFS_I(inode)));
++		return 0;
++	}
+ 	/* force compress */
+ 	if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
+ 		return 1;
+@@ -1630,7 +1651,8 @@ int btrfs_run_delalloc_range(struct inode *inode, struct page *locked_page,
+ 	} else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) {
+ 		ret = run_delalloc_nocow(inode, locked_page, start, end,
+ 					 page_started, 0, nr_written);
+-	} else if (!inode_need_compress(inode, start, end)) {
++	} else if (!inode_can_compress(inode) ||
++		   !inode_need_compress(inode, start, end)) {
+ 		ret = cow_file_range(inode, locked_page, start, end, end,
+ 				      page_started, nr_written, 1, NULL);
+ 	} else {
+diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
+index af109c0ba720..e0469816c678 100644
+--- a/fs/btrfs/props.c
++++ b/fs/btrfs/props.c
+@@ -337,7 +337,7 @@ static int inherit_props(struct btrfs_trans_handle *trans,
+ 	for (i = 0; i < ARRAY_SIZE(prop_handlers); i++) {
+ 		const struct prop_handler *h = &prop_handlers[i];
+ 		const char *value;
+-		u64 num_bytes;
++		u64 num_bytes = 0;
+
+ 		if (!h->inheritable)
+ 			continue;
+diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
+index 114ebfe30929..3951d39b9b75 100644
+--- a/fs/dlm/lowcomms.c
++++ b/fs/dlm/lowcomms.c
+@@ -1628,8 +1628,10 @@ static void clean_writequeues(void)
+
+ static void work_stop(void)
+ {
+-	destroy_workqueue(recv_workqueue);
+-	destroy_workqueue(send_workqueue);
++	if (recv_workqueue)
++		destroy_workqueue(recv_workqueue);
++	if (send_workqueue)
++		destroy_workqueue(send_workqueue);
+ }
+
+ static int work_start(void)
+@@ -1689,13 +1691,17 @@ static void work_flush(void)
+ 	struct hlist_node *n;
+ 	struct connection *con;
+
+-	flush_workqueue(recv_workqueue);
+-	flush_workqueue(send_workqueue);
++	if (recv_workqueue)
++		flush_workqueue(recv_workqueue);
++	if (send_workqueue)
++		flush_workqueue(send_workqueue);
+ 	do {
+ 		ok = 1;
+ 		foreach_conn(stop_conn);
+-		flush_workqueue(recv_workqueue);
+-		flush_workqueue(send_workqueue);
++		if (recv_workqueue)
++			flush_workqueue(recv_workqueue);
++		if (send_workqueue)
++			flush_workqueue(send_workqueue);
+ 		for (i = 0; i < CONN_HASH_SIZE && ok; i++) {
+ 			hlist_for_each_entry_safe(con, n,
+ 						  &connection_hash[i], list) {
+diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
+index ed70b68b2b38..d0539ddad6e2 100644
+--- a/fs/f2fs/checkpoint.c
++++ b/fs/f2fs/checkpoint.c
+@@ -832,17 +832,6 @@ static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
+ 		return -EINVAL;
+ 	}
+
+-	if (__is_set_ckpt_flags(*cp_block, CP_LARGE_NAT_BITMAP_FLAG)) {
+-		if (crc_offset != CP_MIN_CHKSUM_OFFSET) {
+-			f2fs_put_page(*cp_page, 1);
+-			f2fs_msg(sbi->sb, KERN_WARNING,
+-				"layout of large_nat_bitmap is deprecated, "
+-				"run fsck to repair, chksum_offset: %zu",
+-				crc_offset);
+-			return -EINVAL;
+-		}
+-	}
+-
+ 	crc = f2fs_checkpoint_chksum(sbi, *cp_block);
+ 	if (crc != cur_cp_crc(*cp_block)) {
+ 		f2fs_put_page(*cp_page, 1);
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index eda4181d2092..923923603a7d 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -2262,6 +2262,9 @@ static inline bool __should_serialize_io(struct inode *inode,
+ 		return false;
+ 	if (IS_NOQUOTA(inode))
+ 		return false;
++	/* to avoid deadlock in path of data flush */
++	if (F2FS_I(inode)->cp_task)
++		return false;
+ 	if (wbc->sync_mode != WB_SYNC_ALL)
+ 		return true;
+ 	if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 06b89a9862ab..cbdc2f88a98c 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -1207,6 +1207,7 @@ struct f2fs_sb_info {
+ 	/* for inode management */
+ 	struct list_head inode_list[NR_INODE_TYPE];	/* dirty inode list */
+ 	spinlock_t inode_lock[NR_INODE_TYPE];	/* for dirty inode list lock */
++	struct mutex flush_lock;		/* for flush exclusion */
+
+ 	/* for extent tree cache */
+ 	struct radix_tree_root extent_tree_root;/* cache extent cache entries */
+@@ -1766,8 +1767,12 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
+
+ 	if (!__allow_reserved_blocks(sbi, inode, true))
+ 		avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
+-	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
+-		avail_user_block_count -= sbi->unusable_block_count;
++	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
++		if (avail_user_block_count > sbi->unusable_block_count)
++			avail_user_block_count -= sbi->unusable_block_count;
++		else
++			avail_user_block_count = 0;
++	}
+ 	if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
+ 		diff = sbi->total_valid_block_count - avail_user_block_count;
+ 		if (diff > *count)
+@@ -1967,7 +1972,7 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
+ 					struct inode *inode, bool is_inode)
+ {
+ 	block_t	valid_block_count;
+-	unsigned int valid_node_count;
++	unsigned int valid_node_count, user_block_count;
+ 	int err;
+
+ 	if (is_inode) {
+@@ -1994,10 +1999,11 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
+
+ 	if (!__allow_reserved_blocks(sbi, inode, false))
+ 		valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks;
++	user_block_count = sbi->user_block_count;
+ 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
+-		valid_block_count += sbi->unusable_block_count;
++		user_block_count -= sbi->unusable_block_count;
+
+-	if (unlikely(valid_block_count > sbi->user_block_count)) {
++	if (unlikely(valid_block_count > user_block_count)) {
+ 		spin_unlock(&sbi->stat_lock);
+ 		goto enospc;
+ 	}
+@@ -2198,7 +2204,7 @@ static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
+ 		get_pages(sbi, F2FS_DIO_WRITE))
+ 		return false;
+
+-	if (SM_I(sbi) && SM_I(sbi)->dcc_info &&
++	if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info &&
+ 		atomic_read(&SM_I(sbi)->dcc_info->queued_discard))
+ 		return false;
+
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index 8dee063c833f..ce15fbcd7cff 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -546,9 +546,13 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
+ 		if (test_opt(sbi, DATA_FLUSH)) {
+ 			struct blk_plug plug;
+
++			mutex_lock(&sbi->flush_lock);
++
+ 			blk_start_plug(&plug);
+ 			f2fs_sync_dirty_inodes(sbi, FILE_INODE);
+ 			blk_finish_plug(&plug);
++
++			mutex_unlock(&sbi->flush_lock);
+ 		}
+ 		f2fs_sync_fs(sbi->sb, true);
+ 		stat_inc_bg_cp_count(sbi->stat_info);
+@@ -872,7 +876,9 @@ void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi)
+ int f2fs_disable_cp_again(struct f2fs_sb_info *sbi)
+ {
+ 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+-	block_t ovp = overprovision_segments(sbi) << sbi->log_blocks_per_seg;
++	int ovp_hole_segs =
++		(overprovision_segments(sbi) - reserved_segments(sbi));
++	block_t ovp_holes = ovp_hole_segs << sbi->log_blocks_per_seg;
+ 	block_t holes[2] = {0, 0};	/* DATA and NODE */
+ 	struct seg_entry *se;
+ 	unsigned int segno;
+@@ -887,10 +893,10 @@ int f2fs_disable_cp_again(struct f2fs_sb_info *sbi)
+ 	}
+ 	mutex_unlock(&dirty_i->seglist_lock);
+
+-	if (holes[DATA] > ovp || holes[NODE] > ovp)
++	if (holes[DATA] > ovp_holes || holes[NODE] > ovp_holes)
+ 		return -EAGAIN;
+ 	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK) &&
+-		dirty_segments(sbi) > overprovision_segments(sbi))
++		dirty_segments(sbi) > ovp_hole_segs)
+ 		return -EAGAIN;
+ 	return 0;
+ }
+@@ -1480,6 +1486,10 @@ static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
+ 		list_for_each_entry_safe(dc, tmp, pend_list, list) {
+ 			f2fs_bug_on(sbi, dc->state != D_PREP);
+
++			if (dpolicy->timeout != 0 &&
++				f2fs_time_over(sbi, dpolicy->timeout))
++				break;
++
+ 			if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
+ 						!is_idle(sbi, DISCARD_TIME)) {
+ 				io_interrupted = true;
+@@ -3393,6 +3403,11 @@ static int read_compacted_summaries(struct f2fs_sb_info *sbi)
+ 		seg_i = CURSEG_I(sbi, i);
+ 		segno = le32_to_cpu(ckpt->cur_data_segno[i]);
+ 		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
++		if (blk_off > ENTRIES_IN_SUM) {
++			f2fs_bug_on(sbi, 1);
++			f2fs_put_page(page, 1);
++			return -EFAULT;
++		}
+ 		seg_i->next_segno = segno;
+ 		reset_curseg(sbi, i, 0);
+ 		seg_i->alloc_type = ckpt->alloc_type[i];
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 6b959bbb336a..4b47ac994daf 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -2718,6 +2718,15 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
+ 		return 1;
+ 	}
+
++	if (__is_set_ckpt_flags(ckpt, CP_LARGE_NAT_BITMAP_FLAG) &&
++		le32_to_cpu(ckpt->checksum_offset) != CP_MIN_CHKSUM_OFFSET) {
++		f2fs_msg(sbi->sb, KERN_WARNING,
++			"layout of large_nat_bitmap is deprecated, "
++			"run fsck to repair, chksum_offset: %u",
++			le32_to_cpu(ckpt->checksum_offset));
++		return 1;
++	}
++
+ 	if (unlikely(f2fs_cp_error(sbi))) {
+ 		f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
+ 		return 1;
+@@ -3287,6 +3296,7 @@ try_onemore:
+ 		INIT_LIST_HEAD(&sbi->inode_list[i]);
+ 		spin_lock_init(&sbi->inode_lock[i]);
+ 	}
++	mutex_init(&sbi->flush_lock);
+
+ 	f2fs_init_extent_cache_info(sbi);
+
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 4ef62a45045d..6c09cedcf17d 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -231,6 +231,7 @@ struct io_ring_ctx {
+ 	struct task_struct	*sqo_thread;	/* if using sq thread polling */
+ 	struct mm_struct	*sqo_mm;
+ 	wait_queue_head_t	sqo_wait;
++	struct completion	sqo_thread_started;
+
+ 	struct {
+ 		/* CQ ring */
+@@ -330,6 +331,9 @@ struct io_kiocb {
+ #define REQ_F_SEQ_PREV		8	/* sequential with previous */
+ #define REQ_F_IO_DRAIN		16	/* drain existing IO first */
+ #define REQ_F_IO_DRAINED	32	/* drain done */
++#define REQ_F_LINK		64	/* linked sqes */
++#define REQ_F_LINK_DONE		128	/* linked sqes done */
++#define REQ_F_FAIL_LINK		256	/* fail rest of links */
+ 	u64			user_data;
+ 	u32			error;	/* iopoll result from callback */
+ 	u32			sequence;
+@@ -403,6 +407,7 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
+ 	ctx->flags = p->flags;
+ 	init_waitqueue_head(&ctx->cq_wait);
+ 	init_completion(&ctx->ctx_done);
++	init_completion(&ctx->sqo_thread_started);
+ 	mutex_init(&ctx->uring_lock);
+ 	init_waitqueue_head(&ctx->wait);
+ 	for (i = 0; i < ARRAY_SIZE(ctx->pending_async); i++) {
+@@ -423,7 +428,7 @@ static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
+ 	if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
+ 		return false;
+
+-	return req->sequence > ctx->cached_cq_tail + ctx->sq_ring->dropped;
++	return req->sequence != ctx->cached_cq_tail + ctx->sq_ring->dropped;
+ }
+
+ static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
+@@ -996,8 +1001,43 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
+ 	 */
+ 	offset = buf_addr - imu->ubuf;
+ 	iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
+-	if (offset)
+-		iov_iter_advance(iter, offset);
++
++	if (offset) {
++		/*
++		 * Don't use iov_iter_advance() here, as it's really slow for
++		 * using the latter parts of a big fixed buffer - it iterates
++		 * over each segment manually. We can cheat a bit here, because
++		 * we know that:
++		 *
++		 * 1) it's a BVEC iter, we set it up
++		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
++		 *    first and last bvec
++		 *
++		 * So just find our index, and adjust the iterator afterwards.
++		 * If the offset is within the first bvec (or the whole first
++		 * bvec, just use iov_iter_advance(). This makes it easier
++		 * since we can just skip the first segment, which may not
++		 * be PAGE_SIZE aligned.
++		 */
++		const struct bio_vec *bvec = imu->bvec;
++
++		if (offset <= bvec->bv_len) {
++			iov_iter_advance(iter, offset);
++		} else {
++			unsigned long seg_skip;
++
++			/* skip first vec */
++			offset -= bvec->bv_len;
++			seg_skip = 1 + (offset >> PAGE_SHIFT);
++
++			iter->bvec = bvec + seg_skip;
++			iter->nr_segs -= seg_skip;
++			iter->count -= (seg_skip << PAGE_SHIFT);
++			iter->iov_offset = offset & ~PAGE_MASK;
++			if (iter->iov_offset)
++				iter->count -= iter->iov_offset;
++		}
++	}
+
+ 	/* don't drop a reference to these pages */
+ 	iter->type |= ITER_BVEC_FLAG_NO_REF;
+@@ -1487,6 +1527,8 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ 	INIT_LIST_HEAD(&poll->wait.entry);
+ 	init_waitqueue_func_entry(&poll->wait, io_poll_wake);
+
++	INIT_LIST_HEAD(&req->list);
++
+ 	mask = vfs_poll(poll->file, &ipt.pt) & poll->events;
+
+ 	spin_lock_irq(&ctx->completion_lock);
+@@ -1694,6 +1736,10 @@ restart:
+ 		/* async context always use a copy of the sqe */
+ 		kfree(sqe);
+
++		/* req from defer and link list needn't decrease async cnt */
++		if (req->flags & (REQ_F_IO_DRAINED | REQ_F_LINK_DONE))
++			goto out;
++
+ 		if (!async_list)
+ 			break;
+ 		if (!list_empty(&req_list)) {
+@@ -1741,6 +1787,7 @@ restart:
+ 		}
+ 	}
+
++out:
+ 	if (cur_mm) {
+ 		set_fs(old_fs);
+ 		unuse_mm(cur_mm);
+@@ -1767,6 +1814,10 @@ static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req)
+ 	ret = true;
+ 	spin_lock(&list->lock);
+ 	list_add_tail(&req->list, &list->list);
++	/*
++	 * Ensure we see a simultaneous modification from io_sq_wq_submit_work()
++	 */
++	smp_mb();
+ 	if (!atomic_read(&list->cnt)) {
+ 		list_del_init(&req->list);
+ 		ret = false;
+@@ -2009,6 +2060,8 @@ static int io_sq_thread(void *data)
+ 	unsigned inflight;
+ 	unsigned long timeout;
+
++	complete(&ctx->sqo_thread_started);
++
+ 	old_fs = get_fs();
+ 	set_fs(USER_DS);
+
+@@ -2243,6 +2296,7 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
+ static void io_sq_thread_stop(struct io_ring_ctx *ctx)
+ {
+ 	if (ctx->sqo_thread) {
++		wait_for_completion(&ctx->sqo_thread_started);
+ 		/*
+ 		 * The park is a bit of a work-around, without it we get
+ 		 * warning spews on shutdown with SQPOLL set and affinity
+diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
+index b428c295d13f..5778d1347b35 100644
+--- a/fs/notify/fanotify/fanotify.c
++++ b/fs/notify/fanotify/fanotify.c
+@@ -288,10 +288,13 @@ struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group,
+ 	/*
+ 	 * For queues with unlimited length lost events are not expected and
+ 	 * can possibly have security implications. Avoid losing events when
+-	 * memory is short.
++	 * memory is short. For the limited size queues, avoid OOM killer in the
++	 * target monitoring memcg as it may have security repercussion.
+ 	 */
+ 	if (group->max_events == UINT_MAX)
+ 		gfp |= __GFP_NOFAIL;
++	else
++		gfp |= __GFP_RETRY_MAYFAIL;
+
+ 	/* Whoever is interested in the event, pays for the allocation. */
+ 	memalloc_use_memcg(group->memcg);
+diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
+index 2fda08b2b885..d510223d302c 100644
+--- a/fs/notify/inotify/inotify_fsnotify.c
++++ b/fs/notify/inotify/inotify_fsnotify.c
+@@ -90,9 +90,13 @@ int inotify_handle_event(struct fsnotify_group *group,
+ 	i_mark = container_of(inode_mark, struct inotify_inode_mark,
+ 			      fsn_mark);
+
+-	/* Whoever is interested in the event, pays for the allocation. */
++	/*
++	 * Whoever is interested in the event, pays for the allocation. Do not
++	 * trigger OOM killer in the target monitoring memcg as it may have
++	 * security repercussion.
++	 */
+ 	memalloc_use_memcg(group->memcg);
+-	event = kmalloc(alloc_len, GFP_KERNEL_ACCOUNT);
++	event = kmalloc(alloc_len, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
+ 	memalloc_unuse_memcg();
+
+ 	if (unlikely(!event)) {
+diff --git a/fs/open.c b/fs/open.c
+index b5b80469b93d..a59abe3c669a 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -374,6 +374,25 @@ long do_faccessat(int dfd, const char __user *filename, int mode)
+ 				override_cred->cap_permitted;
+ 	}
+
++	/*
++	 * The new set of credentials can *only* be used in
++	 * task-synchronous circumstances, and does not need
++	 * RCU freeing, unless somebody then takes a separate
++	 * reference to it.
++	 *
++	 * NOTE! This is _only_ true because this credential
++	 * is used purely for override_creds() that installs
++	 * it as the subjective cred. Other threads will be
++	 * accessing ->real_cred, not the subjective cred.
++	 *
++	 * If somebody _does_ make a copy of this (using the
++	 * 'get_current_cred()' function), that will clear the
++	 * non_rcu field, because now that other user may be
++	 * expecting RCU freeing. But normal thread-synchronous
++	 * cred accesses will keep things non-RCY.
++	 */
++	override_cred->non_rcu = 1;
++
+ 	old_cred = override_creds(override_cred);
+ retry:
+ 	res = user_path_at(dfd, filename, lookup_flags, &path);
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 255f6754c70d..03517154fe0f 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -1962,9 +1962,12 @@ static int map_files_d_revalidate(struct dentry *dentry, unsigned int flags)
+ 		goto out;
+
+ 	if (!dname_to_vma_addr(dentry, &vm_start, &vm_end)) {
+-		down_read(&mm->mmap_sem);
+-		exact_vma_exists = !!find_exact_vma(mm, vm_start, vm_end);
+-		up_read(&mm->mmap_sem);
++		status = down_read_killable(&mm->mmap_sem);
++		if (!status) {
++			exact_vma_exists = !!find_exact_vma(mm, vm_start,
++							    vm_end);
++			up_read(&mm->mmap_sem);
++		}
+ 	}
+
+ 	mmput(mm);
+@@ -2010,8 +2013,11 @@ static int map_files_get_link(struct dentry *dentry, struct path *path)
+ 	if (rc)
+ 		goto out_mmput;
+
++	rc = down_read_killable(&mm->mmap_sem);
++	if (rc)
++		goto out_mmput;
++
+ 	rc = -ENOENT;
+-	down_read(&mm->mmap_sem);
+ 	vma = find_exact_vma(mm, vm_start, vm_end);
+ 	if (vma && vma->vm_file) {
+ 		*path = vma->vm_file->f_path;
+@@ -2107,7 +2113,11 @@ static struct dentry *proc_map_files_lookup(struct inode *dir,
+ 	if (!mm)
+ 		goto out_put_task;
+
+-	down_read(&mm->mmap_sem);
++	result = ERR_PTR(-EINTR);
++	if (down_read_killable(&mm->mmap_sem))
++		goto out_put_mm;
++
++	result = ERR_PTR(-ENOENT);
+ 	vma = find_exact_vma(mm, vm_start, vm_end);
+ 	if (!vma)
+ 		goto out_no_vma;
+@@ -2118,6 +2128,7 @@ static struct dentry *proc_map_files_lookup(struct inode *dir,
+
+ out_no_vma:
+ 	up_read(&mm->mmap_sem);
++out_put_mm:
+ 	mmput(mm);
+ out_put_task:
+ 	put_task_struct(task);
+@@ -2160,7 +2171,12 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
+ 	mm = get_task_mm(task);
+ 	if (!mm)
+ 		goto out_put_task;
+-	down_read(&mm->mmap_sem);
++
++	ret = down_read_killable(&mm->mmap_sem);
++	if (ret) {
++		mmput(mm);
++		goto out_put_task;
++	}
+
+ 	nr_files = 0;
+
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 01d4eb0e6bd1..7f84d1477b5b 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -166,7 +166,11 @@ static void *m_start(struct seq_file *m, loff_t *ppos)
+ 	if (!mm || !mmget_not_zero(mm))
+ 		return NULL;
+
+-	down_read(&mm->mmap_sem);
++	if (down_read_killable(&mm->mmap_sem)) {
++		mmput(mm);
++		return ERR_PTR(-EINTR);
++	}
++
+ 	hold_task_mempolicy(priv);
+ 	priv->tail_vma = get_gate_vma(mm);
+
+@@ -828,7 +832,10 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
+
+ 	memset(&mss, 0, sizeof(mss));
+
+-	down_read(&mm->mmap_sem);
++	ret = down_read_killable(&mm->mmap_sem);
++	if (ret)
++		goto out_put_mm;
++
+ 	hold_task_mempolicy(priv);
+
+ 	for (vma = priv->mm->mmap; vma; vma = vma->vm_next) {
+@@ -845,8 +852,9 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
+
+ 	release_task_mempolicy(priv);
+ 	up_read(&mm->mmap_sem);
+-	mmput(mm);
+
++out_put_mm:
++	mmput(mm);
+ out_put_task:
+ 	put_task_struct(priv->task);
+ 	priv->task = NULL;
+@@ -1132,7 +1140,10 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
+ 			goto out_mm;
+ 		}
+
+-		down_read(&mm->mmap_sem);
++		if (down_read_killable(&mm->mmap_sem)) {
++			count = -EINTR;
++			goto out_mm;
++		}
+ 		tlb_gather_mmu(&tlb, mm, 0, -1);
+ 		if (type == CLEAR_REFS_SOFT_DIRTY) {
+ 			for (vma = mm->mmap; vma; vma = vma->vm_next) {
+@@ -1539,7 +1550,9 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
+ 		/* overflow ? */
+ 		if (end < start_vaddr || end > end_vaddr)
+ 			end = end_vaddr;
+-		down_read(&mm->mmap_sem);
++		ret = down_read_killable(&mm->mmap_sem);
++		if (ret)
++			goto out_free;
+ 		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
+ 		up_read(&mm->mmap_sem);
+ 		start_vaddr = end;
+diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
+index 36bf0f2e102e..7907e6419e57 100644
+--- a/fs/proc/task_nommu.c
++++ b/fs/proc/task_nommu.c
+@@ -211,7 +211,11 @@ static void *m_start(struct seq_file *m, loff_t *pos)
+ 	if (!mm || !mmget_not_zero(mm))
+ 		return NULL;
+
+-	down_read(&mm->mmap_sem);
++	if (down_read_killable(&mm->mmap_sem)) {
++		mmput(mm);
++		return ERR_PTR(-EINTR);
++	}
++
+ 	/* start from the Nth VMA */
+ 	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
+ 		if (n-- == 0)
+diff --git a/include/linux/cred.h b/include/linux/cred.h
+index 7eb43a038330..f7a30e0099be 100644
+--- a/include/linux/cred.h
++++ b/include/linux/cred.h
+@@ -145,7 +145,11 @@ struct cred {
+ 	struct user_struct *user;	/* real user ID subscription */
+ 	struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
+ 	struct group_info *group_info;	/* supplementary groups for euid/fsgid */
+-	struct rcu_head	rcu;		/* RCU deletion hook */
++	/* RCU deletion */
++	union {
++		int non_rcu;		/* Can we skip RCU deletion? */
++		struct rcu_head	rcu;	/* RCU deletion hook */
++	};
+ } __randomize_layout;
+
+ extern void __put_cred(struct cred *);
+@@ -246,6 +250,7 @@ static inline const struct cred *get_cred(const struct cred *cred)
+ 	if (!cred)
+ 		return cred;
+ 	validate_creds(cred);
++	nonconst_cred->non_rcu = 0;
+ 	return get_new_cred(nonconst_cred);
+ }
+
+@@ -257,6 +262,7 @@ static inline const struct cred *get_cred_rcu(const struct cred *cred)
+ 	if (!atomic_inc_not_zero(&nonconst_cred->usage))
+ 		return NULL;
+ 	validate_creds(cred);
++	nonconst_cred->non_rcu = 0;
+ 	return cred;
+ }
+
+diff --git a/include/linux/device.h b/include/linux/device.h
+index 4a295e324ac5..b12c586fae28 100644
+--- a/include/linux/device.h
++++ b/include/linux/device.h
+@@ -1375,6 +1375,7 @@ extern int (*platform_notify_remove)(struct device *dev);
+  */
+ extern struct device *get_device(struct device *dev);
+ extern void put_device(struct device *dev);
++extern bool kill_device(struct device *dev);
+
+ #ifdef CONFIG_DEVTMPFS
+ extern int devtmpfs_create_node(struct device *dev);
+diff --git a/include/linux/hmm.h b/include/linux/hmm.h
+index 044a36d7c3f8..89508dc0795f 100644
+--- a/include/linux/hmm.h
++++ b/include/linux/hmm.h
+@@ -93,6 +93,7 @@ struct hmm {
+ 	struct mmu_notifier	mmu_notifier;
+ 	struct rw_semaphore	mirrors_sem;
+ 	wait_queue_head_t	wq;
++	struct rcu_head		rcu;
+ 	long			notifiers;
+ 	bool			dead;
+ };
+diff --git a/include/linux/host1x.h b/include/linux/host1x.h
+index cfff30b9a62e..e6eea45e1154 100644
+--- a/include/linux/host1x.h
++++ b/include/linux/host1x.h
+@@ -297,6 +297,8 @@ struct host1x_device {
+ 	struct list_head clients;
+
+ 	bool registered;
++
++	struct device_dma_parameters dma_parms;
+ };
+
+ static inline struct host1x_device *to_host1x_device(struct device *dev)
+diff --git a/include/linux/iova.h b/include/linux/iova.h
+index 781b96ac706f..a0637abffee8 100644
+--- a/include/linux/iova.h
++++ b/include/linux/iova.h
+@@ -155,6 +155,7 @@ struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
+ void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
+ void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
+ 	unsigned long start_pfn);
++bool has_iova_flush_queue(struct iova_domain *iovad);
+ int init_iova_flush_queue(struct iova_domain *iovad,
+ 			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
+ struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
+@@ -235,6 +236,11 @@ static inline void init_iova_domain(struct iova_domain *iovad,
+ {
+ }
+
++static inline bool has_iova_flush_queue(struct iova_domain *iovad)
++{
++	return false;
++}
++
+ static inline int init_iova_flush_queue(struct iova_domain *iovad,
+ 					iova_flush_cb flush_cb,
+ 					iova_entry_dtor entry_dtor)
+diff --git a/include/linux/swap.h b/include/linux/swap.h
+index 4bfb5c4ac108..6358a6185634 100644
+--- a/include/linux/swap.h
++++ b/include/linux/swap.h
+@@ -175,8 +175,9 @@ enum {
+ 	SWP_PAGE_DISCARD = (1 << 10),	/* freed swap page-cluster discards */
+ 	SWP_STABLE_WRITES = (1 << 11),	/* no overwrite PG_writeback pages */
+ 	SWP_SYNCHRONOUS_IO = (1 << 12),	/* synchronous IO is efficient */
++	SWP_VALID	= (1 << 13),	/* swap is valid to be operated on? */
+ 					/* add others here before... */
+-	SWP_SCANNING	= (1 << 13),	/* refcount in scan_swap_map */
++	SWP_SCANNING	= (1 << 14),	/* refcount in scan_swap_map */
+ };
+
+ #define SWAP_CLUSTER_MAX 32UL
+@@ -460,7 +461,7 @@ extern unsigned int count_swap_pages(int, int);
+ extern sector_t map_swap_page(struct page *, struct block_device **);
+ extern sector_t swapdev_block(int, pgoff_t);
+ extern int page_swapcount(struct page *);
+-extern int __swap_count(struct swap_info_struct *si, swp_entry_t entry);
++extern int __swap_count(swp_entry_t entry);
+ extern int __swp_swapcount(swp_entry_t entry);
+ extern int swp_swapcount(swp_entry_t entry);
+ extern struct swap_info_struct *page_swap_info(struct page *);
+@@ -470,6 +471,12 @@ extern int try_to_free_swap(struct page *);
+ struct backing_dev_info;
+ extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
+ extern void exit_swap_address_space(unsigned int type);
++extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
++
++static inline void put_swap_device(struct swap_info_struct *si)
++{
++	rcu_read_unlock();
++}
+
+ #else /* CONFIG_SWAP */
+
+@@ -576,7 +583,7 @@ static inline int page_swapcount(struct page *page)
+ 	return 0;
+ }
+
+-static inline int __swap_count(struct swap_info_struct *si, swp_entry_t entry)
++static inline int __swap_count(swp_entry_t entry)
+ {
+ 	return 0;
+ }
+diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
+index 1050a75fb7ef..dcd776e77442 100644
+--- a/include/uapi/linux/videodev2.h
++++ b/include/uapi/linux/videodev2.h
+@@ -518,7 +518,13 @@ struct v4l2_pix_format {
+ #define V4L2_PIX_FMT_RGBX444 v4l2_fourcc('R', 'X', '1', '2') /* 16  rrrrgggg bbbbxxxx */
+ #define V4L2_PIX_FMT_ABGR444 v4l2_fourcc('A', 'B', '1', '2') /* 16  aaaabbbb ggggrrrr */
+ #define V4L2_PIX_FMT_XBGR444 v4l2_fourcc('X', 'B', '1', '2') /* 16  xxxxbbbb ggggrrrr */
+-#define V4L2_PIX_FMT_BGRA444 v4l2_fourcc('B', 'A', '1', '2') /* 16  bbbbgggg rrrraaaa */
++
++/*
++ * Originally this had 'BA12' as fourcc, but this clashed with the older
++ * V4L2_PIX_FMT_SGRBG12 which inexplicably used that same fourcc.
++ * So use 'GA12' instead for V4L2_PIX_FMT_BGRA444.
++ */
++#define V4L2_PIX_FMT_BGRA444 v4l2_fourcc('G', 'A', '1', '2') /* 16  bbbbgggg rrrraaaa */
+ #define V4L2_PIX_FMT_BGRX444 v4l2_fourcc('B', 'X', '1', '2') /* 16  bbbbgggg rrrrxxxx */
+ #define V4L2_PIX_FMT_RGB555  v4l2_fourcc('R', 'G', 'B', 'O') /* 16  RGB-5-5-5     */
+ #define V4L2_PIX_FMT_ARGB555 v4l2_fourcc('A', 'R', '1', '5') /* 16  ARGB-1-5-5-5  */
+diff --git a/kernel/cred.c b/kernel/cred.c
+index c73a87a4df13..153ae369e024 100644
+--- a/kernel/cred.c
++++ b/kernel/cred.c
+@@ -144,7 +144,10 @@ void __put_cred(struct cred *cred)
+ 	BUG_ON(cred == current->cred);
+ 	BUG_ON(cred == current->real_cred);
+
+-	call_rcu(&cred->rcu, put_cred_rcu);
++	if (cred->non_rcu)
++		put_cred_rcu(&cred->rcu);
++	else
++		call_rcu(&cred->rcu, put_cred_rcu);
+ }
+ EXPORT_SYMBOL(__put_cred);
+
+@@ -256,6 +259,7 @@ struct cred *prepare_creds(void)
+ 	old = task->cred;
+ 	memcpy(new, old, sizeof(struct cred));
+
++	new->non_rcu = 0;
+ 	atomic_set(&new->usage, 1);
+ 	set_cred_subscribers(new, 0);
+ 	get_group_info(new->group_info);
+@@ -535,7 +539,19 @@ const struct cred *override_creds(const struct cred *new)
+
+ 	validate_creds(old);
+ 	validate_creds(new);
+-	get_cred(new);
++
++	/*
++	 * NOTE! This uses 'get_new_cred()' rather than 'get_cred()'.
++	 *
++	 * That means that we do not clear the 'non_rcu' flag, since
++	 * we are only installing the cred into the thread-synchronous
++	 * '->cred' pointer, not the '->real_cred' pointer that is
++	 * visible to other threads under RCU.
++	 *
++	 * Also note that we did validate_creds() manually, not depending
++	 * on the validation in 'get_cred()'.
++	 */
++	get_new_cred((struct cred *)new);
+ 	alter_cred_subscribers(new, 1);
+ 	rcu_assign_pointer(current->cred, new);
+ 	alter_cred_subscribers(old, -1);
+@@ -672,6 +688,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
+ 	validate_creds(old);
+
+ 	*new = *old;
++	new->non_rcu = 0;
+ 	atomic_set(&new->usage, 1);
+ 	set_cred_subscribers(new, 0);
+ 	get_uid(new->user);
+diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c
+index 7a723194ecbe..0207e3764d52 100644
+--- a/kernel/dma/remap.c
++++ b/kernel/dma/remap.c
+@@ -158,6 +158,9 @@ out:
+
+ bool dma_in_atomic_pool(void *start, size_t size)
+ {
++	if (unlikely(!atomic_pool))
++		return false;
++
+ 	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
+ }
+
+diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
+index 9c49ec645d8b..bda006f8a88b 100644
+--- a/kernel/locking/lockdep_proc.c
++++ b/kernel/locking/lockdep_proc.c
+@@ -200,7 +200,6 @@ static void lockdep_stats_debug_show(struct seq_file *m)
+
+ static int lockdep_stats_show(struct seq_file *m, void *v)
+ {
+-	struct lock_class *class;
+ 	unsigned long nr_unused = 0, nr_uncategorized = 0,
+ 		      nr_irq_safe = 0, nr_irq_unsafe = 0,
+ 		      nr_softirq_safe = 0, nr_softirq_unsafe = 0,
+@@ -210,6 +209,9 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
+ 		      nr_hardirq_read_safe = 0, nr_hardirq_read_unsafe = 0,
+ 		      sum_forward_deps = 0;
+
++#ifdef CONFIG_PROVE_LOCKING
++	struct lock_class *class;
++
+ 	list_for_each_entry(class, &all_lock_classes, lock_entry) {
+
+ 		if (class->usage_mask == 0)
+@@ -241,12 +243,12 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
+ 		if (class->usage_mask & LOCKF_ENABLED_HARDIRQ_READ)
+ 			nr_hardirq_read_unsafe++;
+
+-#ifdef CONFIG_PROVE_LOCKING
+ 		sum_forward_deps += lockdep_count_forward_deps(class);
+-#endif
+ 	}
+ #ifdef CONFIG_DEBUG_LOCKDEP
+ 	DEBUG_LOCKS_WARN_ON(debug_atomic_read(nr_unused_locks) != nr_unused);
++#endif
++
+ #endif
+ 	seq_printf(m, " lock-classes:                  %11lu [max: %lu]\n",
+ 			nr_lock_classes, MAX_LOCKDEP_KEYS);
+diff --git a/mm/gup.c b/mm/gup.c
+index ddde097cf9e4..d2c14fc4b5d4 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -585,11 +585,14 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
+ 		pgd = pgd_offset_k(address);
+ 	else
+ 		pgd = pgd_offset_gate(mm, address);
+-	BUG_ON(pgd_none(*pgd));
++	if (pgd_none(*pgd))
++		return -EFAULT;
+ 	p4d = p4d_offset(pgd, address);
+-	BUG_ON(p4d_none(*p4d));
++	if (p4d_none(*p4d))
++		return -EFAULT;
+ 	pud = pud_offset(p4d, address);
+-	BUG_ON(pud_none(*pud));
++	if (pud_none(*pud))
++		return -EFAULT;
+ 	pmd = pmd_offset(pud, address);
+ 	if (!pmd_present(*pmd))
+ 		return -EFAULT;
+@@ -1696,7 +1699,8 @@ static inline pte_t gup_get_pte(pte_t *ptep)
+ }
+ #endif
+
+-static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
++static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
++					    struct page **pages)
+ {
+ 	while ((*nr) - nr_start) {
+ 		struct page *page = pages[--(*nr)];
+diff --git a/mm/hmm.c b/mm/hmm.c
+index f702a3895d05..4c405dfbd2b3 100644
+--- a/mm/hmm.c
++++ b/mm/hmm.c
+@@ -104,6 +104,11 @@ error:
+ 	return NULL;
+ }
+
++static void hmm_free_rcu(struct rcu_head *rcu)
++{
++	kfree(container_of(rcu, struct hmm, rcu));
++}
++
+ static void hmm_free(struct kref *kref)
+ {
+ 	struct hmm *hmm = container_of(kref, struct hmm, kref);
+@@ -116,7 +121,7 @@ static void hmm_free(struct kref *kref)
+ 		mm->hmm = NULL;
+ 	spin_unlock(&mm->page_table_lock);
+
+-	kfree(hmm);
++	mmu_notifier_call_srcu(&hmm->rcu, hmm_free_rcu);
+ }
+
+ static inline void hmm_put(struct hmm *hmm)
+@@ -144,10 +149,14 @@ void hmm_mm_destroy(struct mm_struct *mm)
+
+ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
+ {
+-	struct hmm *hmm = mm_get_hmm(mm);
++	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
+ 	struct hmm_mirror *mirror;
+ 	struct hmm_range *range;
+
++	/* Bail out if hmm is in the process of being freed */
++	if (!kref_get_unless_zero(&hmm->kref))
++		return;
++
+ 	/* Report this HMM as dying. */
+ 	hmm->dead = true;
+
+@@ -185,13 +194,14 @@ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
+ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
+ 			const struct mmu_notifier_range *nrange)
+ {
+-	struct hmm *hmm = mm_get_hmm(nrange->mm);
++	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
+ 	struct hmm_mirror *mirror;
+ 	struct hmm_update update;
+ 	struct hmm_range *range;
+ 	int ret = 0;
+
+-	VM_BUG_ON(!hmm);
++	if (!kref_get_unless_zero(&hmm->kref))
++		return 0;
+
+ 	update.start = nrange->start;
+ 	update.end = nrange->end;
+@@ -239,9 +249,10 @@ out:
+ static void hmm_invalidate_range_end(struct mmu_notifier *mn,
+ 			const struct mmu_notifier_range *nrange)
+ {
+-	struct hmm *hmm = mm_get_hmm(nrange->mm);
++	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
+
+-	VM_BUG_ON(!hmm);
++	if (!kref_get_unless_zero(&hmm->kref))
++		return;
+
+ 	mutex_lock(&hmm->lock);
+ 	hmm->notifiers--;
+diff --git a/mm/kmemleak.c b/mm/kmemleak.c
+index 9dd581d11565..3e147ea83182 100644
+--- a/mm/kmemleak.c
++++ b/mm/kmemleak.c
+@@ -575,7 +575,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
+ 	if (in_irq()) {
+ 		object->pid = 0;
+ 		strncpy(object->comm, "hardirq", sizeof(object->comm));
+-	} else if (in_softirq()) {
++	} else if (in_serving_softirq()) {
+ 		object->pid = 0;
+ 		strncpy(object->comm, "softirq", sizeof(object->comm));
+ 	} else {
+diff --git a/mm/memory.c b/mm/memory.c
+index ddf20bd0c317..b0efc69b2634 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2807,7 +2807,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
+ 		struct swap_info_struct *si = swp_swap_info(entry);
+
+ 		if (si->flags & SWP_SYNCHRONOUS_IO &&
+-				__swap_count(si, entry) == 1) {
++				__swap_count(entry) == 1) {
+ 			/* skip swapcache */
+ 			page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
+ 							vmf->address);
+@@ -4349,7 +4349,9 @@ int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+ 	void *old_buf = buf;
+ 	int write = gup_flags & FOLL_WRITE;
+
+-	down_read(&mm->mmap_sem);
++	if (down_read_killable(&mm->mmap_sem))
++		return 0;
++
+ 	/* ignore errors, just check how much was successfully transferred */
+ 	while (len) {
+ 		int bytes, ret, offset;
+diff --git a/mm/mincore.c b/mm/mincore.c
+index c3f058bd0faf..4fe91d497436 100644
+--- a/mm/mincore.c
++++ b/mm/mincore.c
+@@ -68,8 +68,16 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
+ 		 */
+ 		if (xa_is_value(page)) {
+ 			swp_entry_t swp = radix_to_swp_entry(page);
+-			page = find_get_page(swap_address_space(swp),
+-					     swp_offset(swp));
++			struct swap_info_struct *si;
++
++			/* Prevent swap device to being swapoff under us */
++			si = get_swap_device(swp);
++			if (si) {
++				page = find_get_page(swap_address_space(swp),
++						     swp_offset(swp));
++				put_swap_device(si);
++			} else
++				page = NULL;
+ 		}
+ 	} else
+ 		page = find_get_page(mapping, pgoff);
+diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
+index 513b9607409d..b5670620aea0 100644
+--- a/mm/mmu_notifier.c
++++ b/mm/mmu_notifier.c
+@@ -274,7 +274,7 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
+ 	 * thanks to mm_take_all_locks().
+ 	 */
+ 	spin_lock(&mm->mmu_notifier_mm->lock);
+-	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
++	hlist_add_head_rcu(&mn->hlist, &mm->mmu_notifier_mm->list);
+ 	spin_unlock(&mm->mmu_notifier_mm->lock);
+
+ 	mm_drop_all_locks(mm);
+diff --git a/mm/nommu.c b/mm/nommu.c
+index d8c02fbe03b5..b2823519f8cd 100644
+--- a/mm/nommu.c
++++ b/mm/nommu.c
+@@ -1792,7 +1792,8 @@ int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+ 	struct vm_area_struct *vma;
+ 	int write = gup_flags & FOLL_WRITE;
+
+-	down_read(&mm->mmap_sem);
++	if (down_read_killable(&mm->mmap_sem))
++		return 0;
+
+ 	/* the access must start within one of the target process's mappings */
+ 	vma = find_vma(mm, addr);
+diff --git a/mm/swap.c b/mm/swap.c
+index 7ede3eddc12a..607c48229a1d 100644
+--- a/mm/swap.c
++++ b/mm/swap.c
+@@ -740,15 +740,20 @@ void release_pages(struct page **pages, int nr)
+ 		if (is_huge_zero_page(page))
+ 			continue;
+
+-		/* Device public page can not be huge page */
+-		if (is_device_public_page(page)) {
++		if (is_zone_device_page(page)) {
+ 			if (locked_pgdat) {
+ 				spin_unlock_irqrestore(&locked_pgdat->lru_lock,
+ 						       flags);
+ 				locked_pgdat = NULL;
+ 			}
+-			put_devmap_managed_page(page);
+-			continue;
++			/*
++			 * ZONE_DEVICE pages that return 'false' from
++			 * put_devmap_managed_page() do not require special
++			 * processing, and instead, expect a call to
++			 * put_page_testzero().
++			 */
++			if (put_devmap_managed_page(page))
++				continue;
+ 		}
+
+ 		page = compound_head(page);
+diff --git a/mm/swap_state.c b/mm/swap_state.c
+index 85245fdec8d9..61453f1faf72 100644
+--- a/mm/swap_state.c
++++ b/mm/swap_state.c
+@@ -310,8 +310,13 @@ struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
+ 			       unsigned long addr)
+ {
+ 	struct page *page;
++	struct swap_info_struct *si;
+
++	si = get_swap_device(entry);
++	if (!si)
++		return NULL;
+ 	page = find_get_page(swap_address_space(entry), swp_offset(entry));
++	put_swap_device(si);
+
+ 	INC_CACHE_INFO(find_total);
+ 	if (page) {
+@@ -354,8 +359,8 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
+ 			struct vm_area_struct *vma, unsigned long addr,
+ 			bool *new_page_allocated)
+ {
+-	struct page *found_page, *new_page = NULL;
+-	struct address_space *swapper_space = swap_address_space(entry);
++	struct page *found_page = NULL, *new_page = NULL;
++	struct swap_info_struct *si;
+ 	int err;
+ 	*new_page_allocated = false;
+
+@@ -365,7 +370,12 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
+ 		 * called after lookup_swap_cache() failed, re-calling
+ 		 * that would confuse statistics.
+ 		 */
+-		found_page = find_get_page(swapper_space, swp_offset(entry));
++		si = get_swap_device(entry);
++		if (!si)
++			break;
++		found_page = find_get_page(swap_address_space(entry),
++					   swp_offset(entry));
++		put_swap_device(si);
+ 		if (found_page)
+ 			break;
+
6529 |
+diff --git a/mm/swapfile.c b/mm/swapfile.c |
6530 |
+index 596ac98051c5..dbab16ddefa6 100644 |
6531 |
+--- a/mm/swapfile.c |
6532 |
++++ b/mm/swapfile.c |
6533 |
+@@ -1079,12 +1079,11 @@ fail: |
6534 |
+ static struct swap_info_struct *__swap_info_get(swp_entry_t entry) |
6535 |
+ { |
6536 |
+ struct swap_info_struct *p; |
6537 |
+- unsigned long offset, type; |
6538 |
++ unsigned long offset; |
6539 |
+ |
6540 |
+ if (!entry.val) |
6541 |
+ goto out; |
6542 |
+- type = swp_type(entry); |
6543 |
+- p = swap_type_to_swap_info(type); |
6544 |
++ p = swp_swap_info(entry); |
6545 |
+ if (!p) |
6546 |
+ goto bad_nofile; |
6547 |
+ if (!(p->flags & SWP_USED)) |
6548 |
+@@ -1187,6 +1186,69 @@ static unsigned char __swap_entry_free_locked(struct swap_info_struct *p, |
6549 |
+ return usage; |
6550 |
+ } |
6551 |
+ |
6552 |
++/* |
6553 |
++ * Check whether swap entry is valid in the swap device. If so, |
6554 |
++ * return pointer to swap_info_struct, and keep the swap entry valid |
6555 |
++ * via preventing the swap device from being swapoff, until |
6556 |
++ * put_swap_device() is called. Otherwise return NULL. |
6557 |
++ * |
6558 |
++ * The entirety of the RCU read critical section must come before the |
6559 |
++ * return from or after the call to synchronize_rcu() in |
6560 |
++ * enable_swap_info() or swapoff(). So if "si->flags & SWP_VALID" is |
6561 |
++ * true, the si->map, si->cluster_info, etc. must be valid in the |
6562 |
++ * critical section. |
6563 |
++ * |
6564 |
++ * Notice that swapoff or swapoff+swapon can still happen before the |
6565 |
++ * rcu_read_lock() in get_swap_device() or after the rcu_read_unlock() |
6566 |
++ * in put_swap_device() if there isn't any other way to prevent |
6567 |
++ * swapoff, such as page lock, page table lock, etc. The caller must |
6568 |
++ * be prepared for that. For example, the following situation is |
6569 |
++ * possible. |
6570 |
++ * |
6571 |
++ * CPU1 CPU2 |
6572 |
++ * do_swap_page() |
6573 |
++ * ... swapoff+swapon |
6574 |
++ * __read_swap_cache_async() |
6575 |
++ * swapcache_prepare() |
6576 |
++ * __swap_duplicate() |
6577 |
++ * // check swap_map |
6578 |
++ * // verify PTE not changed |
6579 |
++ * |
6580 |
++ * In __swap_duplicate(), the swap_map need to be checked before |
6581 |
++ * changing partly because the specified swap entry may be for another |
6582 |
++ * swap device which has been swapoff. And in do_swap_page(), after |
6583 |
++ * the page is read from the swap device, the PTE is verified not |
6584 |
++ * changed with the page table locked to check whether the swap device |
6585 |
++ * has been swapoff or swapoff+swapon. |
6586 |
++ */
++struct swap_info_struct *get_swap_device(swp_entry_t entry)
++{
++ struct swap_info_struct *si;
++ unsigned long offset;
++
++ if (!entry.val)
++ goto out;
++ si = swp_swap_info(entry);
++ if (!si)
++ goto bad_nofile;
++
++ rcu_read_lock();
++ if (!(si->flags & SWP_VALID))
++ goto unlock_out;
++ offset = swp_offset(entry);
++ if (offset >= si->max)
++ goto unlock_out;
++
++ return si;
++bad_nofile:
++ pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);
++out:
++ return NULL;
++unlock_out:
++ rcu_read_unlock();
++ return NULL;
++}
++
+ static unsigned char __swap_entry_free(struct swap_info_struct *p,
+ swp_entry_t entry, unsigned char usage)
+ {
+@@ -1358,11 +1420,18 @@ int page_swapcount(struct page *page)
+ return count;
+ }
+ 
+-int __swap_count(struct swap_info_struct *si, swp_entry_t entry)
++int __swap_count(swp_entry_t entry)
+ {
++ struct swap_info_struct *si;
+ pgoff_t offset = swp_offset(entry);
++ int count = 0;
+ 
+- return swap_count(si->swap_map[offset]);
++ si = get_swap_device(entry);
++ if (si) {
++ count = swap_count(si->swap_map[offset]);
++ put_swap_device(si);
++ }
++ return count;
+ }
+ 
+ static int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
+@@ -1387,9 +1456,11 @@ int __swp_swapcount(swp_entry_t entry)
+ int count = 0;
+ struct swap_info_struct *si;
+ 
+- si = __swap_info_get(entry);
+- if (si)
++ si = get_swap_device(entry);
++ if (si) {
+ count = swap_swapcount(si, entry);
++ put_swap_device(si);
++ }
+ return count;
+ }
+ 
+@@ -2335,9 +2406,9 @@ static int swap_node(struct swap_info_struct *p)
+ return bdev ? bdev->bd_disk->node_id : NUMA_NO_NODE;
+ }
+ 
+-static void _enable_swap_info(struct swap_info_struct *p, int prio,
+- unsigned char *swap_map,
+- struct swap_cluster_info *cluster_info)
++static void setup_swap_info(struct swap_info_struct *p, int prio,
++ unsigned char *swap_map,
++ struct swap_cluster_info *cluster_info)
+ {
+ int i;
+ 
+@@ -2362,7 +2433,11 @@ static void _enable_swap_info(struct swap_info_struct *p, int prio,
+ }
+ p->swap_map = swap_map;
+ p->cluster_info = cluster_info;
+- p->flags |= SWP_WRITEOK;
++}
++
++static void _enable_swap_info(struct swap_info_struct *p)
++{
++ p->flags |= SWP_WRITEOK | SWP_VALID;
+ atomic_long_add(p->pages, &nr_swap_pages);
+ total_swap_pages += p->pages;
+ 
+@@ -2389,7 +2464,17 @@ static void enable_swap_info(struct swap_info_struct *p, int prio,
+ frontswap_init(p->type, frontswap_map);
+ spin_lock(&swap_lock);
+ spin_lock(&p->lock);
+- _enable_swap_info(p, prio, swap_map, cluster_info);
++ setup_swap_info(p, prio, swap_map, cluster_info);
++ spin_unlock(&p->lock);
++ spin_unlock(&swap_lock);
++ /*
++ * Guarantee swap_map, cluster_info, etc. fields are valid
++ * between get/put_swap_device() if SWP_VALID bit is set
++ */
++ synchronize_rcu();
++ spin_lock(&swap_lock);
++ spin_lock(&p->lock);
++ _enable_swap_info(p);
+ spin_unlock(&p->lock);
+ spin_unlock(&swap_lock);
+ }
+@@ -2398,7 +2483,8 @@ static void reinsert_swap_info(struct swap_info_struct *p)
+ {
+ spin_lock(&swap_lock);
+ spin_lock(&p->lock);
+- _enable_swap_info(p, p->prio, p->swap_map, p->cluster_info);
++ setup_swap_info(p, p->prio, p->swap_map, p->cluster_info);
++ _enable_swap_info(p);
+ spin_unlock(&p->lock);
+ spin_unlock(&swap_lock);
+ }
+@@ -2501,6 +2587,17 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
+ 
+ reenable_swap_slots_cache_unlock();
+ 
++ spin_lock(&swap_lock);
++ spin_lock(&p->lock);
++ p->flags &= ~SWP_VALID; /* mark swap device as invalid */
++ spin_unlock(&p->lock);
++ spin_unlock(&swap_lock);
++ /*
++ * wait for swap operations protected by get/put_swap_device()
++ * to complete
++ */
++ synchronize_rcu();
++
+ flush_work(&p->discard_work);
+ 
+ destroy_swap_extents(p);
+@@ -3265,17 +3362,11 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
+ unsigned char has_cache;
+ int err = -EINVAL;
+ 
+- if (non_swap_entry(entry))
+- goto out;
+-
+- p = swp_swap_info(entry);
++ p = get_swap_device(entry);
+ if (!p)
+- goto bad_file;
+-
+- offset = swp_offset(entry);
+- if (unlikely(offset >= p->max))
+ goto out;
+ 
++ offset = swp_offset(entry);
+ ci = lock_cluster_or_swap_info(p, offset);
+ 
+ count = p->swap_map[offset];
+@@ -3321,11 +3412,9 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
+ unlock_out:
+ unlock_cluster_or_swap_info(p, ci);
+ out:
++ if (p)
++ put_swap_device(p);
+ return err;
+-
+-bad_file:
+- pr_err("swap_dup: %s%08lx\n", Bad_file, entry.val);
+- goto out;
+ }
+ 
+ /*
+@@ -3417,6 +3506,7 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
+ struct page *list_page;
+ pgoff_t offset;
+ unsigned char count;
++ int ret = 0;
+ 
+ /*
+ * When debugging, it's easier to use __GFP_ZERO here; but it's better
+@@ -3424,15 +3514,15 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
+ */
+ page = alloc_page(gfp_mask | __GFP_HIGHMEM);
+ 
+- si = swap_info_get(entry);
++ si = get_swap_device(entry);
+ if (!si) {
+ /*
+ * An acceptable race has occurred since the failing
+- * __swap_duplicate(): the swap entry has been freed,
+- * perhaps even the whole swap_map cleared for swapoff.
++ * __swap_duplicate(): the swap device may have been swapped off
+ */
+ goto outer;
+ }
++ spin_lock(&si->lock);
+ 
+ offset = swp_offset(entry);
+ 
+@@ -3450,9 +3540,8 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
+ }
+ 
+ if (!page) {
+- unlock_cluster(ci);
+- spin_unlock(&si->lock);
+- return -ENOMEM;
++ ret = -ENOMEM;
++ goto out;
+ }
+ 
+ /*
+@@ -3504,10 +3593,11 @@ out_unlock_cont:
+ out:
+ unlock_cluster(ci);
+ spin_unlock(&si->lock);
++ put_swap_device(si);
+ outer:
+ if (page)
+ __free_page(page);
+- return 0;
++ return ret;
+ }
+ 
+ /*
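The mm/swapfile.c hunks above close a race between swap operations and swapoff: swapoff now clears SWP_VALID and waits an RCU grace period before tearing down swap_map, while readers wrap their accesses in get_swap_device()/put_swap_device() and re-check SWP_VALID inside the RCU read-side section. Below is a minimal userspace sketch of the same protocol; all names (sketch_*) are stand-ins, not the kernel API, and a rwlock stands in for the RCU read-side critical section and grace-period wait.

/* Minimal userspace sketch of the get/put_swap_device() pattern above.
 * The flag plays the role of SWP_VALID; the rwlock stands in for
 * rcu_read_lock()/synchronize_rcu(). Build with -lpthread. */
#include <stdio.h>
#include <stdbool.h>
#include <pthread.h>

struct sketch_swap_dev {
	pthread_rwlock_t rwsem;	/* stands in for the RCU read side */
	bool valid;		/* stands in for SWP_VALID */
	unsigned char swap_map[16];
};

static struct sketch_swap_dev dev = {
	.rwsem = PTHREAD_RWLOCK_INITIALIZER,
	.valid = true,
};

/* Reader side: enter the read section, re-check validity, and only then
 * touch swap_map; the caller must call put() when done. */
static struct sketch_swap_dev *sketch_get_swap_device(void)
{
	pthread_rwlock_rdlock(&dev.rwsem);
	if (dev.valid)
		return &dev;
	pthread_rwlock_unlock(&dev.rwsem);
	return NULL;
}

static void sketch_put_swap_device(struct sketch_swap_dev *d)
{
	pthread_rwlock_unlock(&d->rwsem);
}

/* "swapoff" side: mark invalid, waiting out all readers, mirroring the
 * SWP_VALID clear followed by synchronize_rcu() in the patch. */
static void sketch_swapoff(void)
{
	pthread_rwlock_wrlock(&dev.rwsem);
	dev.valid = false;
	pthread_rwlock_unlock(&dev.rwsem);
}

int main(void)
{
	struct sketch_swap_dev *d = sketch_get_swap_device();

	if (d) {
		printf("swap_map[0] = %u\n", d->swap_map[0]);
		sketch_put_swap_device(d);
	}
	sketch_swapoff();
	printf("after swapoff: %s\n",
	       sketch_get_swap_device() ? "still valid" : "NULL");
	return 0;
}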
+diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
+index 46bce8389066..9db455d02255 100644
+--- a/net/rds/rdma_transport.c
++++ b/net/rds/rdma_transport.c
+@@ -112,7 +112,9 @@ static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
+ if (!conn)
+ break;
+ err = (int *)rdma_consumer_reject_data(cm_id, event, &len);
+- if (!err || (err && ((*err) == RDS_RDMA_REJ_INCOMPAT))) {
++ if (!err ||
++ (err && len >= sizeof(*err) &&
++ ((*err) <= RDS_RDMA_REJ_INCOMPAT))) {
+ pr_warn("RDS/RDMA: conn <%pI6c, %pI6c> rejected, dropping connection\n",
+ &conn->c_laddr, &conn->c_faddr);
+ conn->c_proposed_version = RDS_PROTOCOL_COMPAT_VERSION;
+@@ -122,7 +124,6 @@ static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
+ rdsdebug("Connection rejected: %s\n",
+ rdma_reject_msg(cm_id, event->status));
+ break;
+- /* FALLTHROUGH */
+ case RDMA_CM_EVENT_ADDR_ERROR:
+ case RDMA_CM_EVENT_ROUTE_ERROR:
+ case RDMA_CM_EVENT_CONNECT_ERROR:
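The RDS change validates the length of the peer-supplied reject payload before dereferencing it. The same defensive pattern, sketched generically below; parse_reject_reason() is a made-up stand-in, not the RDS API.

/* Sketch of the length check the RDS fix adds: never dereference a
 * peer-supplied payload without first validating that enough bytes
 * were actually received. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static int parse_reject_reason(const uint8_t *data, size_t len)
{
	uint32_t reason;

	/* the unpatched code read the value with no length check */
	if (!data || len < sizeof(reason))
		return -1;	/* treat a short/missing payload as "no reason" */

	memcpy(&reason, data, sizeof(reason));
	return (int)reason;
}

int main(void)
{
	uint8_t short_buf[2] = { 1, 2 };
	uint8_t ok_buf[4] = { 1, 0, 0, 0 };

	printf("short payload -> %d\n",
	       parse_reject_reason(short_buf, sizeof(short_buf)));
	printf("full payload  -> %d\n",
	       parse_reject_reason(ok_buf, sizeof(ok_buf)));
	return 0;
}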
+diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
+index facbd603adf6..9ba47b0a47b9 100644
+--- a/scripts/basic/fixdep.c
++++ b/scripts/basic/fixdep.c
+@@ -99,6 +99,7 @@
+ #include <unistd.h>
+ #include <fcntl.h>
+ #include <string.h>
++#include <stdarg.h>
+ #include <stdlib.h>
+ #include <stdio.h>
+ #include <ctype.h>
+@@ -109,6 +110,36 @@ static void usage(void)
+ exit(1);
+ }
+ 
++/*
++ * In the intended usage of this program, the stdout is redirected to .*.cmd
++ * files. The return value of printf() and putchar() must be checked to catch
++ * any error, e.g. "No space left on device".
++ */
++static void xprintf(const char *format, ...)
++{
++ va_list ap;
++ int ret;
++
++ va_start(ap, format);
++ ret = vprintf(format, ap);
++ if (ret < 0) {
++ perror("fixdep");
++ exit(1);
++ }
++ va_end(ap);
++}
++
++static void xputchar(int c)
++{
++ int ret;
++
++ ret = putchar(c);
++ if (ret == EOF) {
++ perror("fixdep");
++ exit(1);
++ }
++}
++
+ /*
+ * Print out a dependency path from a symbol name
+ */
+@@ -116,7 +147,7 @@ static void print_dep(const char *m, int slen, const char *dir)
+ {
+ int c, prev_c = '/', i;
+ 
+- printf(" $(wildcard %s/", dir);
++ xprintf(" $(wildcard %s/", dir);
+ for (i = 0; i < slen; i++) {
+ c = m[i];
+ if (c == '_')
+@@ -124,10 +155,10 @@ static void print_dep(const char *m, int slen, const char *dir)
+ else
+ c = tolower(c);
+ if (c != '/' || prev_c != '/')
+- putchar(c);
++ xputchar(c);
+ prev_c = c;
+ }
+- printf(".h) \\\n");
++ xprintf(".h) \\\n");
+ }
+ 
+ struct item {
+@@ -324,13 +355,13 @@ static void parse_dep_file(char *m, const char *target)
+ */
+ if (!saw_any_target) {
+ saw_any_target = 1;
+- printf("source_%s := %s\n\n",
+- target, m);
+- printf("deps_%s := \\\n", target);
++ xprintf("source_%s := %s\n\n",
++ target, m);
++ xprintf("deps_%s := \\\n", target);
+ }
+ is_first_dep = 0;
+ } else {
+- printf(" %s \\\n", m);
++ xprintf(" %s \\\n", m);
+ }
+ 
+ buf = read_file(m);
+@@ -353,8 +384,8 @@ static void parse_dep_file(char *m, const char *target)
+ exit(1);
+ }
+ 
+- printf("\n%s: $(deps_%s)\n\n", target, target);
+- printf("$(deps_%s):\n", target);
++ xprintf("\n%s: $(deps_%s)\n\n", target, target);
++ xprintf("$(deps_%s):\n", target);
+ }
+ 
+ int main(int argc, char *argv[])
+@@ -369,7 +400,7 @@ int main(int argc, char *argv[])
+ target = argv[2];
+ cmdline = argv[3];
+ 
+- printf("cmd_%s := %s\n\n", target, cmdline);
++ xprintf("cmd_%s := %s\n\n", target, cmdline);
+ 
+ buf = read_file(depfile);
+ parse_dep_file(buf, target);
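The fixdep change routes every stdout write through checked wrappers so that a write error (typically "No space left on device" when .*.cmd files are generated) aborts the build instead of silently truncating the output. A standalone sketch of the same wrapper pattern:

/* Checked-output pattern as adopted by fixdep above: wrap stdio calls
 * and abort on the first write error. */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

static void xprintf(const char *format, ...)
{
	va_list ap;

	va_start(ap, format);
	if (vprintf(format, ap) < 0) {
		perror("xprintf");
		exit(1);
	}
	va_end(ap);
}

int main(void)
{
	xprintf("deps_%s := \\\n", "example.o");
	/* on Linux, redirect stdout to /dev/full to exercise the error path */
	return 0;
}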
+diff --git a/scripts/genksyms/keywords.c b/scripts/genksyms/keywords.c
+index e93336baaaed..c586d32dd2c3 100644
+--- a/scripts/genksyms/keywords.c
++++ b/scripts/genksyms/keywords.c
+@@ -25,6 +25,10 @@ static struct resword {
+ { "__volatile__", VOLATILE_KEYW },
+ { "__builtin_va_list", VA_LIST_KEYW },
+ 
++ { "__int128", BUILTIN_INT_KEYW },
++ { "__int128_t", BUILTIN_INT_KEYW },
++ { "__uint128_t", BUILTIN_INT_KEYW },
++
+ // According to rth, c99 defines "_Bool", __restrict", __restrict__", "restrict". KAO
+ { "_Bool", BOOL_KEYW },
+ { "_restrict", RESTRICT_KEYW },
+diff --git a/scripts/genksyms/parse.y b/scripts/genksyms/parse.y
+index 00a6d7e54971..1ebcf52cd0f9 100644
+--- a/scripts/genksyms/parse.y
++++ b/scripts/genksyms/parse.y
+@@ -76,6 +76,7 @@ static void record_compound(struct string_list **keyw,
+ %token ATTRIBUTE_KEYW
+ %token AUTO_KEYW
+ %token BOOL_KEYW
++%token BUILTIN_INT_KEYW
+ %token CHAR_KEYW
+ %token CONST_KEYW
+ %token DOUBLE_KEYW
+@@ -263,6 +264,7 @@ simple_type_specifier:
+ | VOID_KEYW
+ | BOOL_KEYW
+ | VA_LIST_KEYW
++ | BUILTIN_INT_KEYW
+ | TYPE { (*$1)->tag = SYM_TYPEDEF; $$ = $1; }
+ ;
+ 
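The genksyms change teaches the symbol-versioning parser about GCC's built-in 128-bit integer types, so exported symbols using them no longer break CRC generation. For reference, this is the kind of declaration it must now tokenize (GCC/Clang extension, available on most 64-bit targets):

/* A declaration using GCC's built-in 128-bit type, which genksyms can
 * now parse. Plain illustrative code, not from the kernel tree. */
#include <stdio.h>

static __int128 mul64(unsigned long long a, unsigned long long b)
{
	return (__int128)a * b;
}

int main(void)
{
	__int128 p = mul64(1ULL << 40, 1ULL << 40);

	/* printf has no conversion for __int128; print the two halves */
	printf("high=%llu low=%llu\n",
	       (unsigned long long)(p >> 64),
	       (unsigned long long)p);
	return 0;
}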
+diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
+index e17837f1d3f2..ae6504d07fd6 100644
+--- a/scripts/kallsyms.c
++++ b/scripts/kallsyms.c
+@@ -150,6 +150,9 @@ static int read_symbol(FILE *in, struct sym_entry *s)
+ /* exclude debugging symbols */
+ else if (stype == 'N' || stype == 'n')
+ return -1;
++ /* exclude s390 kasan local symbols */
++ else if (!strncmp(sym, ".LASANPC", 8))
++ return -1;
+ 
+ /* include the type field in the symbol name, so that it gets
+ * compressed together */
+diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
+index 13c5e6c8829c..47fca2c69a73 100644
+--- a/scripts/recordmcount.h
++++ b/scripts/recordmcount.h
+@@ -325,7 +325,8 @@ static uint_t *sift_rel_mcount(uint_t *mlocp,
+ if (!mcountsym)
+ mcountsym = get_mcountsym(sym0, relp, str0);
+ 
+- if (mcountsym == Elf_r_sym(relp) && !is_fake_mcount(relp)) {
++ if (mcountsym && mcountsym == Elf_r_sym(relp) &&
++ !is_fake_mcount(relp)) {
+ uint_t const addend =
+ _w(_w(relp->r_offset) - recval + mcount_adjust);
+ mrelp->r_offset = _w(offbase
+diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening
+index c6cb2d9b2905..107176069af3 100644
+--- a/security/Kconfig.hardening
++++ b/security/Kconfig.hardening
+@@ -61,6 +61,7 @@ choice
+ config GCC_PLUGIN_STRUCTLEAK_BYREF
+ bool "zero-init structs passed by reference (strong)"
+ depends on GCC_PLUGINS
++ depends on !(KASAN && KASAN_STACK=1)
+ select GCC_PLUGIN_STRUCTLEAK
+ help
+ Zero-initialize any structures on the stack that may
+@@ -70,9 +71,15 @@ choice
+ exposures, like CVE-2017-1000410:
+ https://git.kernel.org/linus/06e7e776ca4d3654
+ 
++ As a side-effect, this keeps a lot of variables on the
++ stack that can otherwise be optimized out, so combining
++ this with CONFIG_KASAN_STACK can lead to a stack overflow
++ and is disallowed.
++
+ config GCC_PLUGIN_STRUCTLEAK_BYREF_ALL
+ bool "zero-init anything passed by reference (very strong)"
+ depends on GCC_PLUGINS
++ depends on !(KASAN && KASAN_STACK=1)
+ select GCC_PLUGIN_STRUCTLEAK
+ help
+ Zero-initialize any stack variables that may be passed
+diff --git a/security/selinux/ss/sidtab.c b/security/selinux/ss/sidtab.c
+index e63a90ff2728..1f0a6eaa2d6a 100644
+--- a/security/selinux/ss/sidtab.c
++++ b/security/selinux/ss/sidtab.c
+@@ -286,6 +286,11 @@ static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
+ ++count;
+ }
+ 
++ /* bail out if we already reached max entries */
++ rc = -EOVERFLOW;
++ if (count >= SIDTAB_MAX)
++ goto out_unlock;
++
+ /* insert context into new entry */
+ rc = -ENOMEM;
+ dst = sidtab_do_lookup(s, count, 1);
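The sidtab fix checks the capacity limit before handing out a new slot rather than after, returning -EOVERFLOW when the table is already full. The same guard, sketched generically (constants and names are illustrative, not the SELinux structures):

/* Capacity check before insert, as the sidtab fix adds. */
#include <stdio.h>
#include <errno.h>

#define TABLE_MAX 4

static int table[TABLE_MAX];
static unsigned int count;

static int table_insert(int value)
{
	if (count >= TABLE_MAX)
		return -EOVERFLOW;	/* bail out before touching storage */
	table[count++] = value;
	return 0;
}

int main(void)
{
	int i;

	for (i = 0; i < 6; i++)
		printf("insert %d -> %d\n", i, table_insert(i));
	return 0;
}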
+diff --git a/sound/ac97/bus.c b/sound/ac97/bus.c
+index 7b977b753a03..7985dd8198b6 100644
+--- a/sound/ac97/bus.c
++++ b/sound/ac97/bus.c
+@@ -122,17 +122,12 @@ static int ac97_codec_add(struct ac97_controller *ac97_ctrl, int idx,
+ vendor_id);
+ 
+ ret = device_add(&codec->dev);
+- if (ret)
+- goto err_free_codec;
++ if (ret) {
++ put_device(&codec->dev);
++ return ret;
++ }
+ 
+ return 0;
+-err_free_codec:
+- of_node_put(codec->dev.of_node);
+- put_device(&codec->dev);
+- kfree(codec);
+- ac97_ctrl->codecs[idx] = NULL;
+-
+- return ret;
+ }
+ 
+ unsigned int snd_ac97_bus_scan_one(struct ac97_controller *adrv,
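The ac97 error path switches from kfree() to put_device(): once an object is managed by a refcount with a release callback, the last put, not the caller, frees it, and freeing it directly risks a double free. A userspace sketch of the distinction, with toy types only:

/* Toy refcounted object showing why the error path must drop its
 * reference rather than call free() directly. */
#include <stdio.h>
#include <stdlib.h>

struct toy_device {
	int refs;
	void (*release)(struct toy_device *);
};

static void toy_release(struct toy_device *dev)
{
	printf("release callback frees the object\n");
	free(dev);
}

static void toy_put(struct toy_device *dev)
{
	if (--dev->refs == 0)
		dev->release(dev);
}

int main(void)
{
	struct toy_device *dev = malloc(sizeof(*dev));

	if (!dev)
		return 1;
	dev->refs = 1;
	dev->release = toy_release;

	/* pretend registration failed: drop the reference instead of
	 * calling free(dev), which would bypass release() and double-free
	 * if anyone else still held a reference */
	toy_put(dev);
	return 0;
}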
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index 860543a4c840..12dd9b318db1 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -77,7 +77,7 @@ void snd_pcm_group_init(struct snd_pcm_group *group)
+ spin_lock_init(&group->lock);
+ mutex_init(&group->mutex);
+ INIT_LIST_HEAD(&group->substreams);
+- refcount_set(&group->refs, 0);
++ refcount_set(&group->refs, 1);
+ }
+ 
+ /* define group lock helpers */
+@@ -1096,8 +1096,7 @@ static void snd_pcm_group_unref(struct snd_pcm_group *group,
+ 
+ if (!group)
+ return;
+- do_free = refcount_dec_and_test(&group->refs) &&
+- list_empty(&group->substreams);
++ do_free = refcount_dec_and_test(&group->refs);
+ snd_pcm_group_unlock(group, substream->pcm->nonatomic);
+ if (do_free)
+ kfree(group);
+@@ -2020,6 +2019,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
+ snd_pcm_group_lock_irq(target_group, nonatomic);
+ snd_pcm_stream_lock(substream1);
+ snd_pcm_group_assign(substream1, target_group);
++ refcount_inc(&target_group->refs);
+ snd_pcm_stream_unlock(substream1);
+ snd_pcm_group_unlock_irq(target_group, nonatomic);
+ _end:
+@@ -2056,13 +2056,14 @@ static int snd_pcm_unlink(struct snd_pcm_substream *substream)
+ snd_pcm_group_lock_irq(group, nonatomic);
+ 
+ relink_to_local(substream);
++ refcount_dec(&group->refs);
+ 
+ /* detach the last stream, too */
+ if (list_is_singular(&group->substreams)) {
+ relink_to_local(list_first_entry(&group->substreams,
+ struct snd_pcm_substream,
+ link_list));
+- do_free = !refcount_read(&group->refs);
++ do_free = refcount_dec_and_test(&group->refs);
+ }
+ 
+ snd_pcm_group_unlock_irq(group, nonatomic);
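The PCM group hunks move to a conventional reference-counting scheme: the count starts at 1 for the creator, each linked stream takes one more reference, unlink drops one, and whoever drops the count to zero frees the group. A userspace sketch of that lifecycle with toy names:

/* Refcount lifecycle as in the PCM group fix: init to 1, get per user,
 * free on the put that reaches zero. */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct group {
	int refs;
};

static struct group *group_new(void)
{
	struct group *g = malloc(sizeof(*g));

	if (g)
		g->refs = 1;	/* matches refcount_set(&group->refs, 1) */
	return g;
}

static void group_get(struct group *g) { g->refs++; }

static bool group_put(struct group *g)	/* returns true if freed */
{
	if (--g->refs == 0) {
		free(g);
		return true;
	}
	return false;
}

int main(void)
{
	struct group *g = group_new();

	if (!g)
		return 1;
	group_get(g);	/* link first stream */
	group_get(g);	/* link second stream */
	printf("unlink: freed=%d\n", group_put(g));
	printf("unlink: freed=%d\n", group_put(g));
	printf("creator drop: freed=%d\n", group_put(g));
	return 0;
}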
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 50f86f458918..d438c450f04d 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -313,11 +313,10 @@ enum {
+ 
+ #define AZX_DCAPS_INTEL_SKYLAKE \
+ (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\
++ AZX_DCAPS_SYNC_WRITE |\
+ AZX_DCAPS_SEPARATE_STREAM_TAG | AZX_DCAPS_I915_COMPONENT)
+ 
+-#define AZX_DCAPS_INTEL_BROXTON \
+- (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\
+- AZX_DCAPS_SEPARATE_STREAM_TAG | AZX_DCAPS_I915_COMPONENT)
++#define AZX_DCAPS_INTEL_BROXTON AZX_DCAPS_INTEL_SKYLAKE
+ 
+ /* quirks for ATI SB / AMD Hudson */
+ #define AZX_DCAPS_PRESET_ATI_SB \
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 4f8d0845ee1e..f299f137eaea 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -1083,6 +1083,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
+ */
+ 
+ static const struct hda_device_id snd_hda_id_conexant[] = {
++ HDA_CODEC_ENTRY(0x14f11f86, "CX8070", patch_conexant_auto),
+ HDA_CODEC_ENTRY(0x14f12008, "CX8200", patch_conexant_auto),
+ HDA_CODEC_ENTRY(0x14f15045, "CX20549 (Venice)", patch_conexant_auto),
+ HDA_CODEC_ENTRY(0x14f15047, "CX20551 (Waikiki)", patch_conexant_auto),
+diff --git a/sound/usb/line6/podhd.c b/sound/usb/line6/podhd.c
+index 77a1d55334bb..53b53a9a4c6f 100644
+--- a/sound/usb/line6/podhd.c
++++ b/sound/usb/line6/podhd.c
+@@ -413,7 +413,7 @@ static const struct line6_properties podhd_properties_table[] = {
+ .name = "POD HD500",
+ .capabilities = LINE6_CAP_PCM
+ | LINE6_CAP_HWMON,
+- .altsetting = 1,
++ .altsetting = 0,
+ .ep_ctrl_r = 0x81,
+ .ep_ctrl_w = 0x01,
+ .ep_audio_r = 0x86,
+diff --git a/tools/iio/iio_utils.c b/tools/iio/iio_utils.c
+index a22b6e8fad46..7399eb7f1378 100644
+--- a/tools/iio/iio_utils.c
++++ b/tools/iio/iio_utils.c
+@@ -156,9 +156,9 @@ int iioutils_get_type(unsigned *is_signed, unsigned *bytes, unsigned *bits_used,
+ *be = (endianchar == 'b');
+ *bytes = padint / 8;
+ if (*bits_used == 64)
+- *mask = ~0;
++ *mask = ~(0ULL);
+ else
+- *mask = (1ULL << *bits_used) - 1;
++ *mask = (1ULL << *bits_used) - 1ULL;
+ 
+ *is_signed = (signchar == 's');
+ if (fclose(sysfsfp)) {
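The iio_utils hunk matters because `~0` is a 32-bit int (sign-extended into the 64-bit mask only by luck of conversion rules), and `1ULL << 64` would be undefined behaviour; the 64-bit case therefore needs `~0ULL` built without a shift. A quick demonstration:

/* Why the 64-bit case is special: shifting a 64-bit value by 64 is UB,
 * so the all-ones mask must be written as ~0ULL. */
#include <stdio.h>
#include <stdint.h>

static uint64_t mask_for_bits(unsigned int bits)
{
	if (bits >= 64)
		return ~0ULL;		/* shifting by >= 64 would be UB */
	return (1ULL << bits) - 1ULL;
}

int main(void)
{
	printf("12 bits -> 0x%llx\n", (unsigned long long)mask_for_bits(12));
	printf("64 bits -> 0x%llx\n", (unsigned long long)mask_for_bits(64));
	return 0;
}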
+diff --git a/tools/pci/pcitest.c b/tools/pci/pcitest.c
+index cb7a47dfd8b6..49ddfa6f5a8c 100644
+--- a/tools/pci/pcitest.c
++++ b/tools/pci/pcitest.c
+@@ -36,15 +36,15 @@ struct pci_test {
+ unsigned long size;
+ };
+ 
+-static void run_test(struct pci_test *test)
++static int run_test(struct pci_test *test)
+ {
+- long ret;
++ int ret = -EINVAL;
+ int fd;
+ 
+ fd = open(test->device, O_RDWR);
+ if (fd < 0) {
+ perror("can't open PCI Endpoint Test device");
+- return;
++ return -ENODEV;
+ }
+ 
+ if (test->barnum >= 0 && test->barnum <= 5) {
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
+index 1ae66f09dc7d..e28002d90573 100644
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -1276,8 +1276,8 @@ static int add_default_attributes(void)
+ fprintf(stderr,
+ "Cannot set up top down events %s: %d\n",
+ str, err);
+- free(str);
+ parse_events_print_error(&errinfo, str);
++ free(str);
+ return -1;
+ }
+ } else {
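The perf-stat hunk is a use-after-free fix: the string was freed before being passed to parse_events_print_error(), which still reads it. The corrected ordering, illustrated generically (report_error() is a made-up stand-in):

/* Free only after the last use: the ordering the perf-stat fix restores. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void report_error(const char *what)
{
	fprintf(stderr, "error while parsing '%s'\n", what);
}

int main(void)
{
	char *str = strdup("topdown-event-string");

	if (!str)
		return 1;
	report_error(str);	/* must run while str is still alive */
	free(str);		/* only free after the last use */
	return 0;
}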
+diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
+index 466621cd1017..8a9ff4b11df0 100644
+--- a/tools/perf/builtin-top.c
++++ b/tools/perf/builtin-top.c
+@@ -100,7 +100,7 @@ static void perf_top__resize(struct perf_top *top)
+ 
+ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
+ {
+- struct perf_evsel *evsel = hists_to_evsel(he->hists);
++ struct perf_evsel *evsel;
+ struct symbol *sym;
+ struct annotation *notes;
+ struct map *map;
+@@ -109,6 +109,8 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
+ if (!he || !he->ms.sym)
+ return -1;
+ 
++ evsel = hists_to_evsel(he->hists);
++
+ sym = he->ms.sym;
+ map = he->ms.map;
+ 
+@@ -225,7 +227,7 @@ static void perf_top__record_precise_ip(struct perf_top *top,
+ static void perf_top__show_details(struct perf_top *top)
+ {
+ struct hist_entry *he = top->sym_filter_entry;
+- struct perf_evsel *evsel = hists_to_evsel(he->hists);
++ struct perf_evsel *evsel;
+ struct annotation *notes;
+ struct symbol *symbol;
+ int more;
+@@ -233,6 +235,8 @@ static void perf_top__show_details(struct perf_top *top)
+ if (!he)
+ return;
+ 
++ evsel = hists_to_evsel(he->hists);
++
+ symbol = he->ms.sym;
+ notes = symbol__annotation(symbol);
+ 
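Both perf-top hunks fix the same pattern: the initializer dereferenced `he` before the `!he` check ran, so the dereference is moved below the check. The pattern in miniature, with toy types:

/* Don't dereference a pointer in an initializer when the NULL check
 * only happens further down. */
#include <stdio.h>

struct entry { int value; };

static int show_entry(const struct entry *e)
{
	int value;		/* not: int value = e->value; */

	if (!e)
		return -1;	/* check first ... */

	value = e->value;	/* ... dereference after */
	printf("value = %d\n", value);
	return 0;
}

int main(void)
{
	struct entry e = { 42 };

	show_entry(&e);
	show_entry(NULL);	/* safe: returns -1 instead of crashing */
	return 0;
}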
+diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
+index 52fadc858ef0..909e68545bb8 100644
+--- a/tools/perf/builtin-trace.c
++++ b/tools/perf/builtin-trace.c
+@@ -997,10 +997,10 @@ static struct thread_trace *thread_trace__new(void)
+ {
+ struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace));
+ 
+- if (ttrace)
++ if (ttrace) {
+ ttrace->files.max = -1;
+-
+- ttrace->syscall_stats = intlist__new(NULL);
++ ttrace->syscall_stats = intlist__new(NULL);
++ }
+ 
+ return ttrace;
+ }
+diff --git a/tools/perf/tests/mmap-thread-lookup.c b/tools/perf/tests/mmap-thread-lookup.c
+index ba87e6e8d18c..0a4301a5155c 100644
+--- a/tools/perf/tests/mmap-thread-lookup.c
++++ b/tools/perf/tests/mmap-thread-lookup.c
+@@ -53,7 +53,7 @@ static void *thread_fn(void *arg)
+ {
+ struct thread_data *td = arg;
+ ssize_t ret;
+- int go;
++ int go = 0;
+ 
+ if (thread_init(td))
+ return NULL;
+diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
+index 3421ecbdd3f0..c1dd9b54dc6e 100644
+--- a/tools/perf/ui/browsers/hists.c
++++ b/tools/perf/ui/browsers/hists.c
+@@ -638,7 +638,11 @@ int hist_browser__run(struct hist_browser *browser, const char *help,
+ switch (key) {
+ case K_TIMER: {
+ u64 nr_entries;
+- hbt->timer(hbt->arg);
++
++ WARN_ON_ONCE(!hbt);
++
++ if (hbt)
++ hbt->timer(hbt->arg);
+ 
+ if (hist_browser__has_filter(browser) ||
+ symbol_conf.report_hierarchy)
+@@ -2819,7 +2823,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
+ {
+ struct hists *hists = evsel__hists(evsel);
+ struct hist_browser *browser = perf_evsel_browser__new(evsel, hbt, env, annotation_opts);
+- struct branch_info *bi;
++ struct branch_info *bi = NULL;
+ #define MAX_OPTIONS 16
+ char *options[MAX_OPTIONS];
+ struct popup_action actions[MAX_OPTIONS];
+@@ -3085,7 +3089,9 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
+ goto skip_annotation;
+ 
+ if (sort__mode == SORT_MODE__BRANCH) {
+- bi = browser->he_selection->branch_info;
++
++ if (browser->he_selection)
++ bi = browser->he_selection->branch_info;
+ 
+ if (bi == NULL)
+ goto skip_annotation;
+@@ -3269,7 +3275,8 @@ static int perf_evsel_menu__run(struct perf_evsel_menu *menu,
+ 
+ switch (key) {
+ case K_TIMER:
+- hbt->timer(hbt->arg);
++ if (hbt)
++ hbt->timer(hbt->arg);
+ 
+ if (!menu->lost_events_warned &&
+ menu->lost_events &&
+diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
+index c8ce13419d9b..b8dfcfe08bb1 100644
+--- a/tools/perf/util/annotate.c
++++ b/tools/perf/util/annotate.c
+@@ -1113,16 +1113,14 @@ static int disasm_line__parse(char *line, const char **namep, char **rawp)
+ *namep = strdup(name);
+ 
+ if (*namep == NULL)
+- goto out_free_name;
++ goto out;
+ 
+ (*rawp)[0] = tmp;
+ *rawp = ltrim(*rawp);
+ 
+ return 0;
+ 
+-out_free_name:
+- free((void *)namep);
+- *namep = NULL;
++out:
+ return -1;
+ }
+ 
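The annotate.c error path freed the wrong thing: `namep` is a pointer to the caller's pointer, so `free((void *)namep)` handed the address of a caller variable to free(). Since a failing strdup() means nothing was allocated, the label now simply returns. The distinction, sketched with toy names:

/* free(*namep), never free(namep): the double-pointer pitfall fixed above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int dup_name(const char *src, const char **namep)
{
	*namep = strdup(src);
	if (*namep == NULL)
		return -1;	/* nothing was allocated; nothing to free */
	return 0;
}

int main(void)
{
	const char *name = NULL;

	if (dup_name("mov", &name) == 0) {
		printf("name = %s\n", name);
		free((void *)name);	/* free the duplicated string itself */
	}
	return 0;
}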
+diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c
+index e32dbffebb2f..625ad3639a7e 100644
+--- a/tools/perf/util/intel-bts.c
++++ b/tools/perf/util/intel-bts.c
+@@ -891,13 +891,12 @@ int intel_bts_process_auxtrace_info(union perf_event *event,
+ if (dump_trace)
+ return 0;
+ 
+- if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
++ if (session->itrace_synth_opts->set) {
+ bts->synth_opts = *session->itrace_synth_opts;
+ } else {
+ itrace_synth_opts__set_default(&bts->synth_opts,
+ session->itrace_synth_opts->default_no_sample);
+- if (session->itrace_synth_opts)
+- bts->synth_opts.thread_stack =
++ bts->synth_opts.thread_stack =
+ session->itrace_synth_opts->thread_stack;
+ }
+ 
+diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
+index ee71efb9db62..9c81ee092784 100644
+--- a/tools/perf/util/map.c
++++ b/tools/perf/util/map.c
+@@ -470,8 +470,11 @@ int map__fprintf_srccode(struct map *map, u64 addr,
+ goto out_free_line;
+ 
+ ret = fprintf(fp, "|%-8d %.*s", line, len, srccode);
+- state->srcfile = srcfile;
+- state->line = line;
++
++ if (state) {
++ state->srcfile = srcfile;
++ state->line = line;
++ }
+ return ret;
+ 
+ out_free_line:
+diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
+index 54cf163347f7..2e61dd6a3574 100644
+--- a/tools/perf/util/session.c
++++ b/tools/perf/util/session.c
+@@ -1249,6 +1249,9 @@ static void dump_read(struct perf_evsel *evsel, union perf_event *event)
+ evsel ? perf_evsel__name(evsel) : "FAIL",
+ event->read.value);
+ 
++ if (!evsel)
++ return;
++
+ read_format = evsel->attr.read_format;
+ 
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+diff --git a/tools/testing/selftests/rseq/rseq-arm.h b/tools/testing/selftests/rseq/rseq-arm.h
+index 84f28f147fb6..5943c816c07c 100644
+--- a/tools/testing/selftests/rseq/rseq-arm.h
++++ b/tools/testing/selftests/rseq/rseq-arm.h
+@@ -6,6 +6,8 @@
+ */
+ 
+ /*
++ * - ARM little endian
++ *
+ * RSEQ_SIG uses the udf A32 instruction with an uncommon immediate operand
+ * value 0x5de3. This traps if user-space reaches this instruction by mistake,
+ * and the uncommon operand ensures the kernel does not move the instruction
+@@ -22,36 +24,40 @@
+ * def3 udf #243 ; 0xf3
+ * e7f5 b.n <7f5>
+ *
+- * pre-ARMv6 big endian code:
+- * e7f5 b.n <7f5>
+- * def3 udf #243 ; 0xf3
++ * - ARMv6+ big endian (BE8):
+ *
+ * ARMv6+ -mbig-endian generates mixed endianness code vs data: little-endian
+- * code and big-endian data. Ensure the RSEQ_SIG data signature matches code
+- * endianness. Prior to ARMv6, -mbig-endian generates big-endian code and data
+- * (which match), so there is no need to reverse the endianness of the data
+- * representation of the signature. However, the choice between BE32 and BE8
+- * is done by the linker, so we cannot know whether code and data endianness
+- * will be mixed before the linker is invoked.
++ * code and big-endian data. The data value of the signature needs to have its
++ * byte order reversed to generate the trap instruction:
++ *
++ * Data: 0xf3def5e7
++ *
++ * Translates to this A32 instruction pattern:
++ *
++ * e7f5def3 udf #24035 ; 0x5de3
++ *
++ * Translates to this T16 instruction pattern:
++ *
++ * def3 udf #243 ; 0xf3
++ * e7f5 b.n <7f5>
++ *
++ * - Prior to ARMv6 big endian (BE32):
++ *
++ * Prior to ARMv6, -mbig-endian generates big-endian code and data
++ * (which match), so the endianness of the data representation of the
++ * signature should not be reversed. However, the choice between BE32
++ * and BE8 is done by the linker, so we cannot know whether code and
++ * data endianness will be mixed before the linker is invoked. So rather
++ * than try to play tricks with the linker, the rseq signature is simply
++ * data (not a trap instruction) prior to ARMv6 on big endian. This is
++ * why the signature is expressed as data (.word) rather than as
++ * instruction (.inst) in assembler.
+ */
+ 
+-#define RSEQ_SIG_CODE 0xe7f5def3
+-
+-#ifndef __ASSEMBLER__
+-
+-#define RSEQ_SIG_DATA \
+- ({ \
+- int sig; \
+- asm volatile ("b 2f\n\t" \
+- "1: .inst " __rseq_str(RSEQ_SIG_CODE) "\n\t" \
+- "2:\n\t" \
+- "ldr %[sig], 1b\n\t" \
+- : [sig] "=r" (sig)); \
+- sig; \
+- })
+-
+-#define RSEQ_SIG RSEQ_SIG_DATA
+-
++#ifdef __ARMEB__
++#define RSEQ_SIG 0xf3def5e7 /* udf #24035 ; 0x5de3 (ARMv6+) */
++#else
++#define RSEQ_SIG 0xe7f5def3 /* udf #24035 ; 0x5de3 */
+ #endif
+ 
+ #define rseq_smp_mb() __asm__ __volatile__ ("dmb" ::: "memory", "cc")
+@@ -125,8 +131,7 @@ do { \
+ __rseq_str(table_label) ":\n\t" \
+ ".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
+ ".word " __rseq_str(start_ip) ", 0x0, " __rseq_str(post_commit_offset) ", 0x0, " __rseq_str(abort_ip) ", 0x0\n\t" \
+- ".arm\n\t" \
+- ".inst " __rseq_str(RSEQ_SIG_CODE) "\n\t" \
++ ".word " __rseq_str(RSEQ_SIG) "\n\t" \
+ __rseq_str(label) ":\n\t" \
+ teardown \
+ "b %l[" __rseq_str(abort_label) "]\n\t"