commit: 819b0cffa158a73b6276046ea0cb831c15ae8314
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Feb 1 17:23:02 2022 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Feb 1 17:23:02 2022 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=819b0cff

Linux patch 5.10.96

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1095_linux-5.10.96.patch | 3881 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3885 insertions(+)

diff --git a/0000_README b/0000_README
index 5f3cbb9a..cc530626 100644
--- a/0000_README
+++ b/0000_README
@@ -423,6 +423,10 @@ Patch: 1094_linux-5.10.95.patch
From: http://www.kernel.org
Desc: Linux 5.10.95

+Patch: 1095_linux-5.10.96.patch
+From: http://www.kernel.org
+Desc: Linux 5.10.96
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1095_linux-5.10.96.patch b/1095_linux-5.10.96.patch
new file mode 100644
index 00000000..6d0571ac
--- /dev/null
+++ b/1095_linux-5.10.96.patch
@@ -0,0 +1,3881 @@
+diff --git a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
+index 0968b40aef1e8..e3501bfa22e90 100644
+--- a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
++++ b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
+@@ -31,7 +31,7 @@ tcan4x5x: tcan4x5x@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ spi-max-frequency = <10000000>;
+- bosch,mram-cfg = <0x0 0 0 32 0 0 1 1>;
++ bosch,mram-cfg = <0x0 0 0 16 0 0 1 1>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <14 IRQ_TYPE_LEVEL_LOW>;
+ device-state-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
+diff --git a/Makefile b/Makefile
+index fa98893aae615..c43133c8a5b1f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 10
+-SUBLEVEL = 95
++SUBLEVEL = 96
+ EXTRAVERSION =
+ NAME = Dare mighty things
+
+diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
+index 4999caff32818..22275d8518eb3 100644
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -511,34 +511,26 @@ static void entry_task_switch(struct task_struct *next)
+
+ /*
+ * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT.
+- * Assuming the virtual counter is enabled at the beginning of times:
+- *
+- * - disable access when switching from a 64bit task to a 32bit task
+- * - enable access when switching from a 32bit task to a 64bit task
++ * Ensure access is disabled when switching to a 32bit task, ensure
++ * access is enabled when switching to a 64bit task.
+ */
+-static void erratum_1418040_thread_switch(struct task_struct *prev,
+- struct task_struct *next)
++static void erratum_1418040_thread_switch(struct task_struct *next)
+ {
+- bool prev32, next32;
+- u64 val;
+-
+- if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040))
+- return;
+-
+- prev32 = is_compat_thread(task_thread_info(prev));
+- next32 = is_compat_thread(task_thread_info(next));
+-
+- if (prev32 == next32 || !this_cpu_has_cap(ARM64_WORKAROUND_1418040))
++ if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) ||
++ !this_cpu_has_cap(ARM64_WORKAROUND_1418040))
+ return;
+
+- val = read_sysreg(cntkctl_el1);
+-
+- if (!next32)
+- val |= ARCH_TIMER_USR_VCT_ACCESS_EN;
++ if (is_compat_thread(task_thread_info(next)))
++ sysreg_clear_set(cntkctl_el1, ARCH_TIMER_USR_VCT_ACCESS_EN, 0);
+ else
+- val &= ~ARCH_TIMER_USR_VCT_ACCESS_EN;
++ sysreg_clear_set(cntkctl_el1, 0, ARCH_TIMER_USR_VCT_ACCESS_EN);
++}
+
+- write_sysreg(val, cntkctl_el1);
++static void erratum_1418040_new_exec(void)
++{
++ preempt_disable();
++ erratum_1418040_thread_switch(current);
++ preempt_enable();
+ }
+
+ /*
+@@ -556,7 +548,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
+ entry_task_switch(next);
+ uao_thread_switch(next);
+ ssbs_thread_switch(next);
+- erratum_1418040_thread_switch(prev, next);
++ erratum_1418040_thread_switch(next);
+
+ /*
+ * Complete any pending TLB or cache maintenance on this CPU in case
+@@ -622,6 +614,7 @@ void arch_setup_new_exec(void)
+ current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
+
+ ptrauth_thread_init_user(current);
++ erratum_1418040_new_exec();
+
+ if (task_spec_ssb_noexec(current)) {
+ arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
+diff --git a/arch/powerpc/include/asm/book3s/32/mmu-hash.h b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
+index a8982d52f6b1d..cbde06d0fb380 100644
+--- a/arch/powerpc/include/asm/book3s/32/mmu-hash.h
++++ b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
+@@ -102,6 +102,8 @@ extern s32 patch__hash_page_B, patch__hash_page_C;
+ extern s32 patch__flush_hash_A0, patch__flush_hash_A1, patch__flush_hash_A2;
+ extern s32 patch__flush_hash_B;
+
++int __init find_free_bat(void);
++unsigned int bat_block_size(unsigned long base, unsigned long top);
+ #endif /* !__ASSEMBLY__ */
+
+ /* We happily ignore the smaller BATs on 601, we don't actually use
+diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
+index a6e3700c4566a..f0c0816f57270 100644
+--- a/arch/powerpc/include/asm/ppc-opcode.h
++++ b/arch/powerpc/include/asm/ppc-opcode.h
+@@ -449,6 +449,7 @@
+ #define PPC_RAW_LDX(r, base, b) (0x7c00002a | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
+ #define PPC_RAW_LHZ(r, base, i) (0xa0000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i))
+ #define PPC_RAW_LHBRX(r, base, b) (0x7c00062c | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
++#define PPC_RAW_LWBRX(r, base, b) (0x7c00042c | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
+ #define PPC_RAW_LDBRX(r, base, b) (0x7c000428 | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
+ #define PPC_RAW_STWCX(s, a, b) (0x7c00012d | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b))
+ #define PPC_RAW_CMPWI(a, i) (0x2c000000 | ___PPC_RA(a) | IMM_L(i))
+diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
+index fe2ef598e2ead..376104c166fcf 100644
+--- a/arch/powerpc/kernel/Makefile
++++ b/arch/powerpc/kernel/Makefile
+@@ -11,6 +11,7 @@ CFLAGS_prom_init.o += -fPIC
+ CFLAGS_btext.o += -fPIC
+ endif
+
++CFLAGS_early_32.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+ CFLAGS_cputable.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+ CFLAGS_prom_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+ CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
+index 58991233381ed..0697a0e014ae8 100644
+--- a/arch/powerpc/lib/Makefile
++++ b/arch/powerpc/lib/Makefile
+@@ -19,6 +19,9 @@ CFLAGS_code-patching.o += -DDISABLE_BRANCH_PROFILING
+ CFLAGS_feature-fixups.o += -DDISABLE_BRANCH_PROFILING
+ endif
+
++CFLAGS_code-patching.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
++CFLAGS_feature-fixups.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
++
+ obj-y += alloc.o code-patching.o feature-fixups.o pmem.o inst.o test_code-patching.o
+
+ ifndef CONFIG_KASAN
+diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
+index a59e7ec981803..602ab13127b40 100644
+--- a/arch/powerpc/mm/book3s32/mmu.c
++++ b/arch/powerpc/mm/book3s32/mmu.c
+@@ -72,7 +72,7 @@ unsigned long p_block_mapped(phys_addr_t pa)
+ return 0;
+ }
+
+-static int find_free_bat(void)
++int __init find_free_bat(void)
+ {
+ int b;
+ int n = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
+@@ -96,7 +96,7 @@ static int find_free_bat(void)
+ * - block size has to be a power of two. This is calculated by finding the
+ * highest bit set to 1.
+ */
+-static unsigned int block_size(unsigned long base, unsigned long top)
++unsigned int bat_block_size(unsigned long base, unsigned long top)
+ {
+ unsigned int max_size = SZ_256M;
+ unsigned int base_shift = (ffs(base) - 1) & 31;
+@@ -141,7 +141,7 @@ static unsigned long __init __mmu_mapin_ram(unsigned long base, unsigned long to
+ int idx;
+
+ while ((idx = find_free_bat()) != -1 && base != top) {
+- unsigned int size = block_size(base, top);
++ unsigned int size = bat_block_size(base, top);
+
+ if (size < 128 << 10)
+ break;
+@@ -201,18 +201,17 @@ void mmu_mark_initmem_nx(void)
+ int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
+ int i;
+ unsigned long base = (unsigned long)_stext - PAGE_OFFSET;
+- unsigned long top = (unsigned long)_etext - PAGE_OFFSET;
++ unsigned long top = ALIGN((unsigned long)_etext - PAGE_OFFSET, SZ_128K);
+ unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET;
+ unsigned long size;
+
+- for (i = 0; i < nb - 1 && base < top && top - base > (128 << 10);) {
+- size = block_size(base, top);
++ for (i = 0; i < nb - 1 && base < top;) {
++ size = bat_block_size(base, top);
+ setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT);
+ base += size;
+ }
+ if (base < top) {
+- size = block_size(base, top);
+- size = max(size, 128UL << 10);
++ size = bat_block_size(base, top);
+ if ((top - base) > size) {
+ size <<= 1;
+ if (strict_kernel_rwx_enabled() && base + size > border)
+diff --git a/arch/powerpc/mm/kasan/book3s_32.c b/arch/powerpc/mm/kasan/book3s_32.c
+index 35b287b0a8da4..450a67ef0bbe1 100644
+--- a/arch/powerpc/mm/kasan/book3s_32.c
++++ b/arch/powerpc/mm/kasan/book3s_32.c
+@@ -10,48 +10,51 @@ int __init kasan_init_region(void *start, size_t size)
+ {
+ unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
+ unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
+- unsigned long k_cur = k_start;
+- int k_size = k_end - k_start;
+- int k_size_base = 1 << (ffs(k_size) - 1);
++ unsigned long k_nobat = k_start;
++ unsigned long k_cur;
++ phys_addr_t phys;
+ int ret;
+- void *block;
+
+- block = memblock_alloc(k_size, k_size_base);
+-
+- if (block && k_size_base >= SZ_128K && k_start == ALIGN(k_start, k_size_base)) {
+- int shift = ffs(k_size - k_size_base);
+- int k_size_more = shift ? 1 << (shift - 1) : 0;
+-
+- setbat(-1, k_start, __pa(block), k_size_base, PAGE_KERNEL);
+- if (k_size_more >= SZ_128K)
+- setbat(-1, k_start + k_size_base, __pa(block) + k_size_base,
+- k_size_more, PAGE_KERNEL);
+- if (v_block_mapped(k_start))
+- k_cur = k_start + k_size_base;
+- if (v_block_mapped(k_start + k_size_base))
+- k_cur = k_start + k_size_base + k_size_more;
+-
+- update_bats();
++ while (k_nobat < k_end) {
++ unsigned int k_size = bat_block_size(k_nobat, k_end);
++ int idx = find_free_bat();
++
++ if (idx == -1)
++ break;
++ if (k_size < SZ_128K)
++ break;
++ phys = memblock_phys_alloc_range(k_size, k_size, 0,
++ MEMBLOCK_ALLOC_ANYWHERE);
++ if (!phys)
++ break;
++
++ setbat(idx, k_nobat, phys, k_size, PAGE_KERNEL);
++ k_nobat += k_size;
+ }
++ if (k_nobat != k_start)
++ update_bats();
+
+- if (!block)
+- block = memblock_alloc(k_size, PAGE_SIZE);
+- if (!block)
+- return -ENOMEM;
++ if (k_nobat < k_end) {
++ phys = memblock_phys_alloc_range(k_end - k_nobat, PAGE_SIZE, 0,
++ MEMBLOCK_ALLOC_ANYWHERE);
++ if (!phys)
++ return -ENOMEM;
++ }
+
+ ret = kasan_init_shadow_page_tables(k_start, k_end);
+ if (ret)
+ return ret;
+
+- kasan_update_early_region(k_start, k_cur, __pte(0));
++ kasan_update_early_region(k_start, k_nobat, __pte(0));
+
+- for (; k_cur < k_end; k_cur += PAGE_SIZE) {
++ for (k_cur = k_nobat; k_cur < k_end; k_cur += PAGE_SIZE) {
+ pmd_t *pmd = pmd_off_k(k_cur);
+- void *va = block + k_cur - k_start;
+- pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
++ pte_t pte = pfn_pte(PHYS_PFN(phys + k_cur - k_nobat), PAGE_KERNEL);
+
+ __set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0);
+ }
+ flush_tlb_kernel_range(k_start, k_end);
++ memset(kasan_mem_to_shadow(start), 0, k_end - k_start);
++
+ return 0;
+ }
+diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
+index 8936090acb579..0d47514e8870d 100644
+--- a/arch/powerpc/net/bpf_jit_comp64.c
++++ b/arch/powerpc/net/bpf_jit_comp64.c
+@@ -651,17 +651,21 @@ bpf_alu32_trunc:
+ EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1]));
+ break;
+ case 64:
+- /*
+- * Way easier and faster(?) to store the value
+- * into stack and then use ldbrx
+- *
+- * ctx->seen will be reliable in pass2, but
+- * the instructions generated will remain the
+- * same across all passes
+- */
++ /* Store the value to stack and then use byte-reverse loads */
+ PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
+ EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx)));
+- EMIT(PPC_RAW_LDBRX(dst_reg, 0, b2p[TMP_REG_1]));
++ if (cpu_has_feature(CPU_FTR_ARCH_206)) {
++ EMIT(PPC_RAW_LDBRX(dst_reg, 0, b2p[TMP_REG_1]));
++ } else {
++ EMIT(PPC_RAW_LWBRX(dst_reg, 0, b2p[TMP_REG_1]));
++ if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
++ EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32));
++ EMIT(PPC_RAW_LI(b2p[TMP_REG_2], 4));
++ EMIT(PPC_RAW_LWBRX(b2p[TMP_REG_2], b2p[TMP_REG_2], b2p[TMP_REG_1]));
++ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
++ EMIT(PPC_RAW_SLDI(b2p[TMP_REG_2], b2p[TMP_REG_2], 32));
++ EMIT(PPC_RAW_OR(dst_reg, dst_reg, b2p[TMP_REG_2]));
++ }
+ break;
+ }
+ break;
+diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
+index bd34e062bd290..e49aa8fc6a491 100644
+--- a/arch/powerpc/perf/core-book3s.c
++++ b/arch/powerpc/perf/core-book3s.c
+@@ -1273,9 +1273,20 @@ static void power_pmu_disable(struct pmu *pmu)
+ * Otherwise provide a warning if there is PMI pending, but
+ * no counter is found overflown.
+ */
+- if (any_pmc_overflown(cpuhw))
+- clear_pmi_irq_pending();
+- else
++ if (any_pmc_overflown(cpuhw)) {
++ /*
++ * Since power_pmu_disable runs under local_irq_save, it
++ * could happen that code hits a PMC overflow without PMI
++ * pending in paca. Hence only clear PMI pending if it was
++ * set.
++ *
++ * If a PMI is pending, then MSR[EE] must be disabled (because
++ * the masked PMI handler disabling EE). So it is safe to
++ * call clear_pmi_irq_pending().
++ */
++ if (pmi_irq_pending())
++ clear_pmi_irq_pending();
++ } else
+ WARN_ON(pmi_irq_pending());
+
+ val = mmcra = cpuhw->mmcr.mmcra;
+diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
+index e1fcc03159ef2..a927adccb4ba7 100644
+--- a/arch/s390/hypfs/hypfs_vm.c
++++ b/arch/s390/hypfs/hypfs_vm.c
+@@ -20,6 +20,7 @@
+
+ static char local_guest[] = " ";
+ static char all_guests[] = "* ";
++static char *all_groups = all_guests;
+ static char *guest_query;
+
+ struct diag2fc_data {
+@@ -62,10 +63,11 @@ static int diag2fc(int size, char* query, void *addr)
+
+ memcpy(parm_list.userid, query, NAME_LEN);
+ ASCEBC(parm_list.userid, NAME_LEN);
+- parm_list.addr = (unsigned long) addr ;
++ memcpy(parm_list.aci_grp, all_groups, NAME_LEN);
++ ASCEBC(parm_list.aci_grp, NAME_LEN);
++ parm_list.addr = (unsigned long)addr;
+ parm_list.size = size;
+ parm_list.fmt = 0x02;
+- memset(parm_list.aci_grp, 0x40, NAME_LEN);
+ rc = -1;
+
+ diag_stat_inc(DIAG_STAT_X2FC);
+diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
+index 4055f1c498147..b81bc96216b97 100644
+--- a/arch/s390/kernel/module.c
++++ b/arch/s390/kernel/module.c
+@@ -30,7 +30,7 @@
+ #define DEBUGP(fmt , ...)
+ #endif
+
+-#define PLT_ENTRY_SIZE 20
++#define PLT_ENTRY_SIZE 22
+
+ void *module_alloc(unsigned long size)
+ {
+@@ -330,27 +330,26 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ case R_390_PLTOFF32: /* 32 bit offset from GOT to PLT. */
+ case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
+ if (info->plt_initialized == 0) {
+- unsigned int insn[5];
+- unsigned int *ip = me->core_layout.base +
+- me->arch.plt_offset +
+- info->plt_offset;
+-
+- insn[0] = 0x0d10e310; /* basr 1,0 */
+- insn[1] = 0x100a0004; /* lg 1,10(1) */
++ unsigned char insn[PLT_ENTRY_SIZE];
++ char *plt_base;
++ char *ip;
++
++ plt_base = me->core_layout.base + me->arch.plt_offset;
++ ip = plt_base + info->plt_offset;
++ *(int *)insn = 0x0d10e310; /* basr 1,0 */
++ *(int *)&insn[4] = 0x100c0004; /* lg 1,12(1) */
+ if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) {
+- unsigned int *ij;
+- ij = me->core_layout.base +
+- me->arch.plt_offset +
+- me->arch.plt_size - PLT_ENTRY_SIZE;
+- insn[2] = 0xa7f40000 + /* j __jump_r1 */
+- (unsigned int)(u16)
+- (((unsigned long) ij - 8 -
+- (unsigned long) ip) / 2);
++ char *jump_r1;
++
++ jump_r1 = plt_base + me->arch.plt_size -
++ PLT_ENTRY_SIZE;
++ /* brcl 0xf,__jump_r1 */
++ *(short *)&insn[8] = 0xc0f4;
++ *(int *)&insn[10] = (jump_r1 - (ip + 8)) / 2;
+ } else {
+- insn[2] = 0x07f10000; /* br %r1 */
++ *(int *)&insn[8] = 0x07f10000; /* br %r1 */
+ }
+- insn[3] = (unsigned int) (val >> 32);
+- insn[4] = (unsigned int) val;
++ *(long *)&insn[14] = val;
+
+ write(ip, insn, sizeof(insn));
+ info->plt_initialized = 1;
+diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
+index ba26792d96731..03c8047bebb38 100644
+--- a/arch/x86/events/intel/uncore_snbep.c
++++ b/arch/x86/events/intel/uncore_snbep.c
+@@ -5239,7 +5239,7 @@ static struct intel_uncore_type icx_uncore_imc = {
+ .fixed_ctr_bits = 48,
+ .fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR,
+ .fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL,
+- .event_descs = hswep_uncore_imc_events,
++ .event_descs = snr_uncore_imc_events,
+ .perf_ctr = SNR_IMC_MMIO_PMON_CTR0,
+ .event_ctl = SNR_IMC_MMIO_PMON_CTL0,
+ .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
+diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
+index 0c6b02dd744c1..f73f1184b1c13 100644
+--- a/arch/x86/kernel/cpu/mce/amd.c
++++ b/arch/x86/kernel/cpu/mce/amd.c
+@@ -387,7 +387,7 @@ static void threshold_restart_bank(void *_tr)
+ u32 hi, lo;
+
+ /* sysfs write might race against an offline operation */
+- if (this_cpu_read(threshold_banks))
++ if (!this_cpu_read(threshold_banks) && !tr->set_lvt_off)
+ return;
+
+ rdmsr(tr->b->address, lo, hi);
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 5e1d7396a6b8a..2e6332af98aba 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -4146,13 +4146,6 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, void *insn, int i
+ if (likely(!insn || insn_len))
+ return true;
+
+- /*
+- * If RIP is invalid, go ahead with emulation which will cause an
+- * internal error exit.
+- */
+- if (!kvm_vcpu_gfn_to_memslot(vcpu, kvm_rip_read(vcpu) >> PAGE_SHIFT))
+- return true;
+-
+ cr4 = kvm_read_cr4(vcpu);
+ smep = cr4 & X86_CR4_SMEP;
+ smap = cr4 & X86_CR4_SMAP;
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 271669dc8d90a..7871b8e84b368 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3171,6 +3171,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ if (data & ~supported_xss)
+ return 1;
+ vcpu->arch.ia32_xss = data;
++ kvm_update_cpuid_runtime(vcpu);
+ break;
+ case MSR_SMI_COUNT:
+ if (!msr_info->host_initiated)
+diff --git a/block/bio.c b/block/bio.c
+index 0703a208ca248..f8d26ce7b61b0 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -575,7 +575,8 @@ void bio_truncate(struct bio *bio, unsigned new_size)
+ offset = new_size - done;
+ else
+ offset = 0;
+- zero_user(bv.bv_page, offset, bv.bv_len - offset);
++ zero_user(bv.bv_page, bv.bv_offset + offset,
++ bv.bv_len - offset);
+ truncated = true;
+ }
+ done += bv.bv_len;
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index 847f33ffc4aed..9fa86288b78a9 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -719,6 +719,13 @@ void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
+ systab_hdr->revision >> 16,
+ systab_hdr->revision & 0xffff,
+ vendor);
++
++ if (IS_ENABLED(CONFIG_X86_64) &&
++ systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
++ !strcmp(vendor, "Apple")) {
++ pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
++ efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
++ }
+ }
+
+ static __initdata char memory_type_name[][13] = {
+diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
+index c1b57dfb12776..415a971e76947 100644
+--- a/drivers/firmware/efi/libstub/arm64-stub.c
++++ b/drivers/firmware/efi/libstub/arm64-stub.c
+@@ -119,9 +119,9 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
+ if (image->image_base != _text)
+ efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n");
+
+- if (!IS_ALIGNED((u64)_text, EFI_KIMG_ALIGN))
+- efi_err("FIRMWARE BUG: kernel image not aligned on %ldk boundary\n",
+- EFI_KIMG_ALIGN >> 10);
++ if (!IS_ALIGNED((u64)_text, SEGMENT_ALIGN))
++ efi_err("FIRMWARE BUG: kernel image not aligned on %dk boundary\n",
++ SEGMENT_ALIGN >> 10);
+
+ kernel_size = _edata - _text;
+ kernel_memsize = kernel_size + (_end - _edata);
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+index ed2c50011d445..ddf539f26f2da 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+@@ -469,8 +469,8 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
+ return -EINVAL;
+ }
+
+- if (args->stream_size > SZ_64K || args->nr_relocs > SZ_64K ||
+- args->nr_bos > SZ_64K || args->nr_pmrs > 128) {
++ if (args->stream_size > SZ_128K || args->nr_relocs > SZ_128K ||
++ args->nr_bos > SZ_128K || args->nr_pmrs > 128) {
+ DRM_ERROR("submit arguments out of size limits\n");
+ return -EINVAL;
+ }
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
+index a7a24539921f3..a6efc11eba93f 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
+@@ -26,9 +26,16 @@ static void dpu_setup_dspp_pcc(struct dpu_hw_dspp *ctx,
+ struct dpu_hw_pcc_cfg *cfg)
+ {
+
+- u32 base = ctx->cap->sblk->pcc.base;
++ u32 base;
+
+- if (!ctx || !base) {
++ if (!ctx) {
++ DRM_ERROR("invalid ctx %pK\n", ctx);
++ return;
++ }
++
++ base = ctx->cap->sblk->pcc.base;
++
++ if (!base) {
+ DRM_ERROR("invalid ctx %pK pcc base 0x%x\n", ctx, base);
+ return;
+ }
+diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
+index 1adead764feed..f845333593daa 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi.c
++++ b/drivers/gpu/drm/msm/dsi/dsi.c
+@@ -33,7 +33,12 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
+
+ of_node_put(phy_node);
+
+- if (!phy_pdev || !msm_dsi->phy) {
++ if (!phy_pdev) {
++ DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__);
++ return -EPROBE_DEFER;
++ }
++ if (!msm_dsi->phy) {
++ put_device(&phy_pdev->dev);
+ DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__);
+ return -EPROBE_DEFER;
+ }
+diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+index e8c1a727179cc..e07986ab52c22 100644
+--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+@@ -769,12 +769,14 @@ void __exit msm_dsi_phy_driver_unregister(void)
+ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+ struct msm_dsi_phy_clk_request *clk_req)
+ {
+- struct device *dev = &phy->pdev->dev;
++ struct device *dev;
+ int ret;
+
+ if (!phy || !phy->cfg->ops.enable)
+ return -EINVAL;
+
++ dev = &phy->pdev->dev;
++
+ ret = dsi_phy_enable_resource(phy);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "%s: resource enable failed, %d\n",
+diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
+index 737453b6e5966..94f948ef279d1 100644
+--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
++++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
+@@ -97,10 +97,15 @@ static int msm_hdmi_get_phy(struct hdmi *hdmi)
+
+ of_node_put(phy_node);
+
+- if (!phy_pdev || !hdmi->phy) {
++ if (!phy_pdev) {
+ DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n");
+ return -EPROBE_DEFER;
+ }
++ if (!hdmi->phy) {
++ DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n");
++ put_device(&phy_pdev->dev);
++ return -EPROBE_DEFER;
++ }
+
+ hdmi->phy_dev = get_device(&phy_pdev->dev);
+
+diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
+index 33e42b2f9cfcb..e37e5afc680a2 100644
+--- a/drivers/gpu/drm/msm/msm_drv.c
++++ b/drivers/gpu/drm/msm/msm_drv.c
+@@ -350,7 +350,7 @@ static int msm_init_vram(struct drm_device *dev)
+ of_node_put(node);
+ if (ret)
+ return ret;
+- size = r.end - r.start;
++ size = r.end - r.start + 1;
+ DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);
+
+ /* if we have no IOMMU, then we need to use carveout allocator.
+diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
+index 959446b0137bc..a7142c32889c0 100644
+--- a/drivers/hwmon/lm90.c
++++ b/drivers/hwmon/lm90.c
+@@ -373,7 +373,7 @@ static const struct lm90_params lm90_params[] = {
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
+ | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_CRIT,
+ .alert_alarms = 0x7c,
+- .max_convrate = 8,
++ .max_convrate = 7,
+ },
+ [lm86] = {
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
+@@ -394,12 +394,13 @@ static const struct lm90_params lm90_params[] = {
+ .max_convrate = 9,
+ },
+ [max6646] = {
+- .flags = LM90_HAVE_CRIT,
++ .flags = LM90_HAVE_CRIT | LM90_HAVE_BROKEN_ALERT,
+ .alert_alarms = 0x7c,
+ .max_convrate = 6,
+ .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
+ },
+ [max6654] = {
++ .flags = LM90_HAVE_BROKEN_ALERT,
+ .alert_alarms = 0x7c,
+ .max_convrate = 7,
+ .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
+@@ -418,7 +419,7 @@ static const struct lm90_params lm90_params[] = {
+ },
+ [max6680] = {
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_CRIT
+- | LM90_HAVE_CRIT_ALRM_SWP,
++ | LM90_HAVE_CRIT_ALRM_SWP | LM90_HAVE_BROKEN_ALERT,
+ .alert_alarms = 0x7c,
+ .max_convrate = 7,
+ },
+diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
+index 1d621f7769035..62d11c6e41d60 100644
+--- a/drivers/media/platform/qcom/venus/core.c
++++ b/drivers/media/platform/qcom/venus/core.c
+@@ -375,8 +375,6 @@ static int venus_remove(struct platform_device *pdev)
+
+ hfi_destroy(core);
+
+- v4l2_device_unregister(&core->v4l2_dev);
+-
+ mutex_destroy(&core->pm_lock);
+ mutex_destroy(&core->lock);
+ venus_dbgfs_deinit(core);
+diff --git a/drivers/mtd/nand/raw/mpc5121_nfc.c b/drivers/mtd/nand/raw/mpc5121_nfc.c
+index cb293c50acb87..5b9271b9c3265 100644
+--- a/drivers/mtd/nand/raw/mpc5121_nfc.c
++++ b/drivers/mtd/nand/raw/mpc5121_nfc.c
+@@ -291,7 +291,6 @@ static int ads5121_chipselect_init(struct mtd_info *mtd)
+ /* Control chips select signal on ADS5121 board */
+ static void ads5121_select_chip(struct nand_chip *nand, int chip)
+ {
+- struct mtd_info *mtd = nand_to_mtd(nand);
+ struct mpc5121_nfc_prv *prv = nand_get_controller_data(nand);
+ u8 v;
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index 6e7da1dc2e8c3..d6580e942724d 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -2382,8 +2382,7 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
+ break;
+ }
+
+- if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
+- hclgevf_enable_vector(&hdev->misc_vector, true);
++ hclgevf_enable_vector(&hdev->misc_vector, true);
+
+ return IRQ_HANDLED;
+ }
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 4f99d97638248..c7be7ab131b19 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -3401,11 +3401,25 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
+ struct device *dev = &adapter->vdev->dev;
+ union ibmvnic_crq crq;
+ int max_entries;
++ int cap_reqs;
++
++ /* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on
++ * the PROMISC flag). Initialize this count upfront. When the tasklet
++ * receives a response to all of these, it will send the next protocol
++ * message (QUERY_IP_OFFLOAD).
++ */
++ if (!(adapter->netdev->flags & IFF_PROMISC) ||
++ adapter->promisc_supported)
++ cap_reqs = 7;
++ else
++ cap_reqs = 6;
+
+ if (!retry) {
+ /* Sub-CRQ entries are 32 byte long */
+ int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
+
++ atomic_set(&adapter->running_cap_crqs, cap_reqs);
++
+ if (adapter->min_tx_entries_per_subcrq > entries_page ||
+ adapter->min_rx_add_entries_per_subcrq > entries_page) {
+ dev_err(dev, "Fatal, invalid entries per sub-crq\n");
+@@ -3466,44 +3480,45 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
+ adapter->opt_rx_comp_queues;
+
+ adapter->req_rx_add_queues = adapter->max_rx_add_queues;
++ } else {
++ atomic_add(cap_reqs, &adapter->running_cap_crqs);
+ }
+-
+ memset(&crq, 0, sizeof(crq));
+ crq.request_capability.first = IBMVNIC_CRQ_CMD;
+ crq.request_capability.cmd = REQUEST_CAPABILITY;
+
+ crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
+ crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
+- atomic_inc(&adapter->running_cap_crqs);
++ cap_reqs--;
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
+ crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
+- atomic_inc(&adapter->running_cap_crqs);
++ cap_reqs--;
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
+ crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
+- atomic_inc(&adapter->running_cap_crqs);
++ cap_reqs--;
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.request_capability.capability =
+ cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
+ crq.request_capability.number =
+ cpu_to_be64(adapter->req_tx_entries_per_subcrq);
+- atomic_inc(&adapter->running_cap_crqs);
++ cap_reqs--;
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.request_capability.capability =
+ cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
+ crq.request_capability.number =
+ cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
+- atomic_inc(&adapter->running_cap_crqs);
++ cap_reqs--;
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.request_capability.capability = cpu_to_be16(REQ_MTU);
+ crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
+- atomic_inc(&adapter->running_cap_crqs);
++ cap_reqs--;
+ ibmvnic_send_crq(adapter, &crq);
+
+ if (adapter->netdev->flags & IFF_PROMISC) {
+@@ -3511,16 +3526,21 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
+ crq.request_capability.capability =
+ cpu_to_be16(PROMISC_REQUESTED);
+ crq.request_capability.number = cpu_to_be64(1);
+- atomic_inc(&adapter->running_cap_crqs);
++ cap_reqs--;
+ ibmvnic_send_crq(adapter, &crq);
+ }
+ } else {
+ crq.request_capability.capability =
+ cpu_to_be16(PROMISC_REQUESTED);
+ crq.request_capability.number = cpu_to_be64(0);
+- atomic_inc(&adapter->running_cap_crqs);
++ cap_reqs--;
+ ibmvnic_send_crq(adapter, &crq);
+ }
++
++ /* Keep at end to catch any discrepancy between expected and actual
++ * CRQs sent.
++ */
++ WARN_ON(cap_reqs != 0);
+ }
+
+ static int pending_scrq(struct ibmvnic_adapter *adapter,
+@@ -3953,118 +3973,132 @@ static void send_query_map(struct ibmvnic_adapter *adapter)
+ static void send_query_cap(struct ibmvnic_adapter *adapter)
+ {
+ union ibmvnic_crq crq;
++ int cap_reqs;
++
++ /* We send out 25 QUERY_CAPABILITY CRQs below. Initialize this count
++ * upfront. When the tasklet receives a response to all of these, it
++ * can send out the next protocol messaage (REQUEST_CAPABILITY).
++ */
++ cap_reqs = 25;
++
++ atomic_set(&adapter->running_cap_crqs, cap_reqs);
+
+- atomic_set(&adapter->running_cap_crqs, 0);
+ memset(&crq, 0, sizeof(crq));
+ crq.query_capability.first = IBMVNIC_CRQ_CMD;
+ crq.query_capability.cmd = QUERY_CAPABILITY;
+
+ crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability =
+ cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability =
+ cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability =
+ cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability =
+ cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability = cpu_to_be16(MIN_MTU);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability = cpu_to_be16(MAX_MTU);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability =
+ cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability =
+ cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability =
+ cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
+- atomic_inc(&adapter->running_cap_crqs);
++
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
++
++ /* Keep at end to catch any discrepancy between expected and actual
++ * CRQs sent.
++ */
++ WARN_ON(cap_reqs != 0);
+ }
+
+ static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
+@@ -4369,6 +4403,8 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
+ char *name;
+
+ atomic_dec(&adapter->running_cap_crqs);
++ netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n",
++ atomic_read(&adapter->running_cap_crqs));
+ switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
+ case REQ_TX_QUEUES:
+ req_value = &adapter->req_tx_queues;
+@@ -5039,12 +5075,6 @@ static void ibmvnic_tasklet(struct tasklet_struct *t)
+ ibmvnic_handle_crq(crq, adapter);
+ crq->generic.first = 0;
+ }
+-
+- /* remain in tasklet until all
+- * capabilities responses are received
+- */
+- if (!adapter->wait_capability)
+- done = true;
+ }
+ /* if capabilities CRQ's were sent in this tasklet, the following
+ * tasklet must wait until all responses are received
+diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
+index 5b83d1bc0e74d..effdc3361266f 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e.h
++++ b/drivers/net/ethernet/intel/i40e/i40e.h
+@@ -172,7 +172,6 @@ enum i40e_interrupt_policy {
+
+ struct i40e_lump_tracking {
+ u16 num_entries;
+- u16 search_hint;
+ u16 list[0];
+ #define I40E_PILE_VALID_BIT 0x8000
+ #define I40E_IWARP_IRQ_PILE_ID (I40E_PILE_VALID_BIT - 2)
+@@ -755,12 +754,12 @@ struct i40e_vsi {
+ struct rtnl_link_stats64 net_stats_offsets;
+ struct i40e_eth_stats eth_stats;
+ struct i40e_eth_stats eth_stats_offsets;
+- u32 tx_restart;
+- u32 tx_busy;
++ u64 tx_restart;
++ u64 tx_busy;
+ u64 tx_linearize;
+ u64 tx_force_wb;
+- u32 rx_buf_failed;
+- u32 rx_page_failed;
++ u64 rx_buf_failed;
++ u64 rx_page_failed;
+
+ /* These are containers of ring pointers, allocated at run-time */
+ struct i40e_ring **rx_rings;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+index 714b578b2b49c..1114a15a9ce3c 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+@@ -240,7 +240,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
+ (unsigned long int)vsi->net_stats_offsets.rx_compressed,
+ (unsigned long int)vsi->net_stats_offsets.tx_compressed);
+ dev_info(&pf->pdev->dev,
+- " tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
++ " tx_restart = %llu, tx_busy = %llu, rx_buf_failed = %llu, rx_page_failed = %llu\n",
+ vsi->tx_restart, vsi->tx_busy,
+ vsi->rx_buf_failed, vsi->rx_page_failed);
+ rcu_read_lock();
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index f888a443a067b..bd18a780a0008 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -195,10 +195,6 @@ int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
+ * @id: an owner id to stick on the items assigned
+ *
+ * Returns the base item index of the lump, or negative for error
+- *
+- * The search_hint trick and lack of advanced fit-finding only work
+- * because we're highly likely to have all the same size lump requests.
+- * Linear search time and any fragmentation should be minimal.
+ **/
+ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
+ u16 needed, u16 id)
+@@ -213,8 +209,21 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
+ return -EINVAL;
+ }
+
+- /* start the linear search with an imperfect hint */
+- i = pile->search_hint;
++ /* Allocate last queue in the pile for FDIR VSI queue
++ * so it doesn't fragment the qp_pile
++ */
++ if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) {
++ if (pile->list[pile->num_entries - 1] & I40E_PILE_VALID_BIT) {
++ dev_err(&pf->pdev->dev,
++ "Cannot allocate queue %d for I40E_VSI_FDIR\n",
++ pile->num_entries - 1);
++ return -ENOMEM;
++ }
++ pile->list[pile->num_entries - 1] = id | I40E_PILE_VALID_BIT;
++ return pile->num_entries - 1;
++ }
++
++ i = 0;
+ while (i < pile->num_entries) {
+ /* skip already allocated entries */
+ if (pile->list[i] & I40E_PILE_VALID_BIT) {
+@@ -233,7 +242,6 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
+ for (j = 0; j < needed; j++)
+ pile->list[i+j] = id | I40E_PILE_VALID_BIT;
+ ret = i;
+- pile->search_hint = i + j;
+ break;
+ }
+
+@@ -256,7 +264,7 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
+ {
+ int valid_id = (id | I40E_PILE_VALID_BIT);
+ int count = 0;
+- int i;
++ u16 i;
+
+ if (!pile || index >= pile->num_entries)
+ return -EINVAL;
+@@ -268,8 +276,6 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
+ count++;
+ }
+
+- if (count && index < pile->search_hint)
+- pile->search_hint = index;
+
+ return count;
+ }
+@@ -771,9 +777,9 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
+ struct rtnl_link_stats64 *ns; /* netdev stats */
+ struct i40e_eth_stats *oes;
+ struct i40e_eth_stats *es; /* device's eth stats */
+- u32 tx_restart, tx_busy;
++ u64 tx_restart, tx_busy;
+ struct i40e_ring *p;
+- u32 rx_page, rx_buf;
++ u64 rx_page, rx_buf;
+ u64 bytes, packets;
+ unsigned int start;
+ u64 tx_linearize;
+@@ -10130,15 +10136,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+ }
+ i40e_get_oem_version(&pf->hw);
+
+- if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
+- ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) ||
+- hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) {
+- /* The following delay is necessary for 4.33 firmware and older
+- * to recover after EMP reset. 200 ms should suffice but we
+- * put here 300 ms to be sure that FW is ready to operate
+- * after reset.
+- */
+- mdelay(300);
++ if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) {
++ /* The following delay is necessary for firmware update. */
++ mdelay(1000);
+ }
+
+ /* re-verify the eeprom if we just had an EMP reset */
+@@ -11327,7 +11327,6 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
+ return -ENOMEM;
+
+ pf->irq_pile->num_entries = vectors;
+- pf->irq_pile->search_hint = 0;
+
+ /* track first vector for misc interrupts, ignore return */
+ (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
+@@ -12130,7 +12129,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
+ goto sw_init_done;
+ }
+ pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
+- pf->qp_pile->search_hint = 0;
+
+ pf->tx_timeout_recovery_level = 1;
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
+index 564df22f3f463..8335f151ceefc 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
+@@ -279,6 +279,9 @@
+ #define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
+ #define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+ #define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
++#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
++#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT)
++#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+ #define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+ #define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
+ #define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
1217 |
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c |
1218 |
+index 65c4c4fd359fa..f71b7334e2955 100644 |
1219 |
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -1323,6 +1323,32 @@ static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
+ return aq_ret;
+ }
+
++/**
++ * i40e_sync_vfr_reset
++ * @hw: pointer to hw struct
++ * @vf_id: VF identifier
++ *
++ * Before trigger hardware reset, we need to know if no other process has
++ * reserved the hardware for any reset operations. This check is done by
++ * examining the status of the RSTAT1 register used to signal the reset.
++ **/
++static int i40e_sync_vfr_reset(struct i40e_hw *hw, int vf_id)
++{
++ u32 reg;
++ int i;
++
++ for (i = 0; i < I40E_VFR_WAIT_COUNT; i++) {
++ reg = rd32(hw, I40E_VFINT_ICR0_ENA(vf_id)) &
++ I40E_VFINT_ICR0_ADMINQ_MASK;
++ if (reg)
++ return 0;
++
++ usleep_range(100, 200);
++ }
++
++ return -EAGAIN;
++}
++
+ /**
+ * i40e_trigger_vf_reset
+ * @vf: pointer to the VF structure
+@@ -1337,9 +1363,11 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg, reg_idx, bit_idx;
++ bool vf_active;
++ u32 radq;
+
+ /* warn the VF */
+- clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
++ vf_active = test_and_clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
+
+ /* Disable VF's configuration API during reset. The flag is re-enabled
+ * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
+@@ -1353,7 +1381,19 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
+ * just need to clean up, so don't hit the VFRTRIG register.
+ */
+ if (!flr) {
+- /* reset VF using VPGEN_VFRTRIG reg */
++ /* Sync VFR reset before trigger next one */
++ radq = rd32(hw, I40E_VFINT_ICR0_ENA(vf->vf_id)) &
++ I40E_VFINT_ICR0_ADMINQ_MASK;
++ if (vf_active && !radq)
++ /* waiting for finish reset by virtual driver */
++ if (i40e_sync_vfr_reset(hw, vf->vf_id))
++ dev_info(&pf->pdev->dev,
++ "Reset VF %d never finished\n",
++ vf->vf_id);
++
++ /* Reset VF using VPGEN_VFRTRIG reg. It is also setting
++ * in progress state in rstat1 register.
++ */
+ reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
+ reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+@@ -2563,6 +2603,59 @@ error_param:
+ aq_ret);
+ }
+
++/**
++ * i40e_check_enough_queue - find big enough queue number
++ * @vf: pointer to the VF info
++ * @needed: the number of items needed
++ *
++ * Returns the base item index of the queue, or negative for error
++ **/
++static int i40e_check_enough_queue(struct i40e_vf *vf, u16 needed)
++{
++ unsigned int i, cur_queues, more, pool_size;
++ struct i40e_lump_tracking *pile;
++ struct i40e_pf *pf = vf->pf;
++ struct i40e_vsi *vsi;
++
++ vsi = pf->vsi[vf->lan_vsi_idx];
++ cur_queues = vsi->alloc_queue_pairs;
++
++ /* if current allocated queues are enough for need */
++ if (cur_queues >= needed)
++ return vsi->base_queue;
++
++ pile = pf->qp_pile;
++ if (cur_queues > 0) {
++ /* if the allocated queues are not zero
++ * just check if there are enough queues for more
++ * behind the allocated queues.
++ */
++ more = needed - cur_queues;
++ for (i = vsi->base_queue + cur_queues;
++ i < pile->num_entries; i++) {
++ if (pile->list[i] & I40E_PILE_VALID_BIT)
++ break;
++
++ if (more-- == 1)
++ /* there is enough */
++ return vsi->base_queue;
++ }
++ }
++
++ pool_size = 0;
++ for (i = 0; i < pile->num_entries; i++) {
++ if (pile->list[i] & I40E_PILE_VALID_BIT) {
++ pool_size = 0;
++ continue;
++ }
++ if (needed <= ++pool_size)
++ /* there is enough */
++ return i;
++ }
++
++ return -ENOMEM;
++}
++
+ /**
+ * i40e_vc_request_queues_msg
+ * @vf: pointer to the VF info
+@@ -2597,6 +2690,12 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
+ req_pairs - cur_pairs,
+ pf->queues_left);
+ vfres->num_queue_pairs = pf->queues_left + cur_pairs;
++ } else if (i40e_check_enough_queue(vf, req_pairs) < 0) {
++ dev_warn(&pf->pdev->dev,
++ "VF %d requested %d more queues, but there is not enough for it.\n",
++ vf->vf_id,
++ req_pairs - cur_pairs);
++ vfres->num_queue_pairs = cur_pairs;
+ } else {
+ /* successful request */
+ vf->num_req_queues = req_pairs;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+index 49575a640a84c..03c42fd0fea19 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+@@ -19,6 +19,7 @@
+ #define I40E_MAX_VF_PROMISC_FLAGS 3
+
+ #define I40E_VF_STATE_WAIT_COUNT 20
++#define I40E_VFR_WAIT_COUNT 100
+
+ /* Various queue ctrls */
+ enum i40e_queue_ctrl {
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+index 044a5b1196acb..161174be51c31 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+@@ -386,7 +386,12 @@ static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf,
+ dst_mdev->msg_size = mbox_hdr->msg_size;
+ dst_mdev->num_msgs = num_msgs;
+ err = otx2_sync_mbox_msg(dst_mbox);
+- if (err) {
++ /* Error code -EIO indicate there is a communication failure
++ * to the AF. Rest of the error codes indicate that AF processed
++ * VF messages and set the error codes in response messages
++ * (if any) so simply forward responses to VF.
++ */
++ if (err == -EIO) {
+ dev_warn(pf->dev,
+ "AF not responding to VF%d messages\n", vf);
+ /* restore PF mbase and exit */
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index a8c5492cb39be..6d8a839fab22e 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -816,8 +816,6 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
+ priv->hwts_tx_en = 0;
+ priv->hwts_rx_en = 0;
+
+- stmmac_ptp_register(priv);
+-
+ return 0;
+ }
+
+@@ -2691,7 +2689,7 @@ static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
+ /**
+ * stmmac_hw_setup - setup mac in a usable state.
+ * @dev : pointer to the device structure.
+- * @init_ptp: initialize PTP if set
++ * @ptp_register: register PTP if set
+ * Description:
+ * this is the main function to setup the HW in a usable state because the
+ * dma engine is reset, the core registers are configured (e.g. AXI,
+@@ -2701,7 +2699,7 @@ static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
+ * 0 on success and an appropriate (-)ve integer as defined in errno.h
+ * file on failure.
+ */
+-static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
++static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
+ {
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+@@ -2757,13 +2755,13 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
+
+ stmmac_mmc_setup(priv);
+
+- if (init_ptp) {
+- ret = stmmac_init_ptp(priv);
+- if (ret == -EOPNOTSUPP)
+- netdev_warn(priv->dev, "PTP not supported by HW\n");
+- else if (ret)
+- netdev_warn(priv->dev, "PTP init failed\n");
+- }
++ ret = stmmac_init_ptp(priv);
++ if (ret == -EOPNOTSUPP)
++ netdev_warn(priv->dev, "PTP not supported by HW\n");
++ else if (ret)
++ netdev_warn(priv->dev, "PTP init failed\n");
++ else if (ptp_register)
++ stmmac_ptp_register(priv);
+
+ priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
+
+diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
+index 424e644724e46..e74f2e95a46eb 100644
+--- a/drivers/net/ethernet/ti/cpsw_priv.c
++++ b/drivers/net/ethernet/ti/cpsw_priv.c
+@@ -1144,7 +1144,7 @@ int cpsw_fill_rx_channels(struct cpsw_priv *priv)
+ static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw,
+ int size)
+ {
+- struct page_pool_params pp_params;
++ struct page_pool_params pp_params = {};
+ struct page_pool *pool;
+
+ pp_params.order = 0;
+diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
+index 5ab53e9942f30..5d30b3e1806ab 100644
+--- a/drivers/net/hamradio/yam.c
++++ b/drivers/net/hamradio/yam.c
+@@ -951,9 +951,7 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ sizeof(struct yamdrv_ioctl_mcs));
+ if (IS_ERR(ym))
+ return PTR_ERR(ym);
+- if (ym->cmd != SIOCYAMSMCS)
+- return -EINVAL;
+- if (ym->bitrate > YAM_MAXBITRATE) {
++ if (ym->cmd != SIOCYAMSMCS || ym->bitrate > YAM_MAXBITRATE) {
+ kfree(ym);
+ return -EINVAL;
+ }
+diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
+index dbed15dc0fe77..644861366d544 100644
+--- a/drivers/net/phy/broadcom.c
++++ b/drivers/net/phy/broadcom.c
+@@ -789,6 +789,7 @@ static struct phy_driver broadcom_drivers[] = {
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM54616S",
+ /* PHY_GBIT_FEATURES */
++ .soft_reset = genphy_soft_reset,
+ .config_init = bcm54xx_config_init,
+ .config_aneg = bcm54616s_config_aneg,
+ .ack_interrupt = bcm_phy_ack_intr,
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index 85f3cde5ffd09..d2f6d8107595a 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -1682,6 +1682,9 @@ void phy_detach(struct phy_device *phydev)
+ phy_driver_is_genphy_10g(phydev))
+ device_release_driver(&phydev->mdio.dev);
+
++ /* Assert the reset signal */
++ phy_device_reset(phydev, 1);
++
+ /*
+ * The phydev might go away on the put_device() below, so avoid
+ * a use-after-free bug by reading the underlying bus first.
+@@ -1693,9 +1696,6 @@ void phy_detach(struct phy_device *phydev)
+ ndev_owner = dev->dev.parent->driver->owner;
+ if (ndev_owner != bus->owner)
+ module_put(bus->owner);
+-
+- /* Assert the reset signal */
+- phy_device_reset(phydev, 1);
+ }
+ EXPORT_SYMBOL(phy_detach);
+
+diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
+index 4cf874fb5c5b4..a05d8372669c1 100644
+--- a/drivers/net/phy/sfp-bus.c
++++ b/drivers/net/phy/sfp-bus.c
+@@ -609,6 +609,11 @@ struct sfp_bus *sfp_bus_find_fwnode(struct fwnode_handle *fwnode)
+ else if (ret < 0)
+ return ERR_PTR(ret);
+
++ if (!fwnode_device_is_available(ref.fwnode)) {
++ fwnode_handle_put(ref.fwnode);
++ return NULL;
++ }
++
+ bus = sfp_bus_get(ref.fwnode);
+ fwnode_handle_put(ref.fwnode);
+ if (!bus)
+diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
+index 4bbbacdbf3bb7..be90d77c5168d 100644
+--- a/drivers/rpmsg/rpmsg_char.c
++++ b/drivers/rpmsg/rpmsg_char.c
+@@ -92,7 +92,7 @@ static int rpmsg_eptdev_destroy(struct device *dev, void *data)
+ /* wake up any blocked readers */
+ wake_up_interruptible(&eptdev->readq);
+
+- device_del(&eptdev->dev);
++ cdev_device_del(&eptdev->cdev, &eptdev->dev);
+ put_device(&eptdev->dev);
+
+ return 0;
+@@ -332,7 +332,6 @@ static void rpmsg_eptdev_release_device(struct device *dev)
+
+ ida_simple_remove(&rpmsg_ept_ida, dev->id);
+ ida_simple_remove(&rpmsg_minor_ida, MINOR(eptdev->dev.devt));
+- cdev_del(&eptdev->cdev);
+ kfree(eptdev);
+ }
+
+@@ -377,19 +376,13 @@ static int rpmsg_eptdev_create(struct rpmsg_ctrldev *ctrldev,
+ dev->id = ret;
+ dev_set_name(dev, "rpmsg%d", ret);
+
+- ret = cdev_add(&eptdev->cdev, dev->devt, 1);
++ ret = cdev_device_add(&eptdev->cdev, &eptdev->dev);
+ if (ret)
+ goto free_ept_ida;
+
+ /* We can now rely on the release function for cleanup */
+ dev->release = rpmsg_eptdev_release_device;
+
+- ret = device_add(dev);
+- if (ret) {
+- dev_err(dev, "device_add failed: %d\n", ret);
+- put_device(dev);
+- }
+-
+ return ret;
+
+ free_ept_ida:
+@@ -458,7 +451,6 @@ static void rpmsg_ctrldev_release_device(struct device *dev)
+
+ ida_simple_remove(&rpmsg_ctrl_ida, dev->id);
+ ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt));
+- cdev_del(&ctrldev->cdev);
+ kfree(ctrldev);
+ }
+
+@@ -493,19 +485,13 @@ static int rpmsg_chrdev_probe(struct rpmsg_device *rpdev)
+ dev->id = ret;
+ dev_set_name(&ctrldev->dev, "rpmsg_ctrl%d", ret);
+
+- ret = cdev_add(&ctrldev->cdev, dev->devt, 1);
++ ret = cdev_device_add(&ctrldev->cdev, &ctrldev->dev);
+ if (ret)
+ goto free_ctrl_ida;
+
+ /* We can now rely on the release function for cleanup */
+ dev->release = rpmsg_ctrldev_release_device;
+
+- ret = device_add(dev);
+- if (ret) {
+- dev_err(&rpdev->dev, "device_add failed: %d\n", ret);
+- put_device(dev);
+- }
+-
+ dev_set_drvdata(&rpdev->dev, ctrldev);
+
+ return ret;
+@@ -531,7 +517,7 @@ static void rpmsg_chrdev_remove(struct rpmsg_device *rpdev)
+ if (ret)
+ dev_warn(&rpdev->dev, "failed to nuke endpoints: %d\n", ret);
+
+- device_del(&ctrldev->dev);
++ cdev_device_del(&ctrldev->cdev, &ctrldev->dev);
+ put_device(&ctrldev->dev);
+ }
+
+diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
+index d24cafe02708f..511bf8e0a436c 100644
+--- a/drivers/s390/scsi/zfcp_fc.c
++++ b/drivers/s390/scsi/zfcp_fc.c
+@@ -521,6 +521,8 @@ static void zfcp_fc_adisc_handler(void *data)
+ goto out;
+ }
+
++ /* re-init to undo drop from zfcp_fc_adisc() */
++ port->d_id = ntoh24(adisc_resp->adisc_port_id);
+ /* port is good, unblock rport without going through erp */
+ zfcp_scsi_schedule_rport_register(port);
+ out:
+@@ -534,6 +536,7 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
+ struct zfcp_fc_req *fc_req;
+ struct zfcp_adapter *adapter = port->adapter;
+ struct Scsi_Host *shost = adapter->scsi_host;
++ u32 d_id;
+ int ret;
+
+ fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC);
+@@ -558,7 +561,15 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
+ fc_req->u.adisc.req.adisc_cmd = ELS_ADISC;
+ hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost));
+
+- ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els,
++ d_id = port->d_id; /* remember as destination for send els below */
++ /*
++ * Force fresh GID_PN lookup on next port recovery.
++ * Must happen after request setup and before sending request,
++ * to prevent race with port->d_id re-init in zfcp_fc_adisc_handler().
++ */
++ port->d_id = 0;
++
++ ret = zfcp_fsf_send_els(adapter, d_id, &fc_req->ct_els,
+ ZFCP_FC_CTELS_TMO);
+ if (ret)
+ kmem_cache_free(zfcp_fc_req_cache, fc_req);
+diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+index 6890bbe04a8c1..052e7879704a5 100644
+--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
++++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+@@ -80,7 +80,7 @@ static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba);
+ static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba);
+ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
+ struct device *parent, int npiv);
+-static void bnx2fc_destroy_work(struct work_struct *work);
++static void bnx2fc_port_destroy(struct fcoe_port *port);
+
+ static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev);
+ static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device
+@@ -905,9 +905,6 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
+ __bnx2fc_destroy(interface);
+ }
+ mutex_unlock(&bnx2fc_dev_lock);
+-
+- /* Ensure ALL destroy work has been completed before return */
+- flush_workqueue(bnx2fc_wq);
+ return;
+
+ default:
+@@ -1213,8 +1210,8 @@ static int bnx2fc_vport_destroy(struct fc_vport *vport)
+ mutex_unlock(&n_port->lp_mutex);
+ bnx2fc_free_vport(interface->hba, port->lport);
+ bnx2fc_port_shutdown(port->lport);
++ bnx2fc_port_destroy(port);
+ bnx2fc_interface_put(interface);
+- queue_work(bnx2fc_wq, &port->destroy_work);
+ return 0;
+ }
+
+@@ -1523,7 +1520,6 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
+ port->lport = lport;
+ port->priv = interface;
+ port->get_netdev = bnx2fc_netdev;
+- INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);
+
+ /* Configure fcoe_port */
+ rc = bnx2fc_lport_config(lport);
+@@ -1651,8 +1647,8 @@ static void __bnx2fc_destroy(struct bnx2fc_interface *interface)
+ bnx2fc_interface_cleanup(interface);
+ bnx2fc_stop(interface);
+ list_del(&interface->list);
++ bnx2fc_port_destroy(port);
+ bnx2fc_interface_put(interface);
+- queue_work(bnx2fc_wq, &port->destroy_work);
+ }
+
+ /**
+@@ -1692,15 +1688,12 @@ netdev_err:
+ return rc;
+ }
+
+-static void bnx2fc_destroy_work(struct work_struct *work)
++static void bnx2fc_port_destroy(struct fcoe_port *port)
+ {
+- struct fcoe_port *port;
+ struct fc_lport *lport;
+
+- port = container_of(work, struct fcoe_port, destroy_work);
+ lport = port->lport;
+-
+- BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n");
++ BNX2FC_HBA_DBG(lport, "Entered %s, destroying lport %p\n", __func__, lport);
+
+ bnx2fc_if_destroy(lport);
+ }
+@@ -2554,9 +2547,6 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev)
+ __bnx2fc_destroy(interface);
+ mutex_unlock(&bnx2fc_dev_lock);
+
+- /* Ensure ALL destroy work has been completed before return */
+- flush_workqueue(bnx2fc_wq);
+-
+ bnx2fc_ulp_stop(hba);
+ /* unregister cnic device */
+ if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index d76880ae68c83..b8f8621537720 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -317,6 +317,7 @@ static struct tty_driver *gsm_tty_driver;
+ #define GSM1_ESCAPE_BITS 0x20
+ #define XON 0x11
+ #define XOFF 0x13
++#define ISO_IEC_646_MASK 0x7F
+
+ static const struct tty_port_operations gsm_port_ops;
+
+@@ -526,7 +527,8 @@ static int gsm_stuff_frame(const u8 *input, u8 *output, int len)
+ int olen = 0;
+ while (len--) {
+ if (*input == GSM1_SOF || *input == GSM1_ESCAPE
+- || *input == XON || *input == XOFF) {
++ || (*input & ISO_IEC_646_MASK) == XON
++ || (*input & ISO_IEC_646_MASK) == XOFF) {
+ *output++ = GSM1_ESCAPE;
+ *output++ = *input++ ^ GSM1_ESCAPE_BITS;
+ olen++;
+diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
+index 65e9045dafe6d..5595c63c46eaf 100644
+--- a/drivers/tty/serial/8250/8250_of.c
++++ b/drivers/tty/serial/8250/8250_of.c
+@@ -83,8 +83,17 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
+ port->mapsize = resource_size(&resource);
+
+ /* Check for shifted address mapping */
+- if (of_property_read_u32(np, "reg-offset", &prop) == 0)
++ if (of_property_read_u32(np, "reg-offset", &prop) == 0) {
++ if (prop >= port->mapsize) {
++ dev_warn(&ofdev->dev, "reg-offset %u exceeds region size %pa\n",
++ prop, &port->mapsize);
++ ret = -EINVAL;
++ goto err_unprepare;
++ }
++
+ port->mapbase += prop;
++ port->mapsize -= prop;
++ }
+
+ port->iotype = UPIO_MEM;
+ if (of_property_read_u32(np, "reg-io-width", &prop) == 0) {
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index 019328d644d8b..3a985e953b8e9 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -5171,8 +5171,30 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ { PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS400,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */
+ pbn_b2_4_115200 },
++ /* Brainboxes Devices */
+ /*
+- * BrainBoxes UC-260
++ * Brainboxes UC-101
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0BA1,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ /*
++ * Brainboxes UC-235/246
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0AA1,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_1_115200 },
++ /*
++ * Brainboxes UC-257
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0861,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ /*
++ * Brainboxes UC-260/271/701/756
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0D21,
+ PCI_ANY_ID, PCI_ANY_ID,
+@@ -5180,7 +5202,81 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ pbn_b2_4_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0E34,
+ PCI_ANY_ID, PCI_ANY_ID,
+- PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
++ PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
++ pbn_b2_4_115200 },
++ /*
++ * Brainboxes UC-268
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0841,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_4_115200 },
++ /*
++ * Brainboxes UC-275/279
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0881,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_8_115200 },
++ /*
++ * Brainboxes UC-302
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x08E1,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ /*
++ * Brainboxes UC-310
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x08C1,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ /*
++ * Brainboxes UC-313
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x08A3,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ /*
++ * Brainboxes UC-320/324
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0A61,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_1_115200 },
++ /*
++ * Brainboxes UC-346
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0B02,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_4_115200 },
++ /*
++ * Brainboxes UC-357
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0A81,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x0A83,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ /*
++ * Brainboxes UC-368
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0C41,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_4_115200 },
++ /*
++ * Brainboxes UC-420/431
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0921,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
+ pbn_b2_4_115200 },
+ /*
+ * Perle PCI-RAS cards
+diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
+index 844059861f9e1..0eadf0547175c 100644
+--- a/drivers/tty/serial/stm32-usart.c
++++ b/drivers/tty/serial/stm32-usart.c
+@@ -574,7 +574,7 @@ static void stm32_usart_start_tx(struct uart_port *port)
+ struct serial_rs485 *rs485conf = &port->rs485;
+ struct circ_buf *xmit = &port->state->xmit;
+
+- if (uart_circ_empty(xmit))
++ if (uart_circ_empty(xmit) && !port->x_char)
+ return;
+
+ if (rs485conf->flags & SER_RS485_ENABLED) {
+diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
+index a18d7c4222ddf..82fe8e00a96a3 100644
+--- a/drivers/usb/common/ulpi.c
++++ b/drivers/usb/common/ulpi.c
+@@ -39,8 +39,11 @@ static int ulpi_match(struct device *dev, struct device_driver *driver)
+ struct ulpi *ulpi = to_ulpi_dev(dev);
+ const struct ulpi_device_id *id;
+
+- /* Some ULPI devices don't have a vendor id so rely on OF match */
+- if (ulpi->id.vendor == 0)
++ /*
++ * Some ULPI devices don't have a vendor id
++ * or provide an id_table so rely on OF match.
++ */
++ if (ulpi->id.vendor == 0 || !drv->id_table)
+ return of_driver_match_device(dev, driver);
+
+ for (id = drv->id_table; id->vendor; id++)
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index b2710015493a5..ddd1d3eef912b 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -1562,6 +1562,13 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
+ urb->hcpriv = NULL;
+ INIT_LIST_HEAD(&urb->urb_list);
+ atomic_dec(&urb->use_count);
++ /*
++ * Order the write of urb->use_count above before the read
++ * of urb->reject below. Pairs with the memory barriers in
++ * usb_kill_urb() and usb_poison_urb().
++ */
++ smp_mb__after_atomic();
++
+ atomic_dec(&urb->dev->urbnum);
+ if (atomic_read(&urb->reject))
+ wake_up(&usb_kill_urb_queue);
+@@ -1666,6 +1673,13 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
+
+ usb_anchor_resume_wakeups(anchor);
+ atomic_dec(&urb->use_count);
++ /*
++ * Order the write of urb->use_count above before the read
++ * of urb->reject below. Pairs with the memory barriers in
++ * usb_kill_urb() and usb_poison_urb().
++ */
++ smp_mb__after_atomic();
++
+ if (unlikely(atomic_read(&urb->reject)))
+ wake_up(&usb_kill_urb_queue);
+ usb_put_urb(urb);
+diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
+index 357b149b20d3a..9c285026f8276 100644
+--- a/drivers/usb/core/urb.c
++++ b/drivers/usb/core/urb.c
+@@ -706,6 +706,12 @@ void usb_kill_urb(struct urb *urb)
+ if (!(urb && urb->dev && urb->ep))
+ return;
+ atomic_inc(&urb->reject);
++ /*
++ * Order the write of urb->reject above before the read
++ * of urb->use_count below. Pairs with the barriers in
++ * __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
++ */
++ smp_mb__after_atomic();
+
+ usb_hcd_unlink_urb(urb, -ENOENT);
+ wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
+@@ -747,6 +753,12 @@ void usb_poison_urb(struct urb *urb)
+ if (!urb)
+ return;
+ atomic_inc(&urb->reject);
++ /*
++ * Order the write of urb->reject above before the read
++ * of urb->use_count below. Pairs with the barriers in
++ * __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
++ */
++ smp_mb__after_atomic();
+
+ if (!urb->dev || !urb->ep)
+ return;
+diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
+index 282737e4609ce..2c65a9bb3c81b 100644
+--- a/drivers/usb/gadget/function/f_sourcesink.c
++++ b/drivers/usb/gadget/function/f_sourcesink.c
+@@ -583,6 +583,7 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in,
+
+ if (is_iso) {
+ switch (speed) {
++ case USB_SPEED_SUPER_PLUS:
+ case USB_SPEED_SUPER:
+ size = ss->isoc_maxpacket *
+ (ss->isoc_mult + 1) *
+diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
+index c1edcc9b13cec..dc570ce4e8319 100644
+--- a/drivers/usb/host/xhci-plat.c
++++ b/drivers/usb/host/xhci-plat.c
+@@ -437,6 +437,9 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev)
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ int ret;
+
++ if (pm_runtime_suspended(dev))
++ pm_runtime_resume(dev);
++
+ ret = xhci_priv_suspend_quirk(hcd);
+ if (ret)
+ return ret;
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 29191d33c0e3e..1a05e3dcfec8a 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -2301,6 +2301,16 @@ UNUSUAL_DEV( 0x2027, 0xa001, 0x0000, 0x9999,
+ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
+ US_FL_SCM_MULT_TARG ),
+
++/*
++ * Reported by DocMAX <mail@××××××××××.de>
++ * and Thomas Weißschuh <linux@××××××××××.net>
++ */
++UNUSUAL_DEV( 0x2109, 0x0715, 0x9999, 0x9999,
++ "VIA Labs, Inc.",
++ "VL817 SATA Bridge",
++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++ US_FL_IGNORE_UAS),
++
+ UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
+ "ST",
+ "2A",
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index 721d9c4ddc81f..8333c80b5f7c1 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -4164,7 +4164,8 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port)
+ case SNK_TRYWAIT_DEBOUNCE:
+ break;
+ case SNK_ATTACH_WAIT:
+- tcpm_set_state(port, SNK_UNATTACHED, 0);
++ case SNK_DEBOUNCED:
++ /* Do nothing, as TCPM is still waiting for vbus to reaach VSAFE5V to connect */
+ break;
+
+ case SNK_NEGOTIATE_CAPABILITIES:
+diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
+index bff96d64dddff..6db7c8ddd51cd 100644
+--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
++++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
+@@ -325,7 +325,7 @@ static int ucsi_ccg_init(struct ucsi_ccg *uc)
+ if (status < 0)
+ return status;
+
+- if (!data)
++ if (!(data & DEV_INT))
+ return 0;
+
+ status = ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
+diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
+index 4dc9077dd2ac0..3c309ab208874 100644
+--- a/drivers/video/fbdev/hyperv_fb.c
++++ b/drivers/video/fbdev/hyperv_fb.c
+@@ -286,8 +286,6 @@ struct hvfb_par {
+
+ static uint screen_width = HVFB_WIDTH;
+ static uint screen_height = HVFB_HEIGHT;
+-static uint screen_width_max = HVFB_WIDTH;
+-static uint screen_height_max = HVFB_HEIGHT;
+ static uint screen_depth;
+ static uint screen_fb_size;
+ static uint dio_fb_size; /* FB size for deferred IO */
+@@ -581,7 +579,6 @@ static int synthvid_get_supported_resolution(struct hv_device *hdev)
+ int ret = 0;
+ unsigned long t;
+ u8 index;
+- int i;
+
+ memset(msg, 0, sizeof(struct synthvid_msg));
+ msg->vid_hdr.type = SYNTHVID_RESOLUTION_REQUEST;
+@@ -612,13 +609,6 @@ static int synthvid_get_supported_resolution(struct hv_device *hdev)
+ goto out;
+ }
+
+- for (i = 0; i < msg->resolution_resp.resolution_count; i++) {
+- screen_width_max = max_t(unsigned int, screen_width_max,
+- msg->resolution_resp.supported_resolution[i].width);
+- screen_height_max = max_t(unsigned int, screen_height_max,
+- msg->resolution_resp.supported_resolution[i].height);
+- }
+-
+ screen_width =
+ msg->resolution_resp.supported_resolution[index].width;
+ screen_height =
+@@ -940,7 +930,7 @@ static void hvfb_get_option(struct fb_info *info)
+
+ if (x < HVFB_WIDTH_MIN || y < HVFB_HEIGHT_MIN ||
+ (synthvid_ver_ge(par->synthvid_version, SYNTHVID_VERSION_WIN10) &&
+- (x > screen_width_max || y > screen_height_max)) ||
++ (x * y * screen_depth / 8 > screen_fb_size)) ||
+ (par->synthvid_version == SYNTHVID_VERSION_WIN8 &&
+ x * y * screen_depth / 8 > SYNTHVID_FB_SIZE_WIN8) ||
+ (par->synthvid_version == SYNTHVID_VERSION_WIN7 &&
+@@ -1193,8 +1183,8 @@ static int hvfb_probe(struct hv_device *hdev,
+ }
+
+ hvfb_get_option(info);
+- pr_info("Screen resolution: %dx%d, Color depth: %d\n",
+- screen_width, screen_height, screen_depth);
++ pr_info("Screen resolution: %dx%d, Color depth: %d, Frame buffer size: %d\n",
++ screen_width, screen_height, screen_depth, screen_fb_size);
+
+ ret = hvfb_getmem(hdev, info);
+ if (ret) {
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 040db0dfba264..b5e9bfe884c4b 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -3103,10 +3103,8 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
+ inode_lock(inode);
+ err = btrfs_delete_subvolume(dir, dentry);
+ inode_unlock(inode);
+- if (!err) {
+- fsnotify_rmdir(dir, dentry);
+- d_delete(dentry);
+- }
++ if (!err)
++ d_delete_notify(dir, dentry);
+
+ out_dput:
+ dput(dentry);
+diff --git a/fs/ceph/file.c b/fs/ceph/file.c
+index 8ed881fd7440d..450050801f3b6 100644
+--- a/fs/ceph/file.c
++++ b/fs/ceph/file.c
+@@ -577,6 +577,7 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
+ struct ceph_inode_info *ci = ceph_inode(dir);
+ struct inode *inode;
+ struct timespec64 now;
++ struct ceph_string *pool_ns;
+ struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
+ struct ceph_vino vino = { .ino = req->r_deleg_ino,
+ .snap = CEPH_NOSNAP };
+@@ -626,6 +627,12 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
+ in.max_size = cpu_to_le64(lo->stripe_unit);
+
+ ceph_file_layout_to_legacy(lo, &in.layout);
++ /* lo is private, so pool_ns can't change */
++ pool_ns = rcu_dereference_raw(lo->pool_ns);
++ if (pool_ns) {
++ iinfo.pool_ns_len = pool_ns->len;
++ iinfo.pool_ns_data = pool_ns->str;
++ }
+
+ down_read(&mdsc->snap_rwsem);
+ ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session,
+@@ -743,8 +750,10 @@ retry:
+ restore_deleg_ino(dir, req->r_deleg_ino);
+ ceph_mdsc_put_request(req);
+ try_async = false;
++ ceph_put_string(rcu_dereference_raw(lo.pool_ns));
+ goto retry;
+ }
++ ceph_put_string(rcu_dereference_raw(lo.pool_ns));
+ goto out_req;
+ }
+ }
+diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
+index b0983e2a4e2c7..32ddad3ec5d53 100644
+--- a/fs/configfs/dir.c
++++ b/fs/configfs/dir.c
+@@ -1805,8 +1805,8 @@ void configfs_unregister_group(struct config_group *group)
+ configfs_detach_group(&group->cg_item);
+ d_inode(dentry)->i_flags |= S_DEAD;
+ dont_mount(dentry);
++ d_drop(dentry);
+ fsnotify_rmdir(d_inode(parent), dentry);
+- d_delete(dentry);
+ inode_unlock(d_inode(parent));
+
+ dput(dentry);
+@@ -1947,10 +1947,10 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys)
+ configfs_detach_group(&group->cg_item);
+ d_inode(dentry)->i_flags |= S_DEAD;
+ dont_mount(dentry);
+- fsnotify_rmdir(d_inode(root), dentry);
+ inode_unlock(d_inode(dentry));
+
+- d_delete(dentry);
++ d_drop(dentry);
++ fsnotify_rmdir(d_inode(root), dentry);
+
+ inode_unlock(d_inode(root));
+
+diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
+index 42e5a766d33c7..4f25015aa5342 100644
+--- a/fs/devpts/inode.c
++++ b/fs/devpts/inode.c
+@@ -621,8 +621,8 @@ void devpts_pty_kill(struct dentry *dentry)
+
+ dentry->d_fsdata = NULL;
+ drop_nlink(dentry->d_inode);
+- fsnotify_unlink(d_inode(dentry->d_parent), dentry);
+ d_drop(dentry);
++ fsnotify_unlink(d_inode(dentry->d_parent), dentry);
+ dput(dentry); /* d_alloc_name() in devpts_pty_new() */
+ }
+
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index 188f79d769881..b748329bb0bab 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -2795,6 +2795,7 @@ struct journal_head *jbd2_journal_grab_journal_head(struct buffer_head *bh)
+ jbd_unlock_bh_journal_head(bh);
+ return jh;
+ }
++EXPORT_SYMBOL(jbd2_journal_grab_journal_head);
+
+ static void __journal_remove_journal_head(struct buffer_head *bh)
+ {
+@@ -2847,6 +2848,7 @@ void jbd2_journal_put_journal_head(struct journal_head *jh)
+ jbd_unlock_bh_journal_head(bh);
+ }
+ }
++EXPORT_SYMBOL(jbd2_journal_put_journal_head);
+
+ /*
+ * Initialize jbd inode head
+diff --git a/fs/namei.c b/fs/namei.c
+index 4c9d0c36545d3..72f354b62dd5d 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -3709,13 +3709,12 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
+ dentry->d_inode->i_flags |= S_DEAD;
+ dont_mount(dentry);
+ detach_mounts(dentry);
+- fsnotify_rmdir(dir, dentry);
+
+ out:
+ inode_unlock(dentry->d_inode);
+ dput(dentry);
+ if (!error)
+- d_delete(dentry);
++ d_delete_notify(dir, dentry);
+ return error;
+ }
+ EXPORT_SYMBOL(vfs_rmdir);
+@@ -3825,7 +3824,6 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry, struct inode **delegate
+ if (!error) {
+ dont_mount(dentry);
+ detach_mounts(dentry);
+- fsnotify_unlink(dir, dentry);
+ }
+ }
+ }
+@@ -3833,9 +3831,11 @@ out:
+ inode_unlock(target);
+
+ /* We don't d_delete() NFS sillyrenamed files--they still exist. */
+- if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) {
++ if (!error && dentry->d_flags & DCACHE_NFSFS_RENAMED) {
++ fsnotify_unlink(dir, dentry);
++ } else if (!error) {
+ fsnotify_link_count(target);
+- d_delete(dentry);
++ d_delete_notify(dir, dentry);
+ }
+
+ return error;
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index 8b963c72dd3b1..a23b7a5dec9ee 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -1777,6 +1777,24 @@ out:
+
+ no_open:
+ res = nfs_lookup(dir, dentry, lookup_flags);
++ if (!res) {
++ inode = d_inode(dentry);
++ if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
++ !S_ISDIR(inode->i_mode))
++ res = ERR_PTR(-ENOTDIR);
++ else if (inode && S_ISREG(inode->i_mode))
++ res = ERR_PTR(-EOPENSTALE);
++ } else if (!IS_ERR(res)) {
++ inode = d_inode(res);
++ if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
++ !S_ISDIR(inode->i_mode)) {
++ dput(res);
++ res = ERR_PTR(-ENOTDIR);
++ } else if (inode && S_ISREG(inode->i_mode)) {
++ dput(res);
++ res = ERR_PTR(-EOPENSTALE);
++ }
++ }
+ if (switched) {
+ d_lookup_done(dentry);
+ if (!res)
+@@ -2174,6 +2192,8 @@ nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
+
+ trace_nfs_link_enter(inode, dir, dentry);
+ d_drop(dentry);
++ if (S_ISREG(inode->i_mode))
++ nfs_sync_inode(inode);
+ error = NFS_PROTO(dir)->link(inode, dir, &dentry->d_name);
+ if (error == 0) {
+ ihold(inode);
+@@ -2262,6 +2282,8 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ }
+ }
+
++ if (S_ISREG(old_inode->i_mode))
++ nfs_sync_inode(old_inode);
+ task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, NULL);
+ if (IS_ERR(task)) {
+ error = PTR_ERR(task);
+diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
+index a8f954bbde4f5..dedec4771ecc2 100644
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -1247,7 +1247,8 @@ static void nfsdfs_remove_file(struct inode *dir, struct dentry *dentry)
+ clear_ncl(d_inode(dentry));
+ dget(dentry);
+ ret = simple_unlink(dir, dentry);
+- d_delete(dentry);
++ d_drop(dentry);
++ fsnotify_unlink(dir, dentry);
+ dput(dentry);
+ WARN_ON_ONCE(ret);
+ }
+@@ -1336,8 +1337,8 @@ void nfsd_client_rmdir(struct dentry *dentry)
+ dget(dentry);
+ ret = simple_rmdir(dir, dentry);
+ WARN_ON_ONCE(ret);
++ d_drop(dentry);
+ fsnotify_rmdir(dir, dentry);
+- d_delete(dentry);
+ dput(dentry);
+ inode_unlock(dir);
+ }
+diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
+index e7d04adb6cb87..4f48003e43271 100644
+--- a/fs/ocfs2/suballoc.c
++++ b/fs/ocfs2/suballoc.c
+@@ -1253,26 +1253,23 @@ static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
+ {
+ struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
+ struct journal_head *jh;
+- int ret = 1;
++ int ret;
+
+ if (ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap))
+ return 0;
+
+- if (!buffer_jbd(bg_bh))
++ jh = jbd2_journal_grab_journal_head(bg_bh);
++ if (!jh)
+ return 1;
+
+- jbd_lock_bh_journal_head(bg_bh);
+- if (buffer_jbd(bg_bh)) {
+- jh = bh2jh(bg_bh);
+- spin_lock(&jh->b_state_lock);
+- bg = (struct ocfs2_group_desc *) jh->b_committed_data;
+- if (bg)
+- ret = !ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap);
+- else
+- ret = 1;
+- spin_unlock(&jh->b_state_lock);
+- }
+- jbd_unlock_bh_journal_head(bg_bh);
++ spin_lock(&jh->b_state_lock);
++ bg = (struct ocfs2_group_desc *) jh->b_committed_data;
++ if (bg)
++ ret = !ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap);
++ else
++ ret = 1;
++ spin_unlock(&jh->b_state_lock);
++ jbd2_journal_put_journal_head(jh);
+
+ return ret;
+ }
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index 0dd2f93ac0480..d32b836f6ca74 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -257,10 +257,6 @@ int udf_expand_file_adinicb(struct inode *inode)
+ char *kaddr;
+ struct udf_inode_info *iinfo = UDF_I(inode);
+ int err;
+- struct writeback_control udf_wbc = {
+- .sync_mode = WB_SYNC_NONE,
+- .nr_to_write = 1,
+- };
+
+ WARN_ON_ONCE(!inode_is_locked(inode));
+ if (!iinfo->i_lenAlloc) {
+@@ -304,8 +300,10 @@ int udf_expand_file_adinicb(struct inode *inode)
+ iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
+ /* from now on we have normal address_space methods */
+ inode->i_data.a_ops = &udf_aops;
++ set_page_dirty(page);
++ unlock_page(page);
+ up_write(&iinfo->i_data_sem);
+- err = inode->i_data.a_ops->writepage(page, &udf_wbc);
++ err = filemap_fdatawrite(inode->i_mapping);
+ if (err) {
+ /* Restore everything back so that we don't lose data... */
+ lock_page(page);
+@@ -316,6 +314,7 @@ int udf_expand_file_adinicb(struct inode *inode)
+ unlock_page(page);
+ iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
+ inode->i_data.a_ops = &udf_adinicb_aops;
++ iinfo->i_lenAlloc = inode->i_size;
+ up_write(&iinfo->i_data_sem);
+ }
+ put_page(page);
+diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
+index f8acddcf54fb4..79add91eaa04e 100644
+--- a/include/linux/fsnotify.h
++++ b/include/linux/fsnotify.h
+@@ -203,6 +203,42 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode,
+ fsnotify_name(dir, FS_CREATE, inode, &new_dentry->d_name, 0);
+ }
+
++/*
++ * fsnotify_delete - @dentry was unlinked and unhashed
++ *
++ * Caller must make sure that dentry->d_name is stable.
++ *
++ * Note: unlike fsnotify_unlink(), we have to pass also the unlinked inode
++ * as this may be called after d_delete() and old_dentry may be negative.
++ */
++static inline void fsnotify_delete(struct inode *dir, struct inode *inode,
++ struct dentry *dentry)
++{
++ __u32 mask = FS_DELETE;
++
++ if (S_ISDIR(inode->i_mode))
++ mask |= FS_ISDIR;
++
++ fsnotify_name(dir, mask, inode, &dentry->d_name, 0);
++}
++
++/**
++ * d_delete_notify - delete a dentry and call fsnotify_delete()
++ * @dentry: The dentry to delete
++ *
++ * This helper is used to guaranty that the unlinked inode cannot be found
++ * by lookup of this name after fsnotify_delete() event has been delivered.
++ */
++static inline void d_delete_notify(struct inode *dir, struct dentry *dentry)
++{
++ struct inode *inode = d_inode(dentry);
++
++ ihold(inode);
++ d_delete(dentry);
++ fsnotify_delete(dir, inode, dentry);
++ iput(inode);
++}
++
+ /*
+ * fsnotify_unlink - 'name' was unlinked
+ *
+@@ -210,10 +246,10 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode,
+ */
+ static inline void fsnotify_unlink(struct inode *dir, struct dentry *dentry)
+ {
+- /* Expected to be called before d_delete() */
+- WARN_ON_ONCE(d_is_negative(dentry));
++ if (WARN_ON_ONCE(d_is_negative(dentry)))
++ return;
+
+- fsnotify_dirent(dir, dentry, FS_DELETE);
++ fsnotify_delete(dir, d_inode(dentry), dentry);
+ }
+
+ /*
+@@ -233,10 +269,10 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
+ */
+ static inline void fsnotify_rmdir(struct inode *dir, struct dentry *dentry)
+ {
+- /* Expected to be called before d_delete() */
+- WARN_ON_ONCE(d_is_negative(dentry));
++ if (WARN_ON_ONCE(d_is_negative(dentry)))
++ return;
+
+- fsnotify_dirent(dir, dentry, FS_DELETE | FS_ISDIR);
++ fsnotify_delete(dir, d_inode(dentry), dentry);
+ }
+
+ /*
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 3476d20b75d49..fe3155736d635 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -2543,6 +2543,7 @@ struct packet_type {
+ struct net_device *);
+ bool (*id_match)(struct packet_type *ptype,
+ struct sock *sk);
++ struct net *af_packet_net;
+ void *af_packet_priv;
+ struct list_head list;
+ };
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index c94551091dad3..67a50c78232fe 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -678,18 +678,6 @@ struct perf_event {
+ u64 total_time_running;
+ u64 tstamp;
+
+- /*
+- * timestamp shadows the actual context timing but it can
+- * be safely used in NMI interrupt context. It reflects the
+- * context time as it was when the event was last scheduled in,
+- * or when ctx_sched_in failed to schedule the event because we
+- * run out of PMC.
+- *
+- * ctx_time already accounts for ctx->timestamp. Therefore to
+- * compute ctx_time for a sample, simply add perf_clock().
+- */
+- u64 shadow_ctx_time;
+-
+ struct perf_event_attr attr;
+ u16 header_size;
+ u16 id_header_size;
+@@ -834,6 +822,7 @@ struct perf_event_context {
+ */
+ u64 time;
+ u64 timestamp;
++ u64 timeoffset;
+
+ /*
+ * These fields let us detect when two contexts have both
+@@ -916,6 +905,8 @@ struct bpf_perf_event_data_kern {
+ struct perf_cgroup_info {
+ u64 time;
+ u64 timestamp;
++ u64 timeoffset;
++ int active;
+ };
+
+ struct perf_cgroup {
+diff --git a/include/linux/usb/role.h b/include/linux/usb/role.h
+index 0164fed31b06c..b9ccaeb8a4aef 100644
+--- a/include/linux/usb/role.h
++++ b/include/linux/usb/role.h
+@@ -90,6 +90,12 @@ fwnode_usb_role_switch_get(struct fwnode_handle *node)
+
+ static inline void usb_role_switch_put(struct usb_role_switch *sw) { }
+
++static inline struct usb_role_switch *
++usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode)
++{
++ return NULL;
++}
++
+ static inline struct usb_role_switch *
+ usb_role_switch_register(struct device *parent,
+ const struct usb_role_switch_desc *desc)
+diff --git a/include/net/addrconf.h b/include/net/addrconf.h
+index 78ea3e332688f..e7ce719838b5e 100644
+--- a/include/net/addrconf.h
++++ b/include/net/addrconf.h
+@@ -6,6 +6,8 @@
+ #define RTR_SOLICITATION_INTERVAL (4*HZ)
+ #define RTR_SOLICITATION_MAX_INTERVAL (3600*HZ) /* 1 hour */
+
++#define MIN_VALID_LIFETIME (2*3600) /* 2 hours */
++
+ #define TEMP_VALID_LIFETIME (7*86400)
+ #define TEMP_PREFERRED_LIFETIME (86400)
+ #define REGEN_MAX_RETRY (3)
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 5538e54d4620c..de2dc22a78f93 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -506,19 +506,18 @@ static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
+ {
+ struct iphdr *iph = ip_hdr(skb);
+
++ /* We had many attacks based on IPID, use the private
++ * generator as much as we can.
++ */
++ if (sk && inet_sk(sk)->inet_daddr) {
++ iph->id = htons(inet_sk(sk)->inet_id);
++ inet_sk(sk)->inet_id += segs;
++ return;
++ }
+ if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
+- /* This is only to work around buggy Windows95/2000
+- * VJ compression implementations. If the ID field
+- * does not change, they drop every other packet in
+- * a TCP stream using header compression.
+- */
+- if (sk && inet_sk(sk)->inet_daddr) {
+- iph->id = htons(inet_sk(sk)->inet_id);
+- inet_sk(sk)->inet_id += segs;
+- } else {
+- iph->id = 0;
+- }
++ iph->id = 0;
+ } else {
++ /* Unfortunately we need the big hammer to get a suitable IPID */
+ __ip_select_ident(net, iph, segs);
+ }
+ }
+diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
+index 88bc66b8d02b0..95d93ecf07371 100644
+--- a/include/net/ip6_fib.h
++++ b/include/net/ip6_fib.h
+@@ -280,7 +280,7 @@ static inline bool fib6_get_cookie_safe(const struct fib6_info *f6i,
+ fn = rcu_dereference(f6i->fib6_node);
+
+ if (fn) {
+- *cookie = fn->fn_sernum;
++ *cookie = READ_ONCE(fn->fn_sernum);
+ /* pairs with smp_wmb() in fib6_update_sernum_upto_root() */
+ smp_rmb();
+ status = true;
+diff --git a/include/net/route.h b/include/net/route.h
+index ff021cab657e5..a07c277cd33e8 100644
+--- a/include/net/route.h
++++ b/include/net/route.h
+@@ -369,7 +369,7 @@ static inline struct neighbour *ip_neigh_gw4(struct net_device *dev,
+ {
+ struct neighbour *neigh;
+
+- neigh = __ipv4_neigh_lookup_noref(dev, daddr);
++ neigh = __ipv4_neigh_lookup_noref(dev, (__force u32)daddr);
+ if (unlikely(!neigh))
+ neigh = __neigh_create(&arp_tbl, &daddr, dev, false);
+
+diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
+index 4477873ac3a0b..56cd7e6589ff3 100644
+--- a/kernel/bpf/stackmap.c
++++ b/kernel/bpf/stackmap.c
+@@ -664,13 +664,14 @@ BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf,
+ u32, size, u64, flags)
+ {
+ struct pt_regs *regs;
+- long res;
++ long res = -EINVAL;
+
+ if (!try_get_task_stack(task))
+ return -EFAULT;
+
+ regs = task_pt_regs(task);
+- res = __bpf_get_stack(regs, task, NULL, buf, size, flags);
++ if (regs)
++ res = __bpf_get_stack(regs, task, NULL, buf, size, flags);
+ put_task_stack(task);
+
+ return res;
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index e2d774cc470ee..c6493f7e02359 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -266,7 +266,7 @@ static void event_function_call(struct perf_event *event, event_f func, void *da
+ if (!event->parent) {
+ /*
+ * If this is a !child event, we must hold ctx::mutex to
+- * stabilize the the event->ctx relation. See
++ * stabilize the event->ctx relation. See
+ * perf_event_ctx_lock().
+ */
+ lockdep_assert_held(&ctx->mutex);
+@@ -673,6 +673,23 @@ perf_event_set_state(struct perf_event *event, enum perf_event_state state)
+ WRITE_ONCE(event->state, state);
+ }
+
++/*
++ * UP store-release, load-acquire
++ */
++
++#define __store_release(ptr, val) \
++do { \
++ barrier(); \
++ WRITE_ONCE(*(ptr), (val)); \
++} while (0)
++
++#define __load_acquire(ptr) \
++({ \
++ __unqual_scalar_typeof(*(ptr)) ___p = READ_ONCE(*(ptr)); \
++ barrier(); \
++ ___p; \
++})
++
+ #ifdef CONFIG_CGROUP_PERF
+
+ static inline bool
+@@ -718,34 +735,51 @@ static inline u64 perf_cgroup_event_time(struct perf_event *event)
+ return t->time;
+ }
+
+-static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
++static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now)
+ {
+- struct perf_cgroup_info *info;
+- u64 now;
+-
+- now = perf_clock();
++ struct perf_cgroup_info *t;
+
+- info = this_cpu_ptr(cgrp->info);
++ t = per_cpu_ptr(event->cgrp->info, event->cpu);
++ if (!__load_acquire(&t->active))
++ return t->time;
++ now += READ_ONCE(t->timeoffset);
++ return now;
++}
+
+- info->time += now - info->timestamp;
++static inline void __update_cgrp_time(struct perf_cgroup_info *info, u64 now, bool adv)
++{
++ if (adv)
++ info->time += now - info->timestamp;
+ info->timestamp = now;
++ /*
++ * see update_context_time()
++ */
++ WRITE_ONCE(info->timeoffset, info->time - info->timestamp);
+ }
+
+-static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
++static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx, bool final)
+ {
+ struct perf_cgroup *cgrp = cpuctx->cgrp;
+ struct cgroup_subsys_state *css;
++ struct perf_cgroup_info *info;
+
+ if (cgrp) {
++ u64 now = perf_clock();
++
+ for (css = &cgrp->css; css; css = css->parent) {
+ cgrp = container_of(css, struct perf_cgroup, css);
+- __update_cgrp_time(cgrp);
++ info = this_cpu_ptr(cgrp->info);
++
++ __update_cgrp_time(info, now, true);
++ if (final)
++ __store_release(&info->active, 0);
+ }
+ }
+ }
+
+ static inline void update_cgrp_time_from_event(struct perf_event *event)
+ {
++ struct perf_cgroup_info *info;
+ struct perf_cgroup *cgrp;
+
+ /*
+@@ -759,8 +793,10 @@ static inline void update_cgrp_time_from_event(struct perf_event *event)
+ /*
+ * Do not update time when cgroup is not active
+ */
+- if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
+- __update_cgrp_time(event->cgrp);
++ if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) {
++ info = this_cpu_ptr(event->cgrp->info);
++ __update_cgrp_time(info, perf_clock(), true);
++ }
+ }
+
+ static inline void
+@@ -784,7 +820,8 @@ perf_cgroup_set_timestamp(struct task_struct *task,
+ for (css = &cgrp->css; css; css = css->parent) {
+ cgrp = container_of(css, struct perf_cgroup, css);
+ info = this_cpu_ptr(cgrp->info);
+- info->timestamp = ctx->timestamp;
++ __update_cgrp_time(info, ctx->timestamp, false);
++ __store_release(&info->active, 1);
+ }
+ }
+
+@@ -980,14 +1017,6 @@ out:
+ return ret;
+ }
+
+-static inline void
+-perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
+-{
+- struct perf_cgroup_info *t;
+- t = per_cpu_ptr(event->cgrp->info, event->cpu);
+- event->shadow_ctx_time = now - t->timestamp;
+-}
+-
+ static inline void
+ perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx)
+ {
+@@ -1065,7 +1094,8 @@ static inline void update_cgrp_time_from_event(struct perf_event *event)
+ {
+ }
+
+-static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
++static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx,
++ bool final)
+ {
+ }
+
+@@ -1097,12 +1127,12 @@ perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
+ {
+ }
+
+-static inline void
+-perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
++static inline u64 perf_cgroup_event_time(struct perf_event *event)
+ {
++ return 0;
+ }
+
+-static inline u64 perf_cgroup_event_time(struct perf_event *event)
++static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now)
+ {
+ return 0;
+ }
+@@ -1300,7 +1330,7 @@ static void put_ctx(struct perf_event_context *ctx)
+ * life-time rules separate them. That is an exiting task cannot fork, and a
+ * spawning task cannot (yet) exit.
+ *
+- * But remember that that these are parent<->child context relations, and
++ * But remember that these are parent<->child context relations, and
+ * migration does not affect children, therefore these two orderings should not
+ * interact.
+ *
+@@ -1439,7 +1469,7 @@ static u64 primary_event_id(struct perf_event *event)
+ /*
+ * Get the perf_event_context for a task and lock it.
+ *
+- * This has to cope with with the fact that until it is locked,
++ * This has to cope with the fact that until it is locked,
+ * the context could get moved to another task.
+ */
+ static struct perf_event_context *
+@@ -1524,22 +1554,59 @@ static void perf_unpin_context(struct perf_event_context *ctx)
+ /*
+ * Update the record of the current time in a context.
+ */
+-static void update_context_time(struct perf_event_context *ctx)
++static void __update_context_time(struct perf_event_context *ctx, bool adv)
+ {
+ u64 now = perf_clock();
+
+- ctx->time += now - ctx->timestamp;
++ if (adv)
++ ctx->time += now - ctx->timestamp;
+ ctx->timestamp = now;
++
++ /*
++ * The above: time' = time + (now - timestamp), can be re-arranged
++ * into: time` = now + (time - timestamp), which gives a single value
++ * offset to compute future time without locks on.
++ *
++ * See perf_event_time_now(), which can be used from NMI context where
++ * it's (obviously) not possible to acquire ctx->lock in order to read
++ * both the above values in a consistent manner.
++ */
++ WRITE_ONCE(ctx->timeoffset, ctx->time - ctx->timestamp);
++}
++
++static void update_context_time(struct perf_event_context *ctx)
++{
++ __update_context_time(ctx, true);
+ }
+
+ static u64 perf_event_time(struct perf_event *event)
+ {
+ struct perf_event_context *ctx = event->ctx;
+
++ if (unlikely(!ctx))
++ return 0;
++
+ if (is_cgroup_event(event))
+ return perf_cgroup_event_time(event);
+
+- return ctx ? ctx->time : 0;
++ return ctx->time;
++}
++
++static u64 perf_event_time_now(struct perf_event *event, u64 now)
++{
++ struct perf_event_context *ctx = event->ctx;
++
++ if (unlikely(!ctx))
++ return 0;
++
++ if (is_cgroup_event(event))
++ return perf_cgroup_event_time_now(event, now);
++
++ if (!(__load_acquire(&ctx->is_active) & EVENT_TIME))
++ return ctx->time;
++
++ now += READ_ONCE(ctx->timeoffset);
++ return now;
+ }
+
+ static enum event_type_t get_event_type(struct perf_event *event)
+@@ -2333,7 +2400,7 @@ __perf_remove_from_context(struct perf_event *event,
+
+ if (ctx->is_active & EVENT_TIME) {
+ update_context_time(ctx);
+- update_cgrp_time_from_cpuctx(cpuctx);
++ update_cgrp_time_from_cpuctx(cpuctx, false);
+ }
+
+ event_sched_out(event, cpuctx, ctx);
+@@ -2342,6 +2409,9 @@ __perf_remove_from_context(struct perf_event *event,
+ list_del_event(event, ctx);
+
+ if (!ctx->nr_events && ctx->is_active) {
++ if (ctx == &cpuctx->ctx)
++ update_cgrp_time_from_cpuctx(cpuctx, true);
++
+ ctx->is_active = 0;
+ ctx->rotate_necessary = 0;
+ if (ctx->task) {
+@@ -2467,40 +2537,6 @@ void perf_event_disable_inatomic(struct perf_event *event)
+ irq_work_queue(&event->pending);
+ }
+
+-static void perf_set_shadow_time(struct perf_event *event,
+- struct perf_event_context *ctx)
+-{
+- /*
+- * use the correct time source for the time snapshot
+- *
+- * We could get by without this by leveraging the
+- * fact that to get to this function, the caller
+- * has most likely already called update_context_time()
+- * and update_cgrp_time_xx() and thus both timestamp
+- * are identical (or very close). Given that tstamp is,
+- * already adjusted for cgroup, we could say that:
+- * tstamp - ctx->timestamp
+- * is equivalent to
+- * tstamp - cgrp->timestamp.
+- *
+- * Then, in perf_output_read(), the calculation would
+- * work with no changes because:
+- * - event is guaranteed scheduled in
+- * - no scheduled out in between
+- * - thus the timestamp would be the same
+- *
+- * But this is a bit hairy.
+- *
+- * So instead, we have an explicit cgroup call to remain
+- * within the time time source all along. We believe it
+- * is cleaner and simpler to understand.
+- */
+- if (is_cgroup_event(event))
+- perf_cgroup_set_shadow_time(event, event->tstamp);
+- else
+- event->shadow_ctx_time = event->tstamp - ctx->timestamp;
+-}
+-
+ #define MAX_INTERRUPTS (~0ULL)
+
+ static void perf_log_throttle(struct perf_event *event, int enable);
+@@ -2541,8 +2577,6 @@ event_sched_in(struct perf_event *event,
+
+ perf_pmu_disable(event->pmu);
+
+- perf_set_shadow_time(event, ctx);
+-
+ perf_log_itrace_start(event);
+
+ if (event->pmu->add(event, PERF_EF_START)) {
+@@ -3216,16 +3250,6 @@ static void ctx_sched_out(struct perf_event_context *ctx,
+ return;
+ }
+
+- ctx->is_active &= ~event_type;
+- if (!(ctx->is_active & EVENT_ALL))
+- ctx->is_active = 0;
+-
+- if (ctx->task) {
+- WARN_ON_ONCE(cpuctx->task_ctx != ctx);
+- if (!ctx->is_active)
+- cpuctx->task_ctx = NULL;
+- }
+-
+ /*
+ * Always update time if it was set; not only when it changes.
+ * Otherwise we can 'forget' to update time for any but the last
+@@ -3239,7 +3263,22 @@ static void ctx_sched_out(struct perf_event_context *ctx,
+ if (is_active & EVENT_TIME) {
+ /* update (and stop) ctx time */
+ update_context_time(ctx);
+- update_cgrp_time_from_cpuctx(cpuctx);
++ update_cgrp_time_from_cpuctx(cpuctx, ctx == &cpuctx->ctx);
++ /*
++ * CPU-release for the below ->is_active store,
++ * see __load_acquire() in perf_event_time_now()
++ */
++ barrier();
++ }
++
++ ctx->is_active &= ~event_type;
++ if (!(ctx->is_active & EVENT_ALL))
++ ctx->is_active = 0;
++
++ if (ctx->task) {
++ WARN_ON_ONCE(cpuctx->task_ctx != ctx);
++ if (!ctx->is_active)
++ cpuctx->task_ctx = NULL;
+ }
+
+ is_active ^= ctx->is_active; /* changed bits */
+@@ -3676,13 +3715,19 @@ static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx,
+ return 0;
+ }
+
++/*
++ * Because the userpage is strictly per-event (there is no concept of context,
++ * so there cannot be a context indirection), every userpage must be updated
++ * when context time starts :-(
++ *
++ * IOW, we must not miss EVENT_TIME edges.
++ */
+ static inline bool event_update_userpage(struct perf_event *event)
+ {
+ if (likely(!atomic_read(&event->mmap_count)))
+ return false;
+
+ perf_event_update_time(event);
+- perf_set_shadow_time(event, event->ctx);
+ perf_event_update_userpage(event);
+
+ return true;
+@@ -3766,13 +3811,23 @@ ctx_sched_in(struct perf_event_context *ctx,
+ struct task_struct *task)
+ {
+ int is_active = ctx->is_active;
+- u64 now;
+
+ lockdep_assert_held(&ctx->lock);
+
+ if (likely(!ctx->nr_events))
+ return;
+
++ if (is_active ^ EVENT_TIME) {
++ /* start ctx time */
++ __update_context_time(ctx, false);
++ perf_cgroup_set_timestamp(task, ctx);
++ /*
++ * CPU-release for the below ->is_active store,
++ * see __load_acquire() in perf_event_time_now()
++ */
++ barrier();
++ }
++
+ ctx->is_active |= (event_type | EVENT_TIME);
+ if (ctx->task) {
+ if (!is_active)
+@@ -3783,13 +3838,6 @@ ctx_sched_in(struct perf_event_context *ctx,
+
+ is_active ^= ctx->is_active; /* changed bits */
+
+- if (is_active & EVENT_TIME) {
+- /* start ctx time */
+- now = perf_clock();
+- ctx->timestamp = now;
+- perf_cgroup_set_timestamp(task, ctx);
+- }
+-
+ /*
+ * First go through the list and put on any pinned groups
+ * in order to give them the best chance of going on.
+@@ -4325,6 +4373,18 @@ static inline u64 perf_event_count(struct perf_event *event)
+ return local64_read(&event->count) + atomic64_read(&event->child_count);
+ }
+
++static void calc_timer_values(struct perf_event *event,
++ u64 *now,
++ u64 *enabled,
++ u64 *running)
++{
++ u64 ctx_time;
++
++ *now = perf_clock();
++ ctx_time = perf_event_time_now(event, *now);
++ __perf_update_times(event, ctx_time, enabled, running);
++}
++
+ /*
+ * NMI-safe method to read a local event, that is an event that
+ * is:
+@@ -4384,10 +4444,9 @@ int perf_event_read_local(struct perf_event *event, u64 *value,
+
+ *value = local64_read(&event->count);
+ if (enabled || running) {
+- u64 now = event->shadow_ctx_time + perf_clock();
+- u64 __enabled, __running;
++ u64 __enabled, __running, __now;;
+
+- __perf_update_times(event, now, &__enabled, &__running);
++ calc_timer_values(event, &__now, &__enabled, &__running);
+ if (enabled)
+ *enabled = __enabled;
+ if (running)
+@@ -5694,18 +5753,6 @@ static int perf_event_index(struct perf_event *event)
+ return event->pmu->event_idx(event);
+ }
+
+-static void calc_timer_values(struct perf_event *event,
+- u64 *now,
+- u64 *enabled,
+- u64 *running)
+-{
+- u64 ctx_time;
+-
+- *now = perf_clock();
+- ctx_time = event->shadow_ctx_time + *now;
+- __perf_update_times(event, ctx_time, enabled, running);
+-}
+-
+ static void perf_event_init_userpage(struct perf_event *event)
+ {
+ struct perf_event_mmap_page *userpg;
+@@ -6245,7 +6292,6 @@ accounting:
+ ring_buffer_attach(event, rb);
+
+ perf_event_update_time(event);
+- perf_set_shadow_time(event, event->ctx);
+ perf_event_init_userpage(event);
+ perf_event_update_userpage(event);
+ } else {
+diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
+index 00b0358739ab3..e1bbb3b92921d 100644
+--- a/kernel/events/uprobes.c
++++ b/kernel/events/uprobes.c
+@@ -1735,7 +1735,7 @@ void uprobe_free_utask(struct task_struct *t)
+ }
+
+ /*
+- * Allocate a uprobe_task object for the task if if necessary.
++ * Allocate a uprobe_task object for the task if necessary.
+ * Called when the thread hits a breakpoint.
+ *
+ * Returns:
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 2f8cd616d3b29..f00dd928fc711 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1438,7 +1438,7 @@ rt_mutex_fasttrylock(struct rt_mutex *lock,
+ }
+
+ /*
+- * Performs the wakeup of the the top-waiter and re-enables preemption.
++ * Performs the wakeup of the top-waiter and re-enables preemption.
+ */
+ void rt_mutex_postunlock(struct wake_q_head *wake_q)
+ {
+@@ -1832,7 +1832,7 @@ struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
+ * been started.
+ * @waiter: the pre-initialized rt_mutex_waiter
+ *
+- * Wait for the the lock acquisition started on our behalf by
++ * Wait for the lock acquisition started on our behalf by
+ * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
+ * rt_mutex_cleanup_proxy_lock().
+ *
+diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
+index a163542d178ee..cc5cc889b5b7f 100644
+--- a/kernel/locking/rwsem.c
++++ b/kernel/locking/rwsem.c
+@@ -1177,7 +1177,7 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
+
+ /*
+ * If there were already threads queued before us and:
+- * 1) there are no no active locks, wake the front
++ * 1) there are no active locks, wake the front
+ * queued process(es) as the handoff bit might be set.
+ * 2) there are no active writers and some readers, the lock
+ * must be read owned; so we try to wake any read lock
+diff --git a/kernel/locking/semaphore.c b/kernel/locking/semaphore.c
+index d9dd94defc0a9..9aa855a96c4ae 100644
+--- a/kernel/locking/semaphore.c
++++ b/kernel/locking/semaphore.c
+@@ -119,7 +119,7 @@ EXPORT_SYMBOL(down_killable);
+ * @sem: the semaphore to be acquired
+ *
+ * Try to acquire the semaphore atomically. Returns 0 if the semaphore has
+- * been acquired successfully or 1 if it it cannot be acquired.
++ * been acquired successfully or 1 if it cannot be acquired.
+ *
+ * NOTE: This return value is inverted from both spin_trylock and
+ * mutex_trylock! Be careful about this when converting code.
+diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c
+index 105df4dfc7839..52571dcad768b 100644
+--- a/kernel/power/wakelock.c
++++ b/kernel/power/wakelock.c
+@@ -39,23 +39,20 @@ ssize_t pm_show_wakelocks(char *buf, bool show_active)
+ {
+ struct rb_node *node;
+ struct wakelock *wl;
+- char *str = buf;
+- char *end = buf + PAGE_SIZE;
++ int len = 0;
+
+ mutex_lock(&wakelocks_lock);
+
+ for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) {
+ wl = rb_entry(node, struct wakelock, node);
+ if (wl->ws->active == show_active)
+- str += scnprintf(str, end - str, "%s ", wl->name);
++ len += sysfs_emit_at(buf, len, "%s ", wl->name);
+ }
+- if (str > buf)
+- str--;
+
+- str += scnprintf(str, end - str, "\n");
++ len += sysfs_emit_at(buf, len, "\n");
+
+ mutex_unlock(&wakelocks_lock);
+- return (str - buf);
++ return len;
+ }
+
+ #if CONFIG_PM_WAKELOCKS_LIMIT > 0
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 2a33cb5a10e59..acd9833b8ec22 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -3379,7 +3379,6 @@ void set_task_rq_fair(struct sched_entity *se,
+ se->avg.last_update_time = n_last_update_time;
+ }
+
+-
+ /*
+ * When on migration a sched_entity joins/leaves the PELT hierarchy, we need to
+ * propagate its contribution. The key to this propagation is the invariant
+@@ -3447,7 +3446,6 @@ void set_task_rq_fair(struct sched_entity *se,
+ * XXX: only do this for the part of runnable > running ?
+ *
+ */
+-
+ static inline void
+ update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
+ {
+@@ -3676,7 +3674,19 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
+
+ r = removed_util;
+ sub_positive(&sa->util_avg, r);
+- sa->util_sum = sa->util_avg * divider;
++ sub_positive(&sa->util_sum, r * divider);
++ /*
++ * Because of rounding, se->util_sum might ends up being +1 more than
++ * cfs->util_sum. Although this is not a problem by itself, detaching
++ * a lot of tasks with the rounding problem between 2 updates of
++ * util_avg (~1ms) can make cfs->util_sum becoming null whereas
++ * cfs_util_avg is not.
++ * Check that util_sum is still above its lower bound for the new
++ * util_avg. Given that period_contrib might have moved since the last
++ * sync, we are only sure that util_sum must be above or equal to
++ * util_avg * minimum possible divider
++ */
++ sa->util_sum = max_t(u32, sa->util_sum, sa->util_avg * PELT_MIN_DIVIDER);
+
+ r = removed_runnable;
+ sub_positive(&sa->runnable_avg, r);
+@@ -5149,7 +5159,7 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
+ /*
+ * When a group wakes up we want to make sure that its quota is not already
+ * expired/exceeded, otherwise it may be allowed to steal additional ticks of
+- * runtime as update_curr() throttling can not not trigger until it's on-rq.
++ * runtime as update_curr() throttling can not trigger until it's on-rq.
+ */
+ static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
+ {
+diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
+index 16f57e71f9c44..cc7cd512e4e33 100644
+--- a/kernel/sched/membarrier.c
++++ b/kernel/sched/membarrier.c
+@@ -19,11 +19,11 @@
+ #endif
+
+ #ifdef CONFIG_RSEQ
+-#define MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ_BITMASK \
++#define MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK \
+ (MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ \
+- | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ_BITMASK)
++ | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ)
+ #else
+-#define MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ_BITMASK 0
++#define MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK 0
+ #endif
+
+ #define MEMBARRIER_CMD_BITMASK \
+@@ -31,7 +31,8 @@
+ | MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED \
+ | MEMBARRIER_CMD_PRIVATE_EXPEDITED \
+ | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED \
+- | MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK)
++ | MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK \
++ | MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK)
+
+ static void ipi_mb(void *info)
+ {
+@@ -315,7 +316,7 @@ static int sync_runqueues_membarrier_state(struct mm_struct *mm)
+
+ /*
+ * For each cpu runqueue, if the task's mm match @mm, ensure that all
+- * @mm's membarrier state set bits are also set in in the runqueue's
++ * @mm's membarrier state set bits are also set in the runqueue's
+ * membarrier state. This ensures that a runqueue scheduling
+ * between threads which are users of @mm has its membarrier state
+ * updated.
+diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
+index 0b9aeebb9c325..45bf08e22207c 100644
+--- a/kernel/sched/pelt.h
++++ b/kernel/sched/pelt.h
+@@ -37,9 +37,11 @@ update_irq_load_avg(struct rq *rq, u64 running)
+ }
+ #endif
+
++#define PELT_MIN_DIVIDER (LOAD_AVG_MAX - 1024)
++
+ static inline u32 get_pelt_divider(struct sched_avg *avg)
+ {
+- return LOAD_AVG_MAX - 1024 + avg->period_contrib;
++ return PELT_MIN_DIVIDER + avg->period_contrib;
+ }
+
+ static inline void cfs_se_util_change(struct sched_avg *avg)
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index cd2d094b9f820..a0729213f37be 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -7257,7 +7257,8 @@ static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
+ err = kzalloc(sizeof(*err), GFP_KERNEL);
+ if (!err)
+ err = ERR_PTR(-ENOMEM);
+- tr->n_err_log_entries++;
++ else
++ tr->n_err_log_entries++;
+
+ return err;
+ }
+diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
+index 003e5f37861e3..1557a20b6500e 100644
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -3506,6 +3506,7 @@ static int trace_action_create(struct hist_trigger_data *hist_data,
+
+ var_ref_idx = find_var_ref_idx(hist_data, var_ref);
+ if (WARN_ON(var_ref_idx < 0)) {
++ kfree(p);
+ ret = var_ref_idx;
+ goto err;
+ }
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 7ffcca9ae82a1..72b4127360c7f 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -5661,6 +5661,11 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
+ struct hci_ev_le_advertising_info *ev = ptr;
+ s8 rssi;
+
++ if (ptr > (void *)skb_tail_pointer(skb) - sizeof(*ev)) {
++ bt_dev_err(hdev, "Malicious advertising data.");
++ break;
++ }
++
+ if (ev->length <= HCI_MAX_AD_LENGTH &&
+ ev->data + ev->length <= skb_tail_pointer(skb)) {
+ rssi = ev->data[ev->length];
+@@ -5672,11 +5677,6 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
+ }
+
+ ptr += sizeof(*ev) + ev->length + 1;
+-
+- if (ptr > (void *) skb_tail_pointer(skb) - sizeof(*ev)) {
+- bt_dev_err(hdev, "Malicious advertising data. Stopping processing");
+- break;
+- }
+ }
+
+ hci_dev_unlock(hdev);
+diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
+index 08c77418c687b..852f4b54e8811 100644
+--- a/net/bridge/br_vlan.c
++++ b/net/bridge/br_vlan.c
+@@ -543,10 +543,10 @@ static bool __allowed_ingress(const struct net_bridge *br,
+ if (!br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
+ if (*state == BR_STATE_FORWARDING) {
+ *state = br_vlan_get_pvid_state(vg);
+- return br_vlan_state_allowed(*state, true);
+- } else {
+- return true;
++ if (!br_vlan_state_allowed(*state, true))
++ goto drop;
+ }
++ return true;
+ }
+ }
+ v = br_vlan_find(vg, *vid);
+@@ -1873,7 +1873,8 @@ static int br_vlan_rtm_dump(struct sk_buff *skb, struct netlink_callback *cb)
+ goto out_err;
+ }
+ err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
+- if (err && err != -EMSGSIZE)
++ /* if the dump completed without an error we return 0 here */
++ if (err != -EMSGSIZE)
+ goto out_err;
+ } else {
+ for_each_netdev_rcu(net, dev) {
+diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
+index c714e6a9dad4c..eadb696360b48 100644
+--- a/net/core/net-procfs.c
++++ b/net/core/net-procfs.c
+@@ -193,12 +193,23 @@ static const struct seq_operations softnet_seq_ops = {
+ .show = softnet_seq_show,
+ };
+
+-static void *ptype_get_idx(loff_t pos)
++static void *ptype_get_idx(struct seq_file *seq, loff_t pos)
+ {
++ struct list_head *ptype_list = NULL;
+ struct packet_type *pt = NULL;
++ struct net_device *dev;
+ loff_t i = 0;
+ int t;
+
++ for_each_netdev_rcu(seq_file_net(seq), dev) {
++ ptype_list = &dev->ptype_all;
++ list_for_each_entry_rcu(pt, ptype_list, list) {
++ if (i == pos)
++ return pt;
++ ++i;
++ }
++ }
++
+ list_for_each_entry_rcu(pt, &ptype_all, list) {
+ if (i == pos)
+ return pt;
+@@ -219,22 +230,40 @@ static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(RCU)
+ {
+ rcu_read_lock();
+- return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
++ return *pos ? ptype_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
+ }
+
+ static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+ {
++ struct net_device *dev;
+ struct packet_type *pt;
+ struct list_head *nxt;
+ int hash;
+
+ ++*pos;
+ if (v == SEQ_START_TOKEN)
+- return ptype_get_idx(0);
++ return ptype_get_idx(seq, 0);
+
+ pt = v;
+ nxt = pt->list.next;
++ if (pt->dev) {
++ if (nxt != &pt->dev->ptype_all)
++ goto found;
++
++ dev = pt->dev;
++ for_each_netdev_continue_rcu(seq_file_net(seq), dev) {
++ if (!list_empty(&dev->ptype_all)) {
++ nxt = dev->ptype_all.next;
++ goto found;
++ }
++ }
++
++ nxt = ptype_all.next;
++ goto ptype_all;
++ }
++
+ if (pt->type == htons(ETH_P_ALL)) {
++ptype_all:
+ if (nxt != &ptype_all)
+ goto found;
+ hash = 0;
+@@ -263,7 +292,8 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
+
+ if (v == SEQ_START_TOKEN)
+ seq_puts(seq, "Type Device Function\n");
+- else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
++ else if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) &&
++ (!pt->dev || net_eq(dev_net(pt->dev), seq_file_net(seq)))) {
+ if (pt->type == htons(ETH_P_ALL))
+ seq_puts(seq, "ALL ");
+ else
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 10d4cde31c6bf..5e48b3d3a00db 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -162,12 +162,19 @@ int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
+ iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
+ iph->saddr = saddr;
+ iph->protocol = sk->sk_protocol;
+- if (ip_dont_fragment(sk, &rt->dst)) {
++ /* Do not bother generating IPID for small packets (eg SYNACK) */
++ if (skb->len <= IPV4_MIN_MTU || ip_dont_fragment(sk, &rt->dst)) {
+ iph->frag_off = htons(IP_DF);
+ iph->id = 0;
+ } else {
+ iph->frag_off = 0;
+- __ip_select_ident(net, iph, 1);
++ /* TCP packets here are SYNACK with fat IPv4/TCP options.
++ * Avoid using the hashed IP ident generator.
++ */
++ if (sk->sk_protocol == IPPROTO_TCP)
++ iph->id = (__force __be16)prandom_u32();
++ else
++ __ip_select_ident(net, iph, 1);
+ }
+
+ if (opt && opt->opt.optlen) {
+@@ -614,18 +621,6 @@ void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
+ }
+ EXPORT_SYMBOL(ip_fraglist_init);
+
+-static void ip_fraglist_ipcb_prepare(struct sk_buff *skb,
+- struct ip_fraglist_iter *iter)
+-{
+- struct sk_buff *to = iter->frag;
+-
+- /* Copy the flags to each fragment. */
+- IPCB(to)->flags = IPCB(skb)->flags;
+-
+- if (iter->offset == 0)
+- ip_options_fragment(to);
+-}
+-
+ void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter)
+ {
+ unsigned int hlen = iter->hlen;
+@@ -671,7 +666,7 @@ void ip_frag_init(struct sk_buff *skb, unsigned int hlen,
+ EXPORT_SYMBOL(ip_frag_init);
+
+ static void ip_frag_ipcb(struct sk_buff *from, struct sk_buff *to,
+- bool first_frag, struct ip_frag_state *state)
++ bool first_frag)
+ {
+ /* Copy the flags to each fragment. */
+ IPCB(to)->flags = IPCB(from)->flags;
+@@ -850,8 +845,20 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+ /* Prepare header of the next frame,
+ * before previous one went down. */
+ if (iter.frag) {
+- ip_fraglist_ipcb_prepare(skb, &iter);
++ bool first_frag = (iter.offset == 0);
++
++ IPCB(iter.frag)->flags = IPCB(skb)->flags;
+ ip_fraglist_prepare(skb, &iter);
++ if (first_frag && IPCB(skb)->opt.optlen) {
++ /* ipcb->opt is not populated for frags
++ * coming from __ip_make_skb(),
++ * ip_options_fragment() needs optlen
++ */
++ IPCB(iter.frag)->opt.optlen =
++ IPCB(skb)->opt.optlen;
++ ip_options_fragment(iter.frag);
++ ip_send_check(iter.iph);
++ }
+ }
+
+ skb->tstamp = tstamp;
+@@ -905,7 +912,7 @@ slow_path:
+ err = PTR_ERR(skb2);
+ goto fail;
+ }
+- ip_frag_ipcb(skb, skb2, first_frag, &state);
++ ip_frag_ipcb(skb, skb2, first_frag);
+
+ /*
+ * Put this fragment into the sending queue.
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 8ce8b7300b9d3..a5722905456c2 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -220,7 +220,8 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
+ continue;
+ }
+
+- if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
++ if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif &&
++ sk->sk_bound_dev_if != inet_sdif(skb))
+ continue;
+
+ sock_hold(sk);
+diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
+index 7d26e0f8bdaeb..5d95f80314f95 100644
+--- a/net/ipv4/raw.c
++++ b/net/ipv4/raw.c
+@@ -721,6 +721,7 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ int ret = -EINVAL;
+ int chk_addr_ret;
+
++ lock_sock(sk);
+ if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in))
+ goto out;
+
+@@ -740,7 +741,9 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ inet->inet_saddr = 0; /* Use device */
+ sk_dst_reset(sk);
+ ret = 0;
+-out: return ret;
++out:
++ release_sock(sk);
++ return ret;
+ }
+
+ /*
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 29526937077b3..4dde49e628fab 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -2577,7 +2577,7 @@ int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
+ __u32 valid_lft, u32 prefered_lft)
+ {
+ struct inet6_ifaddr *ifp = ipv6_get_ifaddr(net, addr, dev, 1);
+- int create = 0;
++ int create = 0, update_lft = 0;
+
+ if (!ifp && valid_lft) {
+ int max_addresses = in6_dev->cnf.max_addresses;
+@@ -2621,19 +2621,32 @@ int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
+ unsigned long now;
+ u32 stored_lft;
+
+- /* Update lifetime (RFC4862 5.5.3 e)
+- * We deviate from RFC4862 by honoring all Valid Lifetimes to
+- * improve the reaction of SLAAC to renumbering events
+- * (draft-gont-6man-slaac-renum-06, Section 4.2)
+- */
++ /* update lifetime (RFC2462 5.5.3 e) */
+ spin_lock_bh(&ifp->lock);
+ now = jiffies;
+ if (ifp->valid_lft > (now - ifp->tstamp) / HZ)
+ stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ;
+ else
+ stored_lft = 0;
+-
+ if (!create && stored_lft) {
++ const u32 minimum_lft = min_t(u32,
++ stored_lft, MIN_VALID_LIFETIME);
++ valid_lft = max(valid_lft, minimum_lft);
++
++ /* RFC4862 Section 5.5.3e:
++ * "Note that the preferred lifetime of the
++ * corresponding address is always reset to
++ * the Preferred Lifetime in the received
++ * Prefix Information option, regardless of
++ * whether the valid lifetime is also reset or
++ * ignored."
++ *
++ * So we should always update prefered_lft here.
++ */
++ update_lft = 1;
++ }
++
++ if (update_lft) {
+ ifp->valid_lft = valid_lft;
+ ifp->prefered_lft = prefered_lft;
+ ifp->tstamp = now;
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index e43f1fbac28b6..c783b91231321 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -110,7 +110,7 @@ void fib6_update_sernum(struct net *net, struct fib6_info *f6i)
+ fn = rcu_dereference_protected(f6i->fib6_node,
+ lockdep_is_held(&f6i->fib6_table->tb6_lock));
+ if (fn)
+- fn->fn_sernum = fib6_new_sernum(net);
++ WRITE_ONCE(fn->fn_sernum, fib6_new_sernum(net));
+ }
+
+ /*
+@@ -587,12 +587,13 @@ static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
+ spin_unlock_bh(&table->tb6_lock);
+ if (res > 0) {
+ cb->args[4] = 1;
+- cb->args[5] = w->root->fn_sernum;
++ cb->args[5] = READ_ONCE(w->root->fn_sernum);
+ }
+ } else {
+- if (cb->args[5] != w->root->fn_sernum) {
++ int sernum = READ_ONCE(w->root->fn_sernum);
++ if (cb->args[5] != sernum) {
+ /* Begin at the root if the tree changed */
+- cb->args[5] = w->root->fn_sernum;
++ cb->args[5] = sernum;
+ w->state = FWS_INIT;
+ w->node = w->root;
+ w->skip = w->count;
+@@ -1342,7 +1343,7 @@ static void __fib6_update_sernum_upto_root(struct fib6_info *rt,
+ /* paired with smp_rmb() in rt6_get_cookie_safe() */
+ smp_wmb();
+ while (fn) {
+- fn->fn_sernum = sernum;
++ WRITE_ONCE(fn->fn_sernum, sernum);
+ fn = rcu_dereference_protected(fn->parent,
+ lockdep_is_held(&rt->fib6_table->tb6_lock));
+ }
+@@ -2171,8 +2172,8 @@ static int fib6_clean_node(struct fib6_walker *w)
+ };
+
+ if (c->sernum != FIB6_NO_SERNUM_CHANGE &&
+- w->node->fn_sernum != c->sernum)
+- w->node->fn_sernum = c->sernum;
++ READ_ONCE(w->node->fn_sernum) != c->sernum)
++ WRITE_ONCE(w->node->fn_sernum, c->sernum);
+
+ if (!c->func) {
+ WARN_ON_ONCE(c->sernum == FIB6_NO_SERNUM_CHANGE);
+@@ -2536,7 +2537,7 @@ static void ipv6_route_seq_setup_walk(struct ipv6_route_iter *iter,
+ iter->w.state = FWS_INIT;
+ iter->w.node = iter->w.root;
+ iter->w.args = iter;
+- iter->sernum = iter->w.root->fn_sernum;
++ iter->sernum = READ_ONCE(iter->w.root->fn_sernum);
+ INIT_LIST_HEAD(&iter->w.lh);
+ fib6_walker_link(net, &iter->w);
+ }
+@@ -2564,8 +2565,10 @@ static struct fib6_table *ipv6_route_seq_next_table(struct fib6_table *tbl,
+
+ static void ipv6_route_check_sernum(struct ipv6_route_iter *iter)
+ {
+- if (iter->sernum != iter->w.root->fn_sernum) {
+- iter->sernum = iter->w.root->fn_sernum;
++ int sernum = READ_ONCE(iter->w.root->fn_sernum);
++
++ if (iter->sernum != sernum) {
++ iter->sernum = sernum;
+ iter->w.state = FWS_INIT;
+ iter->w.node = iter->w.root;
+ WARN_ON(iter->w.skip);
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 08441f06afd48..3a2741569b847 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -1066,14 +1066,14 @@ int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
+
+ if (unlikely(!ipv6_chk_addr_and_flags(net, laddr, ldev, false,
+ 0, IFA_F_TENTATIVE)))
+- pr_warn("%s xmit: Local address not yet configured!\n",
+- p->name);
++ pr_warn_ratelimited("%s xmit: Local address not yet configured!\n",
++ p->name);
+ else if (!(p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) &&
+ !ipv6_addr_is_multicast(raddr) &&
+ unlikely(ipv6_chk_addr_and_flags(net, raddr, ldev,
+ true, 0, IFA_F_TENTATIVE)))
+- pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
+- p->name);
++ pr_warn_ratelimited("%s xmit: Routing loop! Remote address found on this node!\n",
++ p->name);
+ else
+ ret = 1;
+ rcu_read_unlock();
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 654bf4ca61260..352e645c546eb 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -2674,7 +2674,7 @@ static void ip6_link_failure(struct sk_buff *skb)
+ if (from) {
+ fn = rcu_dereference(from->fib6_node);
+ if (fn && (rt->rt6i_flags & RTF_DEFAULT))
+- fn->fn_sernum = -1;
++ WRITE_ONCE(fn->fn_sernum, -1);
+ }
+ }
+ rcu_read_unlock();
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index f4cf26b606f92..8369af0c50eab 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -1832,15 +1832,17 @@ repeat:
+ pr_debug("nf_conntrack_in: Can't track with proto module\n");
+ nf_conntrack_put(&ct->ct_general);
+ skb->_nfct = 0;
+- NF_CT_STAT_INC_ATOMIC(state->net, invalid);
+- if (ret == -NF_DROP)
+- NF_CT_STAT_INC_ATOMIC(state->net, drop);
+ /* Special case: TCP tracker reports an attempt to reopen a
+ * closed/aborted connection. We have to go back and create a
+ * fresh conntrack.
+ */
+ if (ret == -NF_REPEAT)
+ goto repeat;
++
++ NF_CT_STAT_INC_ATOMIC(state->net, invalid);
++ if (ret == -NF_DROP)
++ NF_CT_STAT_INC_ATOMIC(state->net, drop);
++
+ ret = -ret;
+ goto out;
+ }
+diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
+index 1ebee25de6772..6a8495bd08bb2 100644
+--- a/net/netfilter/nft_payload.c
++++ b/net/netfilter/nft_payload.c
+@@ -502,6 +502,9 @@ static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
+ struct sk_buff *skb,
+ unsigned int *l4csum_offset)
+ {
++ if (pkt->xt.fragoff)
++ return -1;
++
+ switch (pkt->tprot) {
+ case IPPROTO_TCP:
+ *l4csum_offset = offsetof(struct tcphdr, check);
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index f78097aa403a8..6ef035494f30d 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1735,6 +1735,7 @@ static int fanout_add(struct sock *sk, struct fanout_args *args)
+ match->prot_hook.dev = po->prot_hook.dev;
+ match->prot_hook.func = packet_rcv_fanout;
+ match->prot_hook.af_packet_priv = match;
++ match->prot_hook.af_packet_net = read_pnet(&match->net);
+ match->prot_hook.id_match = match_fanout_group;
+ match->max_num_members = args->max_num_members;
+ list_add(&match->list, &fanout_list);
+@@ -3323,6 +3324,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
+ po->prot_hook.func = packet_rcv_spkt;
+
+ po->prot_hook.af_packet_priv = sk;
++ po->prot_hook.af_packet_net = sock_net(sk);
+
+ if (proto) {
+ po->prot_hook.type = proto;
+diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
+index 6be2672a65eab..df864e6922679 100644
+--- a/net/rxrpc/call_event.c
++++ b/net/rxrpc/call_event.c
+@@ -157,7 +157,7 @@ static void rxrpc_congestion_timeout(struct rxrpc_call *call)
+ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
+ {
+ struct sk_buff *skb;
+- unsigned long resend_at, rto_j;
++ unsigned long resend_at;
+ rxrpc_seq_t cursor, seq, top;
+ ktime_t now, max_age, oldest, ack_ts;
+ int ix;
+@@ -165,10 +165,8 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
+
+ _enter("{%d,%d}", call->tx_hard_ack, call->tx_top);
+
+- rto_j = call->peer->rto_j;
+-
+ now = ktime_get_real();
+- max_age = ktime_sub(now, jiffies_to_usecs(rto_j));
++ max_age = ktime_sub(now, jiffies_to_usecs(call->peer->rto_j));
+
+ spin_lock_bh(&call->lock);
+
+@@ -213,7 +211,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
+ }
+
+ resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest)));
+- resend_at += jiffies + rto_j;
++ resend_at += jiffies + rxrpc_get_rto_backoff(call->peer, retrans);
+ WRITE_ONCE(call->resend_at, resend_at);
+
+ if (unacked)
+diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
+index 10f2bf2e9068a..a45c83f22236e 100644
+--- a/net/rxrpc/output.c
++++ b/net/rxrpc/output.c
+@@ -468,7 +468,7 @@ done:
+ if (call->peer->rtt_count > 1) {
+ unsigned long nowj = jiffies, ack_lost_at;
+
+- ack_lost_at = rxrpc_get_rto_backoff(call->peer, retrans);
++ ack_lost_at = rxrpc_get_rto_backoff(call->peer, false);
+ ack_lost_at += nowj;
+ WRITE_ONCE(call->ack_lost_at, ack_lost_at);
+ rxrpc_reduce_call_timer(call, ack_lost_at, nowj,
+diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
+index eadc0ede928c3..5f854ffbab925 100644
+--- a/net/sunrpc/rpc_pipe.c
++++ b/net/sunrpc/rpc_pipe.c
+@@ -599,9 +599,9 @@ static int __rpc_rmdir(struct inode *dir, struct dentry *dentry)
+
+ dget(dentry);
+ ret = simple_rmdir(dir, dentry);
++ d_drop(dentry);
+ if (!ret)
+ fsnotify_rmdir(dir, dentry);
+- d_delete(dentry);
+ dput(dentry);
+ return ret;
+ }
+@@ -612,9 +612,9 @@ static int __rpc_unlink(struct inode *dir, struct dentry *dentry)
+
+ dget(dentry);
+ ret = simple_unlink(dir, dentry);
++ d_drop(dentry);
+ if (!ret)
+ fsnotify_unlink(dir, dentry);
+- d_delete(dentry);
+ dput(dentry);
+ return ret;
+ }
+diff --git a/usr/include/Makefile b/usr/include/Makefile
+index f6b3c85d900ed..703a255cddc63 100644
+--- a/usr/include/Makefile
++++ b/usr/include/Makefile
+@@ -34,7 +34,6 @@ no-header-test += linux/hdlc/ioctl.h
+ no-header-test += linux/ivtv.h
+ no-header-test += linux/kexec.h
+ no-header-test += linux/matroxfb.h
+-no-header-test += linux/nfc.h
+ no-header-test += linux/omap3isp.h
+ no-header-test += linux/omapfb.h
+ no-header-test += linux/patchkey.h
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 4a7d377b3a500..d22de43925076 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1691,7 +1691,6 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
+ {
+ return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn);
+ }
+-EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_memslot);
+
+ bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
+ {