commit:     e9a4a8d320e321ce780a3598be19da864ea4a595
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Sep 22 11:38:14 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Sep 22 11:38:14 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e9a4a8d3

Linux patch 5.10.68

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1067_linux-5.10.68.patch | 3868 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3872 insertions(+)

diff --git a/0000_README b/0000_README
index 20bba3a..416061d 100644
--- a/0000_README
+++ b/0000_README
@@ -311,6 +311,10 @@ Patch:  1066_linux-5.10.67.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.10.67
 
+Patch:  1067_linux-5.10.68.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.10.68
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.
 
diff --git a/1067_linux-5.10.68.patch b/1067_linux-5.10.68.patch
new file mode 100644
index 0000000..7a0e47b
--- /dev/null
+++ b/1067_linux-5.10.68.patch
@@ -0,0 +1,3868 @@
+diff --git a/Documentation/devicetree/bindings/arm/tegra.yaml b/Documentation/devicetree/bindings/arm/tegra.yaml
+index 767e86354c8e9..2c6911c775c8e 100644
+--- a/Documentation/devicetree/bindings/arm/tegra.yaml
++++ b/Documentation/devicetree/bindings/arm/tegra.yaml
+@@ -54,7 +54,7 @@ properties:
+ - const: toradex,apalis_t30
+ - const: nvidia,tegra30
+ - items:
+- - const: toradex,apalis_t30-eval-v1.1
++ - const: toradex,apalis_t30-v1.1-eval
+ - const: toradex,apalis_t30-eval
+ - const: toradex,apalis_t30-v1.1
+ - const: toradex,apalis_t30
+diff --git a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
+index 44919d48d2415..c459f169a9044 100644
+--- a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
++++ b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
+@@ -122,7 +122,7 @@ on various other factors also like;
+ so the device should have enough free bytes available its OOB/Spare
+ area to accommodate ECC for entire page. In general following expression
+ helps in determining if given device can accommodate ECC syndrome:
+- "2 + (PAGESIZE / 512) * ECC_BYTES" >= OOBSIZE"
++ "2 + (PAGESIZE / 512) * ECC_BYTES" <= OOBSIZE"
+ where
+ OOBSIZE number of bytes in OOB/spare area
+ PAGESIZE number of bytes in main-area of device page
+diff --git a/Makefile b/Makefile
+index a47273ecfdf21..e50581c9db50e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 10
+-SUBLEVEL = 67
++SUBLEVEL = 68
+ EXTRAVERSION =
+ NAME = Dare mighty things
+
+diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
+index a2fbea3ee07c7..102418ac5ff4a 100644
+--- a/arch/arc/mm/cache.c
++++ b/arch/arc/mm/cache.c
+@@ -1123,7 +1123,7 @@ void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
+ clear_page(to);
+ clear_bit(PG_dc_clean, &page->flags);
+ }
+-
++EXPORT_SYMBOL(clear_user_page);
+
+ /**********************************************************************
+ * Explicit Cache flush request from user space via syscall
+diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
+index 062b21f30f942..a9bbfb800ec2b 100644
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -510,7 +510,7 @@ size_t sve_state_size(struct task_struct const *task)
+ void sve_alloc(struct task_struct *task)
+ {
+ if (task->thread.sve_state) {
+- memset(task->thread.sve_state, 0, sve_state_size(current));
++ memset(task->thread.sve_state, 0, sve_state_size(task));
+ return;
+ }
+
+diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
+index 5e5dd99e8cee8..5bc978be80434 100644
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -1143,6 +1143,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
+ if (copy_from_user(&reg, argp, sizeof(reg)))
+ break;
+
++ /*
++ * We could owe a reset due to PSCI. Handle the pending reset
++ * here to ensure userspace register accesses are ordered after
++ * the reset.
++ */
++ if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
++ kvm_reset_vcpu(vcpu);
++
+ if (ioctl == KVM_SET_ONE_REG)
+ r = kvm_arm_set_reg(vcpu, &reg);
+ else
+diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
+index b969c2157ad2e..204c62debf06e 100644
+--- a/arch/arm64/kvm/reset.c
++++ b/arch/arm64/kvm/reset.c
+@@ -263,10 +263,16 @@ static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
+ */
+ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
+ {
++ struct vcpu_reset_state reset_state;
+ int ret;
+ bool loaded;
+ u32 pstate;
+
++ mutex_lock(&vcpu->kvm->lock);
++ reset_state = vcpu->arch.reset_state;
++ WRITE_ONCE(vcpu->arch.reset_state.reset, false);
++ mutex_unlock(&vcpu->kvm->lock);
++
+ /* Reset PMU outside of the non-preemptible section */
+ kvm_pmu_vcpu_reset(vcpu);
+
+@@ -325,8 +331,8 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
+ * Additional reset state handling that PSCI may have imposed on us.
+ * Must be done after all the sys_reg reset.
+ */
+- if (vcpu->arch.reset_state.reset) {
+- unsigned long target_pc = vcpu->arch.reset_state.pc;
++ if (reset_state.reset) {
++ unsigned long target_pc = reset_state.pc;
+
+ /* Gracefully handle Thumb2 entry point */
+ if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
+@@ -335,13 +341,11 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
+ }
+
+ /* Propagate caller endianness */
+- if (vcpu->arch.reset_state.be)
++ if (reset_state.be)
+ kvm_vcpu_set_be(vcpu);
+
+ *vcpu_pc(vcpu) = target_pc;
+- vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
+-
+- vcpu->arch.reset_state.reset = false;
++ vcpu_set_reg(vcpu, 0, reset_state.r0);
+ }
+
+ /* Reset timer */
+@@ -366,6 +370,14 @@ int kvm_set_ipa_limit(void)
+ mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+ parange = cpuid_feature_extract_unsigned_field(mmfr0,
+ ID_AA64MMFR0_PARANGE_SHIFT);
++ /*
++ * IPA size beyond 48 bits could not be supported
++ * on either 4K or 16K page size. Hence let's cap
++ * it to 48 bits, in case it's reported as larger
++ * on the system.
++ */
++ if (PAGE_SIZE != SZ_64K)
++ parange = min(parange, (unsigned int)ID_AA64MMFR0_PARANGE_48);
+
+ /*
+ * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index cd9995ee84419..5777b72bb8b62 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -3146,7 +3146,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
+ /* The following code handles the fake_suspend = 1 case */
+ mflr r0
+ std r0, PPC_LR_STKOFF(r1)
+- stdu r1, -PPC_MIN_STKFRM(r1)
++ stdu r1, -TM_FRAME_SIZE(r1)
+
+ /* Turn on TM. */
+ mfmsr r8
+@@ -3161,10 +3161,42 @@ BEGIN_FTR_SECTION
+ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
+ nop
+
++ /*
++ * It's possible that treclaim. may modify registers, if we have lost
++ * track of fake-suspend state in the guest due to it using rfscv.
++ * Save and restore registers in case this occurs.
++ */
++ mfspr r3, SPRN_DSCR
++ mfspr r4, SPRN_XER
++ mfspr r5, SPRN_AMR
++ /* SPRN_TAR would need to be saved here if the kernel ever used it */
++ mfcr r12
++ SAVE_NVGPRS(r1)
++ SAVE_GPR(2, r1)
++ SAVE_GPR(3, r1)
++ SAVE_GPR(4, r1)
++ SAVE_GPR(5, r1)
++ stw r12, 8(r1)
++ std r1, HSTATE_HOST_R1(r13)
++
+ /* We have to treclaim here because that's the only way to do S->N */
+ li r3, TM_CAUSE_KVM_RESCHED
+ TRECLAIM(R3)
+
++ GET_PACA(r13)
++ ld r1, HSTATE_HOST_R1(r13)
++ REST_GPR(2, r1)
++ REST_GPR(3, r1)
++ REST_GPR(4, r1)
++ REST_GPR(5, r1)
++ lwz r12, 8(r1)
++ REST_NVGPRS(r1)
++ mtspr SPRN_DSCR, r3
++ mtspr SPRN_XER, r4
++ mtspr SPRN_AMR, r5
++ mtcr r12
++ HMT_MEDIUM
++
+ /*
+ * We were in fake suspend, so we are not going to save the
+ * register state as the guest checkpointed state (since
+@@ -3192,7 +3224,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
+ std r5, VCPU_TFHAR(r9)
+ std r6, VCPU_TFIAR(r9)
+
+- addi r1, r1, PPC_MIN_STKFRM
++ addi r1, r1, TM_FRAME_SIZE
+ ld r0, PPC_LR_STKOFF(r1)
+ mtlr r0
+ blr
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index dee01d3b23a40..8d9047d2d1e11 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -248,8 +248,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
+
+ #define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask) \
+ ({ \
+- /* Branch instruction needs 6 bytes */ \
+- int rel = (addrs[(i) + (off) + 1] - (addrs[(i) + 1] - 6)) / 2;\
++ int rel = (addrs[(i) + (off) + 1] - jit->prg) / 2; \
+ _EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), (op2) | (mask));\
+ REG_SET_SEEN(b1); \
+ REG_SET_SEEN(b2); \
+@@ -761,10 +760,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ EMIT4(0xb9080000, dst_reg, src_reg);
+ break;
+ case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */
+- if (!imm)
+- break;
+- /* alfi %dst,imm */
+- EMIT6_IMM(0xc20b0000, dst_reg, imm);
++ if (imm != 0) {
++ /* alfi %dst,imm */
++ EMIT6_IMM(0xc20b0000, dst_reg, imm);
++ }
+ EMIT_ZERO(dst_reg);
+ break;
+ case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */
+@@ -786,17 +785,22 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ EMIT4(0xb9090000, dst_reg, src_reg);
+ break;
+ case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */
+- if (!imm)
+- break;
+- /* alfi %dst,-imm */
+- EMIT6_IMM(0xc20b0000, dst_reg, -imm);
++ if (imm != 0) {
++ /* alfi %dst,-imm */
++ EMIT6_IMM(0xc20b0000, dst_reg, -imm);
++ }
+ EMIT_ZERO(dst_reg);
+ break;
+ case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */
+ if (!imm)
+ break;
+- /* agfi %dst,-imm */
+- EMIT6_IMM(0xc2080000, dst_reg, -imm);
++ if (imm == -0x80000000) {
++ /* algfi %dst,0x80000000 */
++ EMIT6_IMM(0xc20a0000, dst_reg, 0x80000000);
++ } else {
++ /* agfi %dst,-imm */
++ EMIT6_IMM(0xc2080000, dst_reg, -imm);
++ }
+ break;
+ /*
+ * BPF_MUL
+@@ -811,10 +815,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ EMIT4(0xb90c0000, dst_reg, src_reg);
+ break;
+ case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */
+- if (imm == 1)
+- break;
+- /* msfi %r5,imm */
+- EMIT6_IMM(0xc2010000, dst_reg, imm);
++ if (imm != 1) {
++ /* msfi %r5,imm */
++ EMIT6_IMM(0xc2010000, dst_reg, imm);
++ }
+ EMIT_ZERO(dst_reg);
+ break;
+ case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */
+@@ -867,6 +871,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ if (BPF_OP(insn->code) == BPF_MOD)
+ /* lhgi %dst,0 */
+ EMIT4_IMM(0xa7090000, dst_reg, 0);
++ else
++ EMIT_ZERO(dst_reg);
+ break;
+ }
+ /* lhi %w0,0 */
+@@ -999,10 +1005,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ EMIT4(0xb9820000, dst_reg, src_reg);
+ break;
+ case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */
+- if (!imm)
+- break;
+- /* xilf %dst,imm */
+- EMIT6_IMM(0xc0070000, dst_reg, imm);
++ if (imm != 0) {
++ /* xilf %dst,imm */
++ EMIT6_IMM(0xc0070000, dst_reg, imm);
++ }
+ EMIT_ZERO(dst_reg);
+ break;
+ case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */
+@@ -1033,10 +1039,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0);
+ break;
+ case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */
+- if (imm == 0)
+- break;
+- /* sll %dst,imm(%r0) */
+- EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
++ if (imm != 0) {
++ /* sll %dst,imm(%r0) */
++ EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
++ }
+ EMIT_ZERO(dst_reg);
+ break;
+ case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */
+@@ -1058,10 +1064,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0);
+ break;
+ case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */
+- if (imm == 0)
+- break;
+- /* srl %dst,imm(%r0) */
+- EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
++ if (imm != 0) {
++ /* srl %dst,imm(%r0) */
++ EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
++ }
+ EMIT_ZERO(dst_reg);
+ break;
+ case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */
+@@ -1083,10 +1089,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0);
+ break;
+ case BPF_ALU | BPF_ARSH | BPF_K: /* ((s32) dst >> imm */
+- if (imm == 0)
+- break;
+- /* sra %dst,imm(%r0) */
+- EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
++ if (imm != 0) {
++ /* sra %dst,imm(%r0) */
++ EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
++ }
+ EMIT_ZERO(dst_reg);
+ break;
+ case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */
+diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
+index c9fa7be3df82d..5c95d242f38d7 100644
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -301,8 +301,8 @@ do { \
+ unsigned int __gu_low, __gu_high; \
+ const unsigned int __user *__gu_ptr; \
+ __gu_ptr = (const void __user *)(ptr); \
+- __get_user_asm(__gu_low, ptr, "l", "=r", label); \
+- __get_user_asm(__gu_high, ptr+1, "l", "=r", label); \
++ __get_user_asm(__gu_low, __gu_ptr, "l", "=r", label); \
++ __get_user_asm(__gu_high, __gu_ptr+1, "l", "=r", label); \
+ (x) = ((unsigned long long)__gu_high << 32) | __gu_low; \
+ } while (0)
+ #else
+diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
+index 056d0367864e9..14b34963eb1f7 100644
+--- a/arch/x86/kernel/cpu/mce/core.c
++++ b/arch/x86/kernel/cpu/mce/core.c
+@@ -1241,6 +1241,9 @@ static void __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *fin
+
+ static void kill_me_now(struct callback_head *ch)
+ {
++ struct task_struct *p = container_of(ch, struct task_struct, mce_kill_me);
++
++ p->mce_count = 0;
+ force_sig(SIGBUS);
+ }
+
+@@ -1249,6 +1252,7 @@ static void kill_me_maybe(struct callback_head *cb)
+ struct task_struct *p = container_of(cb, struct task_struct, mce_kill_me);
+ int flags = MF_ACTION_REQUIRED;
+
++ p->mce_count = 0;
+ pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr);
+
+ if (!p->mce_ripv)
+@@ -1269,17 +1273,34 @@ static void kill_me_maybe(struct callback_head *cb)
+ }
+ }
+
+-static void queue_task_work(struct mce *m, int kill_it)
++static void queue_task_work(struct mce *m, char *msg, int kill_current_task)
+ {
+- current->mce_addr = m->addr;
+- current->mce_kflags = m->kflags;
+- current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
+- current->mce_whole_page = whole_page(m);
++ int count = ++current->mce_count;
+
+- if (kill_it)
+- current->mce_kill_me.func = kill_me_now;
+- else
+- current->mce_kill_me.func = kill_me_maybe;
++ /* First call, save all the details */
++ if (count == 1) {
++ current->mce_addr = m->addr;
++ current->mce_kflags = m->kflags;
++ current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
++ current->mce_whole_page = whole_page(m);
++
++ if (kill_current_task)
++ current->mce_kill_me.func = kill_me_now;
++ else
++ current->mce_kill_me.func = kill_me_maybe;
++ }
++
++ /* Ten is likely overkill. Don't expect more than two faults before task_work() */
++ if (count > 10)
++ mce_panic("Too many consecutive machine checks while accessing user data", m, msg);
++
++ /* Second or later call, make sure page address matches the one from first call */
++ if (count > 1 && (current->mce_addr >> PAGE_SHIFT) != (m->addr >> PAGE_SHIFT))
++ mce_panic("Consecutive machine checks to different user pages", m, msg);
++
++ /* Do not call task_work_add() more than once */
++ if (count > 1)
++ return;
+
+ task_work_add(current, &current->mce_kill_me, TWA_RESUME);
+ }
+@@ -1427,7 +1448,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
+ /* If this triggers there is no way to recover. Die hard. */
+ BUG_ON(!on_thread_stack() || !user_mode(regs));
+
+- queue_task_work(&m, kill_it);
++ queue_task_work(&m, msg, kill_it);
+
+ } else {
+ /*
+@@ -1445,7 +1466,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
+ }
+
+ if (m.kflags & MCE_IN_KERNEL_COPYIN)
+- queue_task_work(&m, kill_it);
++ queue_task_work(&m, msg, kill_it);
+ }
+ out:
+ mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
+diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
+index b5a3fa4033d38..067ca92e69ef9 100644
+--- a/arch/x86/mm/init_64.c
++++ b/arch/x86/mm/init_64.c
+@@ -1389,18 +1389,18 @@ int kern_addr_valid(unsigned long addr)
+ return 0;
+
+ p4d = p4d_offset(pgd, addr);
+- if (p4d_none(*p4d))
++ if (!p4d_present(*p4d))
+ return 0;
+
+ pud = pud_offset(p4d, addr);
+- if (pud_none(*pud))
++ if (!pud_present(*pud))
+ return 0;
+
+ if (pud_large(*pud))
+ return pfn_valid(pud_pfn(*pud));
+
+ pmd = pmd_offset(pud, addr);
+- if (pmd_none(*pmd))
++ if (!pmd_present(*pmd))
+ return 0;
+
+ if (pmd_large(*pmd))
+diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
+index ca311aaa67b88..232932bda4e5e 100644
+--- a/arch/x86/mm/pat/memtype.c
++++ b/arch/x86/mm/pat/memtype.c
+@@ -583,7 +583,12 @@ int memtype_reserve(u64 start, u64 end, enum page_cache_mode req_type,
+ int err = 0;
+
+ start = sanitize_phys(start);
+- end = sanitize_phys(end);
++
++ /*
++ * The end address passed into this function is exclusive, but
++ * sanitize_phys() expects an inclusive address.
++ */
++ end = sanitize_phys(end - 1) + 1;
+ if (start >= end) {
+ WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
+ start, end - 1, cattr_name(req_type));
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index d3cdf467d91fa..c758fd913cedd 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -1204,6 +1204,11 @@ static void __init xen_dom0_set_legacy_features(void)
+ x86_platform.legacy.rtc = 1;
+ }
+
++static void __init xen_domu_set_legacy_features(void)
++{
++ x86_platform.legacy.rtc = 0;
++}
++
+ /* First C function to be called on Xen boot */
+ asmlinkage __visible void __init xen_start_kernel(void)
+ {
+@@ -1356,6 +1361,8 @@ asmlinkage __visible void __init xen_start_kernel(void)
+ add_preferred_console("xenboot", 0, NULL);
+ if (pci_xen)
+ x86_init.pci.arch_init = pci_xen_init;
++ x86_platform.set_legacy_features =
++ xen_domu_set_legacy_features;
+ } else {
+ const struct dom0_vga_console_info *info =
+ (void *)((char *)xen_start_info +
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index b8c2ddc01aec3..65c200e0ecb59 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -2526,6 +2526,15 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
+ * are likely to increase the throughput.
+ */
+ bfqq->new_bfqq = new_bfqq;
++ /*
++ * The above assignment schedules the following redirections:
++ * each time some I/O for bfqq arrives, the process that
++ * generated that I/O is disassociated from bfqq and
++ * associated with new_bfqq. Here we increases new_bfqq->ref
++ * in advance, adding the number of processes that are
++ * expected to be associated with new_bfqq as they happen to
++ * issue I/O.
++ */
+ new_bfqq->ref += process_refs;
+ return new_bfqq;
+ }
+@@ -2585,6 +2594,10 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ {
+ struct bfq_queue *in_service_bfqq, *new_bfqq;
+
++ /* if a merge has already been setup, then proceed with that first */
++ if (bfqq->new_bfqq)
++ return bfqq->new_bfqq;
++
+ /*
+ * Do not perform queue merging if the device is non
+ * rotational and performs internal queueing. In fact, such a
+@@ -2639,9 +2652,6 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ if (bfq_too_late_for_merging(bfqq))
+ return NULL;
+
+- if (bfqq->new_bfqq)
+- return bfqq->new_bfqq;
+-
+ if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
+ return NULL;
+
+diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
+index a97f33d0c59f9..94665037f4a35 100644
+--- a/drivers/base/power/trace.c
++++ b/drivers/base/power/trace.c
+@@ -13,6 +13,7 @@
+ #include <linux/export.h>
+ #include <linux/rtc.h>
+ #include <linux/suspend.h>
++#include <linux/init.h>
+
+ #include <linux/mc146818rtc.h>
+
+@@ -165,6 +166,9 @@ void generate_pm_trace(const void *tracedata, unsigned int user)
+ const char *file = *(const char **)(tracedata + 2);
+ unsigned int user_hash_value, file_hash_value;
+
++ if (!x86_platform.legacy.rtc)
++ return;
++
+ user_hash_value = user % USERHASH;
+ file_hash_value = hash_string(lineno, file, FILEHASH);
+ set_magic_time(user_hash_value, file_hash_value, dev_hash_value);
+@@ -267,6 +271,9 @@ static struct notifier_block pm_trace_nb = {
+
+ static int __init early_resume_init(void)
+ {
++ if (!x86_platform.legacy.rtc)
++ return 0;
++
+ hash_value_early_read = read_magic_time();
+ register_pm_notifier(&pm_trace_nb);
+ return 0;
+@@ -277,6 +284,9 @@ static int __init late_resume_init(void)
+ unsigned int val = hash_value_early_read;
+ unsigned int user, file, dev;
+
++ if (!x86_platform.legacy.rtc)
++ return 0;
++
+ user = val % USERHASH;
+ val = val / USERHASH;
+ file = val % FILEHASH;
+diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
+index 3c2fa44d9279b..d60d5520707dc 100644
+--- a/drivers/gpio/gpio-mpc8xxx.c
++++ b/drivers/gpio/gpio-mpc8xxx.c
+@@ -374,7 +374,7 @@ static int mpc8xxx_probe(struct platform_device *pdev)
+ of_device_is_compatible(np, "fsl,ls1088a-gpio"))
+ gc->write_reg(mpc8xxx_gc->regs + GPIO_IBE, 0xffffffff);
+
+- ret = gpiochip_add_data(gc, mpc8xxx_gc);
++ ret = devm_gpiochip_add_data(&pdev->dev, gc, mpc8xxx_gc);
+ if (ret) {
+ pr_err("%pOF: GPIO chip registration failed with status %d\n",
+ np, ret);
+@@ -406,6 +406,8 @@ static int mpc8xxx_probe(struct platform_device *pdev)
+
+ return 0;
+ err:
++ if (mpc8xxx_gc->irq)
++ irq_domain_remove(mpc8xxx_gc->irq);
+ iounmap(mpc8xxx_gc->regs);
+ return ret;
+ }
+@@ -419,7 +421,6 @@ static int mpc8xxx_remove(struct platform_device *pdev)
+ irq_domain_remove(mpc8xxx_gc->irq);
+ }
+
+- gpiochip_remove(&mpc8xxx_gc->gc);
+ iounmap(mpc8xxx_gc->regs);
+
+ return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 76c31aa7b84df..d949d6c52f24b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -717,7 +717,7 @@ enum amd_hw_ip_block_type {
+ MAX_HWIP
+ };
+
+-#define HWIP_MAX_INSTANCE 8
++#define HWIP_MAX_INSTANCE 10
+
+ struct amd_powerplay {
+ void *pp_handle;
+diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
+index c1926154eda84..29b1ce2140abc 100644
+--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
++++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
+@@ -867,8 +867,14 @@ static enum drm_mode_status lt9611_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
+ {
+ struct lt9611_mode *lt9611_mode = lt9611_find_mode(mode);
++ struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
+
+- return lt9611_mode ? MODE_OK : MODE_BAD;
++ if (!lt9611_mode)
++ return MODE_BAD;
++ else if (lt9611_mode->intfs > 1 && !lt9611->dsi1)
++ return MODE_PANEL;
++ else
++ return MODE_OK;
+ }
+
+ static void lt9611_bridge_pre_enable(struct drm_bridge *bridge)
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+index 76d38561c9103..cf741c5c82d25 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+@@ -397,8 +397,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
+ if (switch_mmu_context) {
+ struct etnaviv_iommu_context *old_context = gpu->mmu_context;
+
+- etnaviv_iommu_context_get(mmu_context);
+- gpu->mmu_context = mmu_context;
++ gpu->mmu_context = etnaviv_iommu_context_get(mmu_context);
+ etnaviv_iommu_context_put(old_context);
+ }
+
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+index 2b7e85318a76a..424474041c943 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+@@ -305,8 +305,7 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
+ list_del(&mapping->obj_node);
+ }
+
+- etnaviv_iommu_context_get(mmu_context);
+- mapping->context = mmu_context;
++ mapping->context = etnaviv_iommu_context_get(mmu_context);
+ mapping->use = 1;
+
+ ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+index d05c359945799..5f24cc52c2878 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+@@ -532,8 +532,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
+ goto err_submit_objects;
+
+ submit->ctx = file->driver_priv;
+- etnaviv_iommu_context_get(submit->ctx->mmu);
+- submit->mmu_context = submit->ctx->mmu;
++ submit->mmu_context = etnaviv_iommu_context_get(submit->ctx->mmu);
+ submit->exec_state = args->exec_state;
+ submit->flags = args->flags;
+
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+index c6404b8d067f1..2520b7dad6ce7 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+@@ -561,6 +561,12 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
+ /* We rely on the GPU running, so program the clock */
+ etnaviv_gpu_update_clock(gpu);
+
++ gpu->fe_running = false;
++ gpu->exec_state = -1;
++ if (gpu->mmu_context)
++ etnaviv_iommu_context_put(gpu->mmu_context);
++ gpu->mmu_context = NULL;
++
+ return 0;
+ }
+
+@@ -623,19 +629,23 @@ void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
+ VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE |
+ VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch));
+ }
++
++ gpu->fe_running = true;
+ }
+
+-static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu)
++static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
++ struct etnaviv_iommu_context *context)
+ {
+- u32 address = etnaviv_cmdbuf_get_va(&gpu->buffer,
+- &gpu->mmu_context->cmdbuf_mapping);
+ u16 prefetch;
++ u32 address;
+
+ /* setup the MMU */
+- etnaviv_iommu_restore(gpu, gpu->mmu_context);
++ etnaviv_iommu_restore(gpu, context);
+
+ /* Start command processor */
+ prefetch = etnaviv_buffer_init(gpu);
++ address = etnaviv_cmdbuf_get_va(&gpu->buffer,
++ &gpu->mmu_context->cmdbuf_mapping);
+
+ etnaviv_gpu_start_fe(gpu, address, prefetch);
+ }
+@@ -814,7 +824,6 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
+ /* Now program the hardware */
+ mutex_lock(&gpu->lock);
+ etnaviv_gpu_hw_init(gpu);
+- gpu->exec_state = -1;
+ mutex_unlock(&gpu->lock);
+
+ pm_runtime_mark_last_busy(gpu->dev);
+@@ -1039,8 +1048,6 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
+ spin_unlock(&gpu->event_spinlock);
+
+ etnaviv_gpu_hw_init(gpu);
+- gpu->exec_state = -1;
+- gpu->mmu_context = NULL;
+
+ mutex_unlock(&gpu->lock);
+ pm_runtime_mark_last_busy(gpu->dev);
+@@ -1352,14 +1359,12 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
+ goto out_unlock;
+ }
+
+- if (!gpu->mmu_context) {
+- etnaviv_iommu_context_get(submit->mmu_context);
+- gpu->mmu_context = submit->mmu_context;
+- etnaviv_gpu_start_fe_idleloop(gpu);
+- } else {
+- etnaviv_iommu_context_get(gpu->mmu_context);
+- submit->prev_mmu_context = gpu->mmu_context;
+- }
++ if (!gpu->fe_running)
++ etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context);
++
++ if (submit->prev_mmu_context)
++ etnaviv_iommu_context_put(submit->prev_mmu_context);
++ submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context);
+
+ if (submit->nr_pmrs) {
+ gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
+@@ -1561,7 +1566,7 @@ int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
+
+ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
+ {
+- if (gpu->initialized && gpu->mmu_context) {
++ if (gpu->initialized && gpu->fe_running) {
+ /* Replace the last WAIT with END */
+ mutex_lock(&gpu->lock);
+ etnaviv_buffer_end(gpu);
+@@ -1574,8 +1579,7 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
+ */
+ etnaviv_gpu_wait_idle(gpu, 100);
+
+- etnaviv_iommu_context_put(gpu->mmu_context);
+- gpu->mmu_context = NULL;
++ gpu->fe_running = false;
+ }
+
+ gpu->exec_state = -1;
+@@ -1723,6 +1727,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
+ etnaviv_gpu_hw_suspend(gpu);
+ #endif
+
++ if (gpu->mmu_context)
++ etnaviv_iommu_context_put(gpu->mmu_context);
++
+ if (gpu->initialized) {
+ etnaviv_cmdbuf_free(&gpu->buffer);
+ etnaviv_iommu_global_fini(gpu);
+
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+index 8ea48697d1321..1c75c8ed5bcea 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+@@ -101,6 +101,7 @@ struct etnaviv_gpu {
+ struct workqueue_struct *wq;
+ struct drm_gpu_scheduler sched;
+ bool initialized;
++ bool fe_running;
+
+ /* 'ring'-buffer: */
+ struct etnaviv_cmdbuf buffer;
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
+index 1a7c89a67bea3..afe5dd6a9925b 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
+@@ -92,6 +92,10 @@ static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu,
+ struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
+ u32 pgtable;
+
++ if (gpu->mmu_context)
++ etnaviv_iommu_context_put(gpu->mmu_context);
++ gpu->mmu_context = etnaviv_iommu_context_get(context);
++
+ /* set base addresses */
+ gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base);
+ gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base);
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+index f8bf488e9d717..d664ae29ae209 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+@@ -172,6 +172,10 @@ static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
+ if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
+ return;
+
++ if (gpu->mmu_context)
++ etnaviv_iommu_context_put(gpu->mmu_context);
++ gpu->mmu_context = etnaviv_iommu_context_get(context);
++
+ prefetch = etnaviv_buffer_config_mmuv2(gpu,
+ (u32)v2_context->mtlb_dma,
+ (u32)context->global->bad_page_dma);
+@@ -192,6 +196,10 @@ static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu,
+ if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
+ return;
+
++ if (gpu->mmu_context)
++ etnaviv_iommu_context_put(gpu->mmu_context);
++ gpu->mmu_context = etnaviv_iommu_context_get(context);
++
+ gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
+ lower_32_bits(context->global->v2.pta_dma));
+ gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+index 15d9fa3879e5d..984569a59a90a 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+@@ -197,6 +197,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
+ */
+ list_for_each_entry_safe(m, n, &list, scan_node) {
+ etnaviv_iommu_remove_mapping(context, m);
++ etnaviv_iommu_context_put(m->context);
+ m->context = NULL;
+ list_del_init(&m->mmu_node);
+ list_del_init(&m->scan_node);
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
+index d1d6902fd13be..e4a0b7d09c2ea 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
++++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
+@@ -105,9 +105,11 @@ void etnaviv_iommu_dump(struct etnaviv_iommu_context *ctx, void *buf);
+ struct etnaviv_iommu_context *
+ etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
+ struct etnaviv_cmdbuf_suballoc *suballoc);
+-static inline void etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
++static inline struct etnaviv_iommu_context *
++etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
+ {
+ kref_get(&ctx->refcount);
++ return ctx;
+ }
+ void etnaviv_iommu_context_put(struct etnaviv_iommu_context *ctx);
+ void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
+diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
+index 6802d9b65f828..dec54c70e0082 100644
+--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
++++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
+@@ -1122,7 +1122,7 @@ static int cdn_dp_suspend(struct device *dev)
+ return ret;
+ }
+
+-static int cdn_dp_resume(struct device *dev)
++static __maybe_unused int cdn_dp_resume(struct device *dev)
+ {
+ struct cdn_dp_device *dp = dev_get_drvdata(dev);
+
+diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
+index a3bac9da8cbbc..4cea63a4cab73 100644
+--- a/drivers/mfd/ab8500-core.c
++++ b/drivers/mfd/ab8500-core.c
+@@ -493,7 +493,7 @@ static int ab8500_handle_hierarchical_line(struct ab8500 *ab8500,
+ if (line == AB8540_INT_GPIO43F || line == AB8540_INT_GPIO44F)
+ line += 1;
+
+- handle_nested_irq(irq_create_mapping(ab8500->domain, line));
++ handle_nested_irq(irq_find_mapping(ab8500->domain, line));
+ }
+
+ return 0;
+diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
+index aa59496e43768..9db1000944c34 100644
+--- a/drivers/mfd/axp20x.c
++++ b/drivers/mfd/axp20x.c
+@@ -125,12 +125,13 @@ static const struct regmap_range axp288_writeable_ranges[] = {
+
+ static const struct regmap_range axp288_volatile_ranges[] = {
+ regmap_reg_range(AXP20X_PWR_INPUT_STATUS, AXP288_POWER_REASON),
++ regmap_reg_range(AXP22X_PWR_OUT_CTRL1, AXP22X_ALDO3_V_OUT),
+ regmap_reg_range(AXP288_BC_GLOBAL, AXP288_BC_GLOBAL),
+ regmap_reg_range(AXP288_BC_DET_STAT, AXP20X_VBUS_IPSOUT_MGMT),
+ regmap_reg_range(AXP20X_CHRG_BAK_CTRL, AXP20X_CHRG_BAK_CTRL),
+ regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IPSOUT_V_HIGH_L),
+ regmap_reg_range(AXP20X_TIMER_CTRL, AXP20X_TIMER_CTRL),
+- regmap_reg_range(AXP22X_GPIO_STATE, AXP22X_GPIO_STATE),
++ regmap_reg_range(AXP20X_GPIO1_CTRL, AXP22X_GPIO_STATE),
+ regmap_reg_range(AXP288_RT_BATT_V_H, AXP288_RT_BATT_V_L),
+ regmap_reg_range(AXP20X_FG_RES, AXP288_FG_CC_CAP_REG),
+ };
+diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
+index a5983d515db03..8d5f8f07d8a66 100644
+--- a/drivers/mfd/db8500-prcmu.c
++++ b/drivers/mfd/db8500-prcmu.c
+@@ -1622,22 +1622,20 @@ static long round_clock_rate(u8 clock, unsigned long rate)
+ }
+
+ static const unsigned long db8500_armss_freqs[] = {
+- 200000000,
+- 400000000,
+- 800000000,
++ 199680000,
++ 399360000,
++ 798720000,
+ 998400000
+ };
+
+ /* The DB8520 has slightly higher ARMSS max frequency */
+ static const unsigned long db8520_armss_freqs[] = {
+- 200000000,
+- 400000000,
+- 800000000,
++ 199680000,
++ 399360000,
++ 798720000,
+ 1152000000
+ };
+
+-
+-
+ static long round_armss_rate(unsigned long rate)
+ {
+ unsigned long freq = 0;
+diff --git a/drivers/mfd/lpc_sch.c b/drivers/mfd/lpc_sch.c
+index f27eb8dabc1c8..9ab9adce06fdd 100644
+--- a/drivers/mfd/lpc_sch.c
++++ b/drivers/mfd/lpc_sch.c
+@@ -22,13 +22,10 @@
+ #define SMBASE 0x40
+ #define SMBUS_IO_SIZE 64
+
+-#define GPIOBASE 0x44
++#define GPIO_BASE 0x44
+ #define GPIO_IO_SIZE 64
+ #define GPIO_IO_SIZE_CENTERTON 128
+
+-/* Intel Quark X1000 GPIO IRQ Number */
+-#define GPIO_IRQ_QUARK_X1000 9
+-
+ #define WDTBASE 0x84
+ #define WDT_IO_SIZE 64
+
+@@ -43,30 +40,25 @@ struct lpc_sch_info {
+ unsigned int io_size_smbus;
+ unsigned int io_size_gpio;
+ unsigned int io_size_wdt;
+- int irq_gpio;
+ };
+
+ static struct lpc_sch_info sch_chipset_info[] = {
+ [LPC_SCH] = {
+ .io_size_smbus = SMBUS_IO_SIZE,
+ .io_size_gpio = GPIO_IO_SIZE,
+- .irq_gpio = -1,
+ },
+ [LPC_ITC] = {
+ .io_size_smbus = SMBUS_IO_SIZE,
+ .io_size_gpio = GPIO_IO_SIZE,
+ .io_size_wdt = WDT_IO_SIZE,
+- .irq_gpio = -1,
+ },
+ [LPC_CENTERTON] = {
+ .io_size_smbus = SMBUS_IO_SIZE,
+ .io_size_gpio = GPIO_IO_SIZE_CENTERTON,
+ .io_size_wdt = WDT_IO_SIZE,
+- .irq_gpio = -1,
+ },
+ [LPC_QUARK_X1000] = {
+ .io_size_gpio = GPIO_IO_SIZE,
+- .irq_gpio = GPIO_IRQ_QUARK_X1000,
+ .io_size_wdt = WDT_IO_SIZE,
+ },
+ };
+@@ -113,13 +105,13 @@ static int lpc_sch_get_io(struct pci_dev *pdev, int where, const char *name,
+ }
+
+ static int lpc_sch_populate_cell(struct pci_dev *pdev, int where,
+- const char *name, int size, int irq,
+- int id, struct mfd_cell *cell)
++ const char *name, int size, int id,
++ struct mfd_cell *cell)
+ {
+ struct resource *res;
+ int ret;
+
+- res = devm_kcalloc(&pdev->dev, 2, sizeof(*res), GFP_KERNEL);
++ res = devm_kzalloc(&pdev->dev, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+@@ -135,18 +127,6 @@ static int lpc_sch_populate_cell(struct pci_dev *pdev, int where,
+ cell->ignore_resource_conflicts = true;
+ cell->id = id;
+
+- /* Check if we need to add an IRQ resource */
+- if (irq < 0)
+- return 0;
+-
+- res++;
+-
+- res->start = irq;
+- res->end = irq;
+- res->flags = IORESOURCE_IRQ;
+-
+- cell->num_resources++;
+-
+ return 0;
+ }
+
+@@ -158,15 +138,15 @@ static int lpc_sch_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ int ret;
+
+ ret = lpc_sch_populate_cell(dev, SMBASE, "isch_smbus",
+- info->io_size_smbus, -1,
++ info->io_size_smbus,
+ id->device, &lpc_sch_cells[cells]);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ cells++;
+
+- ret = lpc_sch_populate_cell(dev, GPIOBASE, "sch_gpio",
+- info->io_size_gpio, info->irq_gpio,
++ ret = lpc_sch_populate_cell(dev, GPIO_BASE, "sch_gpio",
++ info->io_size_gpio,
+ id->device, &lpc_sch_cells[cells]);
+ if (ret < 0)
+ return ret;
+@@ -174,7 +154,7 @@ static int lpc_sch_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ cells++;
+
+ ret = lpc_sch_populate_cell(dev, WDTBASE, "ie6xx_wdt",
+- info->io_size_wdt, -1,
++ info->io_size_wdt,
+ id->device, &lpc_sch_cells[cells]);
+ if (ret < 0)
+ return ret;
+diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
+index 1aee3b3253fc9..508349399f8af 100644
+--- a/drivers/mfd/stmpe.c
++++ b/drivers/mfd/stmpe.c
+@@ -1091,7 +1091,7 @@ static irqreturn_t stmpe_irq(int irq, void *data)
+
+ if (variant->id_val == STMPE801_ID ||
+ variant->id_val == STMPE1600_ID) {
+- int base = irq_create_mapping(stmpe->domain, 0);
++ int base = irq_find_mapping(stmpe->domain, 0);
+
+ handle_nested_irq(base);
+ return IRQ_HANDLED;
+@@ -1119,7 +1119,7 @@ static irqreturn_t stmpe_irq(int irq, void *data)
+ while (status) {
+ int bit = __ffs(status);
+ int line = bank * 8 + bit;
+- int nestedirq = irq_create_mapping(stmpe->domain, line);
++ int nestedirq = irq_find_mapping(stmpe->domain, line);
+
+ handle_nested_irq(nestedirq);
+ status &= ~(1 << bit);
+diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
+index 7882a37ffc352..5c2d5a6a6da9c 100644
+--- a/drivers/mfd/tc3589x.c
++++ b/drivers/mfd/tc3589x.c
+@@ -187,7 +187,7 @@ again:
+
+ while (status) {
+ int bit = __ffs(status);
+- int virq = irq_create_mapping(tc3589x->domain, bit);
++ int virq = irq_find_mapping(tc3589x->domain, bit);
+
+ handle_nested_irq(virq);
+ status &= ~(1 << bit);
+diff --git a/drivers/mfd/tqmx86.c b/drivers/mfd/tqmx86.c
+index ddddf08b6a4cc..732013f40e4e8 100644
+--- a/drivers/mfd/tqmx86.c
++++ b/drivers/mfd/tqmx86.c
+@@ -209,6 +209,8 @@ static int tqmx86_probe(struct platform_device *pdev)
+
+ /* Assumes the IRQ resource is first. */
+ tqmx_gpio_resources[0].start = gpio_irq;
++ } else {
++ tqmx_gpio_resources[0].flags = 0;
+ }
+
+ ocores_platfom_data.clock_khz = tqmx86_board_id_to_clk_rate(board_id);
+diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c
+index 6c3a619e26286..651a028bc519a 100644
+--- a/drivers/mfd/wm8994-irq.c
++++ b/drivers/mfd/wm8994-irq.c
+@@ -154,7 +154,7 @@ static irqreturn_t wm8994_edge_irq(int irq, void *data)
+ struct wm8994 *wm8994 = data;
+
+ while (gpio_get_value_cansleep(wm8994->pdata.irq_gpio))
+- handle_nested_irq(irq_create_mapping(wm8994->edge_irq, 0));
++ handle_nested_irq(irq_find_mapping(wm8994->edge_irq, 0));
+
+ return IRQ_HANDLED;
+ }
+diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
+index 6e4d0017c0bd4..f685a581df481 100644
+--- a/drivers/mtd/mtdconcat.c
++++ b/drivers/mtd/mtdconcat.c
+@@ -641,6 +641,7 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
+ int i;
+ size_t size;
+ struct mtd_concat *concat;
++ struct mtd_info *subdev_master = NULL;
+ uint32_t max_erasesize, curr_erasesize;
+ int num_erase_region;
+ int max_writebufsize = 0;
+@@ -679,18 +680,24 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
+ concat->mtd.subpage_sft = subdev[0]->subpage_sft;
+ concat->mtd.oobsize = subdev[0]->oobsize;
+ concat->mtd.oobavail = subdev[0]->oobavail;
+- if (subdev[0]->_writev)
++
++ subdev_master = mtd_get_master(subdev[0]);
++ if (subdev_master->_writev)
+ concat->mtd._writev = concat_writev;
+- if (subdev[0]->_read_oob)
++ if (subdev_master->_read_oob)
+ concat->mtd._read_oob = concat_read_oob;
+- if (subdev[0]->_write_oob)
++ if (subdev_master->_write_oob)
+ concat->mtd._write_oob = concat_write_oob;
+- if (subdev[0]->_block_isbad)
++ if (subdev_master->_block_isbad)
+ concat->mtd._block_isbad = concat_block_isbad;
+- if (subdev[0]->_block_markbad)
++ if (subdev_master->_block_markbad)
+ concat->mtd._block_markbad = concat_block_markbad;
+- if (subdev[0]->_panic_write)
++ if (subdev_master->_panic_write)
+ concat->mtd._panic_write = concat_panic_write;
++ if (subdev_master->_read)
++ concat->mtd._read = concat_read;
++ if (subdev_master->_write)
++ concat->mtd._write = concat_write;
+
+ concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;
+
+@@ -721,14 +728,22 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
+ subdev[i]->flags & MTD_WRITEABLE;
+ }
+
++ subdev_master = mtd_get_master(subdev[i]);
+ concat->mtd.size += subdev[i]->size;
+ concat->mtd.ecc_stats.badblocks +=
+ subdev[i]->ecc_stats.badblocks;
+ if (concat->mtd.writesize != subdev[i]->writesize ||
+ concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
+ concat->mtd.oobsize != subdev[i]->oobsize ||
+- !concat->mtd._read_oob != !subdev[i]->_read_oob ||
+- !concat->mtd._write_oob != !subdev[i]->_write_oob) {
++ !concat->mtd._read_oob != !subdev_master->_read_oob ||
++ !concat->mtd._write_oob != !subdev_master->_write_oob) {
++ /*
++ * Check against subdev[i] for data members, because
++ * subdev's attributes may be different from master
++ * mtd device. Check against subdev's master mtd
++ * device for callbacks, because the existence of
++ * subdev's callbacks is decided by master mtd device.
++ */
+ kfree(concat);
+ printk("Incompatible OOB or ECC data on \"%s\"\n",
+ subdev[i]->name);
+@@ -744,8 +759,6 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
+ concat->mtd.name = name;
+
+ concat->mtd._erase = concat_erase;
+- concat->mtd._read = concat_read;
+- concat->mtd._write = concat_write;
+ concat->mtd._sync = concat_sync;
+ concat->mtd._lock = concat_lock;
+ concat->mtd._unlock = concat_unlock;
+diff --git a/drivers/mtd/nand/raw/cafe_nand.c b/drivers/mtd/nand/raw/cafe_nand.c
+index 2b94f385a1a88..04502d22efc9c 100644
+--- a/drivers/mtd/nand/raw/cafe_nand.c
++++ b/drivers/mtd/nand/raw/cafe_nand.c
+@@ -751,7 +751,7 @@ static int cafe_nand_probe(struct pci_dev *pdev,
+ "CAFE NAND", mtd);
+ if (err) {
+ dev_warn(&pdev->dev, "Could not register IRQ %d\n", pdev->irq);
+- goto out_ior;
++ goto out_free_rs;
+ }
+
+ /* Disable master reset, enable NAND clock */
+@@ -795,6 +795,8 @@ static int cafe_nand_probe(struct pci_dev *pdev,
+ /* Disable NAND IRQ in global IRQ mask register */
+ cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
+ free_irq(pdev->irq, mtd);
++ out_free_rs:
++ free_rs(cafe->rs);
+ out_ior:
+ pci_iounmap(pdev, cafe->mmio);
+ out_free_mtd:
1295 |
+index 52100d4fe5a25..d3b37cebcfde8 100644 |
1296 |
+--- a/drivers/net/dsa/b53/b53_common.c |
1297 |
++++ b/drivers/net/dsa/b53/b53_common.c |
1298 |
+@@ -1083,7 +1083,7 @@ static void b53_force_link(struct b53_device *dev, int port, int link) |
1299 |
+ u8 reg, val, off; |
1300 |
+ |
1301 |
+ /* Override the port settings */ |
1302 |
+- if (port == dev->cpu_port) { |
1303 |
++ if (port == dev->imp_port) { |
1304 |
+ off = B53_PORT_OVERRIDE_CTRL; |
1305 |
+ val = PORT_OVERRIDE_EN; |
1306 |
+ } else { |
1307 |
+@@ -1107,7 +1107,7 @@ static void b53_force_port_config(struct b53_device *dev, int port, |
1308 |
+ u8 reg, val, off; |
1309 |
+ |
1310 |
+ /* Override the port settings */ |
1311 |
+- if (port == dev->cpu_port) { |
1312 |
++ if (port == dev->imp_port) { |
1313 |
+ off = B53_PORT_OVERRIDE_CTRL; |
1314 |
+ val = PORT_OVERRIDE_EN; |
1315 |
+ } else { |
1316 |
+@@ -1175,7 +1175,7 @@ static void b53_adjust_link(struct dsa_switch *ds, int port, |
1317 |
+ b53_force_link(dev, port, phydev->link); |
1318 |
+ |
1319 |
+ if (is531x5(dev) && phy_interface_is_rgmii(phydev)) { |
1320 |
+- if (port == 8) |
1321 |
++ if (port == dev->imp_port) |
1322 |
+ off = B53_RGMII_CTRL_IMP; |
1323 |
+ else |
1324 |
+ off = B53_RGMII_CTRL_P(port); |
1325 |
+@@ -2238,6 +2238,7 @@ struct b53_chip_data { |
1326 |
+ const char *dev_name; |
1327 |
+ u16 vlans; |
1328 |
+ u16 enabled_ports; |
1329 |
++ u8 imp_port; |
1330 |
+ u8 cpu_port; |
1331 |
+ u8 vta_regs[3]; |
1332 |
+ u8 arl_bins; |
1333 |
+@@ -2262,6 +2263,7 @@ static const struct b53_chip_data b53_switch_chips[] = { |
1334 |
+ .enabled_ports = 0x1f, |
1335 |
+ .arl_bins = 2, |
1336 |
+ .arl_buckets = 1024, |
1337 |
++ .imp_port = 5, |
1338 |
+ .cpu_port = B53_CPU_PORT_25, |
1339 |
+ .duplex_reg = B53_DUPLEX_STAT_FE, |
1340 |
+ }, |
1341 |
+@@ -2272,6 +2274,7 @@ static const struct b53_chip_data b53_switch_chips[] = { |
1342 |
+ .enabled_ports = 0x1f, |
1343 |
+ .arl_bins = 2, |
1344 |
+ .arl_buckets = 1024, |
1345 |
++ .imp_port = 5, |
1346 |
+ .cpu_port = B53_CPU_PORT_25, |
1347 |
+ .duplex_reg = B53_DUPLEX_STAT_FE, |
1348 |
+ }, |
1349 |
+@@ -2282,6 +2285,7 @@ static const struct b53_chip_data b53_switch_chips[] = { |
1350 |
+ .enabled_ports = 0x1f, |
1351 |
+ .arl_bins = 4, |
1352 |
+ .arl_buckets = 1024, |
1353 |
++ .imp_port = 8, |
1354 |
+ .cpu_port = B53_CPU_PORT, |
1355 |
+ .vta_regs = B53_VTA_REGS, |
1356 |
+ .duplex_reg = B53_DUPLEX_STAT_GE, |
1357 |
+@@ -2295,6 +2299,7 @@ static const struct b53_chip_data b53_switch_chips[] = { |
1358 |
+ .enabled_ports = 0x1f, |
1359 |
+ .arl_bins = 4, |
1360 |
+ .arl_buckets = 1024, |
1361 |
++ .imp_port = 8, |
1362 |
+ .cpu_port = B53_CPU_PORT, |
1363 |
+ .vta_regs = B53_VTA_REGS, |
1364 |
+ .duplex_reg = B53_DUPLEX_STAT_GE, |
1365 |
+@@ -2308,6 +2313,7 @@ static const struct b53_chip_data b53_switch_chips[] = { |
1366 |
+ .enabled_ports = 0x1f, |
1367 |
+ .arl_bins = 4, |
1368 |
+ .arl_buckets = 1024, |
1369 |
++ .imp_port = 8, |
1370 |
+ .cpu_port = B53_CPU_PORT, |
1371 |
+ .vta_regs = B53_VTA_REGS_9798, |
1372 |
+ .duplex_reg = B53_DUPLEX_STAT_GE, |
1373 |
+@@ -2321,6 +2327,7 @@ static const struct b53_chip_data b53_switch_chips[] = { |
1374 |
+ .enabled_ports = 0x7f, |
1375 |
+ .arl_bins = 4, |
1376 |
+ .arl_buckets = 1024, |
1377 |
++ .imp_port = 8, |
1378 |
+ .cpu_port = B53_CPU_PORT, |
1379 |
+ .vta_regs = B53_VTA_REGS_9798, |
1380 |
+ .duplex_reg = B53_DUPLEX_STAT_GE, |
1381 |
+@@ -2335,6 +2342,7 @@ static const struct b53_chip_data b53_switch_chips[] = { |
1382 |
+ .arl_bins = 4, |
1383 |
+ .arl_buckets = 1024, |
1384 |
+ .vta_regs = B53_VTA_REGS, |
1385 |
++ .imp_port = 8, |
1386 |
+ .cpu_port = B53_CPU_PORT, |
1387 |
+ .duplex_reg = B53_DUPLEX_STAT_GE, |
1388 |
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK, |
1389 |
+@@ -2347,6 +2355,7 @@ static const struct b53_chip_data b53_switch_chips[] = { |
1390 |
+ .enabled_ports = 0xff, |
1391 |
+ .arl_bins = 4, |
1392 |
+ .arl_buckets = 1024, |
1393 |
++ .imp_port = 8, |
1394 |
+ .cpu_port = B53_CPU_PORT, |
1395 |
+ .vta_regs = B53_VTA_REGS, |
1396 |
+ .duplex_reg = B53_DUPLEX_STAT_GE, |
1397 |
+@@ -2360,6 +2369,7 @@ static const struct b53_chip_data b53_switch_chips[] = { |
1398 |
+ .enabled_ports = 0x1ff, |
1399 |
+ .arl_bins = 4, |
1400 |
+ .arl_buckets = 1024, |
1401 |
++ .imp_port = 8, |
1402 |
+ .cpu_port = B53_CPU_PORT, |
1403 |
+ .vta_regs = B53_VTA_REGS, |
1404 |
+ .duplex_reg = B53_DUPLEX_STAT_GE, |
1405 |
+@@ -2373,6 +2383,7 @@ static const struct b53_chip_data b53_switch_chips[] = { |
1406 |
+ .enabled_ports = 0, /* pdata must provide them */ |
1407 |
+ .arl_bins = 4, |
1408 |
+ .arl_buckets = 1024, |
1409 |
++ .imp_port = 8, |
1410 |
+ .cpu_port = B53_CPU_PORT, |
1411 |
+ .vta_regs = B53_VTA_REGS_63XX, |
1412 |
+ .duplex_reg = B53_DUPLEX_STAT_63XX, |
1413 |
+@@ -2386,6 +2397,7 @@ static const struct b53_chip_data b53_switch_chips[] = { |
1414 |
+ .enabled_ports = 0x1f, |
1415 |
+ .arl_bins = 4, |
1416 |
+ .arl_buckets = 1024, |
1417 |
++ .imp_port = 8, |
1418 |
+ .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ |
1419 |
+ .vta_regs = B53_VTA_REGS, |
1420 |
+ .duplex_reg = B53_DUPLEX_STAT_GE, |
1421 |
+@@ -2399,6 +2411,7 @@ static const struct b53_chip_data b53_switch_chips[] = { |
1422 |
+ .enabled_ports = 0x1bf, |
1423 |
+ .arl_bins = 4, |
1424 |
+ .arl_buckets = 1024, |
1425 |
++ .imp_port = 8, |
1426 |
+ .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ |
1427 |
+ .vta_regs = B53_VTA_REGS, |
1428 |
+ .duplex_reg = B53_DUPLEX_STAT_GE, |
1429 |
+@@ -2412,6 +2425,7 @@ static const struct b53_chip_data b53_switch_chips[] = { |
1430 |
+ .enabled_ports = 0x1bf, |
1431 |
+ .arl_bins = 4, |
1432 |
+ .arl_buckets = 1024, |
1433 |
++ .imp_port = 8, |
1434 |
+ .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ |
1435 |
+ .vta_regs = B53_VTA_REGS, |
1436 |
+ .duplex_reg = B53_DUPLEX_STAT_GE, |
1437 |
+@@ -2425,6 +2439,7 @@ static const struct b53_chip_data b53_switch_chips[] = { |
1438 |
+ .enabled_ports = 0x1f, |
1439 |
+ .arl_bins = 4, |
1440 |
+ .arl_buckets = 1024, |
1441 |
++ .imp_port = 8, |
1442 |
+ .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ |
1443 |
+ .vta_regs = B53_VTA_REGS, |
1444 |
+ .duplex_reg = B53_DUPLEX_STAT_GE, |
1445 |
+@@ -2438,6 +2453,7 @@ static const struct b53_chip_data b53_switch_chips[] = { |
1446 |
+ .enabled_ports = 0x1f, |
1447 |
+ .arl_bins = 4, |
1448 |
+ .arl_buckets = 1024, |
1449 |
++ .imp_port = 8, |
1450 |
+ .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ |
1451 |
+ .vta_regs = B53_VTA_REGS, |
1452 |
+ .duplex_reg = B53_DUPLEX_STAT_GE, |
1453 |
+@@ -2451,6 +2467,7 @@ static const struct b53_chip_data b53_switch_chips[] = { |
1454 |
+ .enabled_ports = 0x1ff, |
1455 |
+ .arl_bins = 4, |
1456 |
+ .arl_buckets = 1024, |
1457 |
++ .imp_port = 8, |
1458 |
+ .cpu_port = B53_CPU_PORT, |
1459 |
+ .vta_regs = B53_VTA_REGS, |
1460 |
+ .duplex_reg = B53_DUPLEX_STAT_GE, |
1461 |
+@@ -2464,6 +2481,7 @@ static const struct b53_chip_data b53_switch_chips[] = { |
1462 |
+ .enabled_ports = 0x103, |
1463 |
+ .arl_bins = 4, |
1464 |
+ .arl_buckets = 1024, |
1465 |
++ .imp_port = 8, |
1466 |
+ .cpu_port = B53_CPU_PORT, |
1467 |
+ .vta_regs = B53_VTA_REGS, |
1468 |
+ .duplex_reg = B53_DUPLEX_STAT_GE, |
1469 |
+@@ -2477,6 +2495,7 @@ static const struct b53_chip_data b53_switch_chips[] = { |
1470 |
+ .enabled_ports = 0x1ff, |
1471 |
+ .arl_bins = 4, |
1472 |
+ .arl_buckets = 1024, |
1473 |
++ .imp_port = 8, |
1474 |
+ .cpu_port = B53_CPU_PORT, |
1475 |
+ .vta_regs = B53_VTA_REGS, |
1476 |
+ .duplex_reg = B53_DUPLEX_STAT_GE, |
1477 |
+@@ -2490,6 +2509,7 @@ static const struct b53_chip_data b53_switch_chips[] = { |
1478 |
+ .enabled_ports = 0x1ff, |
1479 |
+ .arl_bins = 4, |
1480 |
+ .arl_buckets = 256, |
1481 |
++ .imp_port = 8, |
1482 |
+ .cpu_port = B53_CPU_PORT, |
1483 |
+ .vta_regs = B53_VTA_REGS, |
1484 |
+ .duplex_reg = B53_DUPLEX_STAT_GE, |
1485 |
+@@ -2515,6 +2535,7 @@ static int b53_switch_init(struct b53_device *dev) |
1486 |
+ dev->vta_regs[1] = chip->vta_regs[1]; |
1487 |
+ dev->vta_regs[2] = chip->vta_regs[2]; |
1488 |
+ dev->jumbo_pm_reg = chip->jumbo_pm_reg; |
1489 |
++ dev->imp_port = chip->imp_port; |
1490 |
+ dev->cpu_port = chip->cpu_port; |
1491 |
+ dev->num_vlans = chip->vlans; |
1492 |
+ dev->num_arl_bins = chip->arl_bins; |
1493 |
+@@ -2556,9 +2577,10 @@ static int b53_switch_init(struct b53_device *dev) |
1494 |
+ dev->cpu_port = 5; |
1495 |
+ } |
1496 |
+ |
1497 |
+- /* cpu port is always last */ |
1498 |
+- dev->num_ports = dev->cpu_port + 1; |
1499 |
+ dev->enabled_ports |= BIT(dev->cpu_port); |
1500 |
++ dev->num_ports = fls(dev->enabled_ports); |
1501 |
++ |
1502 |
++ dev->ds->num_ports = min_t(unsigned int, dev->num_ports, DSA_MAX_PORTS); |
1503 |
+ |
1504 |
+ /* Include non standard CPU port built-in PHYs to be probed */ |
1505 |
+ if (is539x(dev) || is531x5(dev)) { |
1506 |
+@@ -2604,7 +2626,6 @@ struct b53_device *b53_switch_alloc(struct device *base, |
1507 |
+ return NULL; |
1508 |
+ |
1509 |
+ ds->dev = base; |
1510 |
+- ds->num_ports = DSA_MAX_PORTS; |
1511 |
+ |
1512 |
+ dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL); |
1513 |
+ if (!dev) |
1514 |
+diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h |
1515 |
+index 7c67409bb186d..bdb2ade7ad622 100644 |
1516 |
+--- a/drivers/net/dsa/b53/b53_priv.h |
1517 |
++++ b/drivers/net/dsa/b53/b53_priv.h |
1518 |
+@@ -122,6 +122,7 @@ struct b53_device { |
1519 |
+ |
1520 |
+ /* used ports mask */ |
1521 |
+ u16 enabled_ports; |
1522 |
++ unsigned int imp_port; |
1523 |
+ unsigned int cpu_port; |
1524 |
+ |
1525 |
+ /* connect specific data */ |
1526 |
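
Aside (not part of the upstream patch): the b53_switch_init() change above stops deriving num_ports from cpu_port + 1 and instead takes fls() of the enabled-ports mask, i.e. the highest used port index plus one, which stays correct when the IMP port is not the last port. A userspace sketch of the same computation, approximating the kernel's fls() with the GCC/Clang __builtin_clz (mask values are illustrative):

    #include <stdio.h>

    /* Stand-in for the kernel's fls(): 1-based index of the highest set
     * bit, or 0 when no bit is set. */
    static int fls32(unsigned int x)
    {
            return x ? 32 - __builtin_clz(x) : 0;
    }

    int main(void)
    {
            unsigned int enabled_ports = 0x1bf; /* ports 0-5, 7, 8 */
            unsigned int cpu_port = 8;

            enabled_ports |= 1u << cpu_port;
            /* Highest used port index + 1, not "cpu_port + 1" */
            printf("num_ports = %d\n", fls32(enabled_ports)); /* 9 */
            return 0;
    }
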
+diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
+index 510324916e916..690e9d9495e75 100644
+--- a/drivers/net/dsa/bcm_sf2.c
++++ b/drivers/net/dsa/bcm_sf2.c
+@@ -38,7 +38,7 @@ static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds)
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ unsigned int port, count = 0;
+
+- for (port = 0; port < ARRAY_SIZE(priv->port_sts); port++) {
++ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_is_cpu_port(ds, port))
+ continue;
+ if (priv->port_sts[port].enabled)
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+index 9108b497b3c99..03eb0179ec008 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+@@ -1225,7 +1225,7 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
+
+ /* SR-IOV capability was enabled but there are no VFs*/
+ if (iov->total == 0) {
+- err = -EINVAL;
++ err = 0;
+ goto failed;
+ }
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 849ae99a955a3..26179e437bbfd 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -272,6 +272,7 @@ static const u16 bnxt_async_events_arr[] = {
+ ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
+ ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
+ ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
++ ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
+ ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
+ };
+
+@@ -1304,8 +1305,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ } else {
+ tpa_info->hash_type = PKT_HASH_TYPE_NONE;
+ tpa_info->gso_type = 0;
+- if (netif_msg_rx_err(bp))
+- netdev_warn(bp->dev, "TPA packet without valid hash\n");
++ netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
+ }
+ tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
+ tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
+@@ -2081,10 +2081,9 @@ static int bnxt_async_event_process(struct bnxt *bp,
+ goto async_event_process_exit;
+ set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
+ break;
+- case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY:
+- if (netif_msg_hw(bp))
+- netdev_warn(bp->dev, "Received RESET_NOTIFY event, data1: 0x%x, data2: 0x%x\n",
+- data1, data2);
++ case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
++ char *fatal_str = "non-fatal";
++
+ if (!bp->fw_health)
+ goto async_event_process_exit;
+
+@@ -2096,42 +2095,57 @@ static int bnxt_async_event_process(struct bnxt *bp,
+ if (!bp->fw_reset_max_dsecs)
+ bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
+ if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
+- netdev_warn(bp->dev, "Firmware fatal reset event received\n");
++ fatal_str = "fatal";
+ set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
+- } else {
+- netdev_warn(bp->dev, "Firmware non-fatal reset event received, max wait time %d msec\n",
+- bp->fw_reset_max_dsecs * 100);
+ }
++ netif_warn(bp, hw, bp->dev,
++ "Firmware %s reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
++ fatal_str, data1, data2,
++ bp->fw_reset_min_dsecs * 100,
++ bp->fw_reset_max_dsecs * 100);
+ set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
+ break;
++ }
+ case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+
+ if (!fw_health)
+ goto async_event_process_exit;
+
+- fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
+- fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
+- if (!fw_health->enabled)
++ if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
++ fw_health->enabled = false;
++ netif_info(bp, drv, bp->dev,
++ "Error recovery info: error recovery[0]\n");
+ break;
+-
+- if (netif_msg_drv(bp))
+- netdev_info(bp->dev, "Error recovery info: error recovery[%d], master[%d], reset count[0x%x], health status: 0x%x\n",
+- fw_health->enabled, fw_health->master,
+- bnxt_fw_health_readl(bp,
+- BNXT_FW_RESET_CNT_REG),
+- bnxt_fw_health_readl(bp,
+- BNXT_FW_HEALTH_REG));
++ }
++ fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
+ fw_health->tmr_multiplier =
+ DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
+ bp->current_interval * 10);
+ fw_health->tmr_counter = fw_health->tmr_multiplier;
+- fw_health->last_fw_heartbeat =
+- bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
++ if (!fw_health->enabled)
++ fw_health->last_fw_heartbeat =
++ bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
+ fw_health->last_fw_reset_cnt =
+ bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
++ netif_info(bp, drv, bp->dev,
++ "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
++ fw_health->master, fw_health->last_fw_reset_cnt,
++ bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
++ if (!fw_health->enabled) {
++ /* Make sure tmr_counter is set and visible to
++ * bnxt_health_check() before setting enabled to true.
++ */
++ smp_wmb();
++ fw_health->enabled = true;
++ }
+ goto async_event_process_exit;
+ }
++ case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
++ netif_notice(bp, hw, bp->dev,
++ "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
++ data1, data2);
++ goto async_event_process_exit;
+ case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
+ struct bnxt_rx_ring_info *rxr;
+ u16 grp_idx;
+@@ -2591,6 +2605,9 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
+ struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
+ int j;
+
++ if (!txr->tx_buf_ring)
++ continue;
++
+ for (j = 0; j < max_idx;) {
+ struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
+ struct sk_buff *skb;
+@@ -2675,6 +2692,9 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
+ }
+
+ skip_rx_tpa_free:
++ if (!rxr->rx_buf_ring)
++ goto skip_rx_buf_free;
++
+ for (i = 0; i < max_idx; i++) {
+ struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
+ dma_addr_t mapping = rx_buf->mapping;
+@@ -2697,6 +2717,11 @@ skip_rx_tpa_free:
+ kfree(data);
+ }
+ }
++
++skip_rx_buf_free:
++ if (!rxr->rx_agg_ring)
++ goto skip_rx_agg_free;
++
+ for (i = 0; i < max_agg_idx; i++) {
+ struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
+ struct page *page = rx_agg_buf->page;
+@@ -2713,6 +2738,8 @@ skip_rx_tpa_free:
+
+ __free_page(page);
+ }
++
++skip_rx_agg_free:
+ if (rxr->rx_page) {
+ __free_page(rxr->rx_page);
+ rxr->rx_page = NULL;
+@@ -10719,6 +10746,8 @@ static void bnxt_fw_health_check(struct bnxt *bp)
+ if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ return;
+
++ /* Make sure it is enabled before checking the tmr_counter. */
++ smp_rmb();
+ if (fw_health->tmr_counter) {
+ fw_health->tmr_counter--;
+ return;
+@@ -11623,6 +11652,11 @@ static void bnxt_fw_reset_task(struct work_struct *work)
+ dev_close(bp->dev);
+ }
+
++ if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
++ bp->fw_health->enabled) {
++ bp->fw_health->last_fw_reset_cnt =
++ bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
++ }
+ bp->fw_reset_state = 0;
+ /* Make sure fw_reset_state is 0 before clearing the flag */
+ smp_mb__before_atomic();
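
Aside (not part of the upstream patch): the smp_wmb()/smp_rmb() pair added to bnxt above is a publish pattern: tmr_counter must be globally visible before fw_health->enabled reads true, and the reader must order its loads to match. In portable C11 the equivalent ordering is a release store paired with an acquire load; a minimal sketch with invented names:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static int tmr_counter;             /* payload, written first */
    static atomic_bool enabled;         /* flag, published last */

    static void publisher(void)
    {
            tmr_counter = 10;
            /* release: payload is visible before the flag reads true */
            atomic_store_explicit(&enabled, true, memory_order_release);
    }

    static void consumer(void)
    {
            /* acquire: pairs with the release store above */
            if (atomic_load_explicit(&enabled, memory_order_acquire))
                    printf("tmr_counter = %d\n", tmr_counter);
    }

    int main(void)
    {
            publisher();
            consumer();
            return 0;
    }
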
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+index 8b0e916afe6b1..e2fd625fc6d20 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+@@ -452,7 +452,7 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
+ return rc;
+
+ ver_resp = &bp->ver_resp;
+- sprintf(buf, "%X", ver_resp->chip_rev);
++ sprintf(buf, "%c%d", 'A' + ver_resp->chip_rev, ver_resp->chip_metal);
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_FIXED,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_REV, buf);
+ if (rc)
+@@ -474,8 +474,8 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
+ if (BNXT_PF(bp) && !bnxt_hwrm_get_nvm_cfg_ver(bp, &nvm_cfg_ver)) {
+ u32 ver = nvm_cfg_ver.vu32;
+
+- sprintf(buf, "%d.%d.%d", (ver >> 16) & 0xf, (ver >> 8) & 0xf,
+- ver & 0xf);
++ sprintf(buf, "%d.%d.%d", (ver >> 16) & 0xff, (ver >> 8) & 0xff,
++ ver & 0xff);
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
+ buf);
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+index 5e4429b14b8ca..2186706cf9130 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+@@ -1870,9 +1870,6 @@ bnxt_tc_indr_block_cb_lookup(struct bnxt *bp, struct net_device *netdev)
+ {
+ struct bnxt_flower_indr_block_cb_priv *cb_priv;
+
+- /* All callback list access should be protected by RTNL. */
+- ASSERT_RTNL();
+-
+ list_for_each_entry(cb_priv, &bp->tc_indr_block_list, list)
+ if (cb_priv->tunnel_netdev == netdev)
+ return cb_priv;
+diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+index 0e4a0f413960a..c6db85fe16291 100644
+--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
++++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+@@ -1153,6 +1153,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ if (!adapter->registered_device_map) {
+ pr_err("%s: could not register any net devices\n",
+ pci_name(pdev));
++ err = -EINVAL;
+ goto out_release_adapter_res;
+ }
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index 92ca3b21968fe..936b9cfe1a62f 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -60,6 +60,7 @@ MODULE_PARM_DESC(debug, " Network interface message level setting");
+ #define HNS3_OUTER_VLAN_TAG 2
+
+ #define HNS3_MIN_TX_LEN 33U
++#define HNS3_MIN_TUN_PKT_LEN 65U
+
+ /* hns3_pci_tbl - PCI Device ID Table
+ *
+@@ -913,8 +914,11 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
+ l4.tcp->doff);
+ break;
+ case IPPROTO_UDP:
+- if (hns3_tunnel_csum_bug(skb))
+- return skb_checksum_help(skb);
++ if (hns3_tunnel_csum_bug(skb)) {
++ int ret = skb_put_padto(skb, HNS3_MIN_TUN_PKT_LEN);
++
++ return ret ? ret : skb_checksum_help(skb);
++ }
+
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index 2261de5caf863..59ec538eba1f0 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -1463,9 +1463,10 @@ static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
+
+ static int hclge_configure(struct hclge_dev *hdev)
+ {
++ const struct cpumask *cpumask = cpu_online_mask;
+ struct hclge_cfg cfg;
+ unsigned int i;
+- int ret;
++ int node, ret;
+
+ ret = hclge_get_cfg(hdev, &cfg);
+ if (ret)
+@@ -1526,11 +1527,12 @@ static int hclge_configure(struct hclge_dev *hdev)
+
+ hclge_init_kdump_kernel_config(hdev);
+
+- /* Set the init affinity based on pci func number */
+- i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
+- i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
+- cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
+- &hdev->affinity_mask);
++ /* Set the affinity based on numa node */
++ node = dev_to_node(&hdev->pdev->dev);
++ if (node != NUMA_NO_NODE)
++ cpumask = cpumask_of_node(node);
++
++ cpumask_copy(&hdev->affinity_mask, cpumask);
+
+ return ret;
+ }
+@@ -7003,11 +7005,12 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
+ hclge_clear_arfs_rules(handle);
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+- /* If it is not PF reset, the firmware will disable the MAC,
++ /* If it is not PF reset or FLR, the firmware will disable the MAC,
+ * so it only need to stop phy here.
+ */
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
+- hdev->reset_type != HNAE3_FUNC_RESET) {
++ hdev->reset_type != HNAE3_FUNC_RESET &&
++ hdev->reset_type != HNAE3_FLR_RESET) {
+ hclge_mac_stop_phy(hdev);
+ hclge_update_link_status(hdev);
+ return;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index d3010d5ab3665..447457cacf973 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -2352,6 +2352,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
+
+ hclgevf_enable_vector(&hdev->misc_vector, false);
+ event_cause = hclgevf_check_evt_cause(hdev, &clearval);
++ if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
++ hclgevf_clear_event_cause(hdev, clearval);
+
+ switch (event_cause) {
+ case HCLGEVF_VECTOR0_EVENT_RST:
+@@ -2364,10 +2366,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
+ break;
+ }
+
+- if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
+- hclgevf_clear_event_cause(hdev, clearval);
++ if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
+ hclgevf_enable_vector(&hdev->misc_vector, true);
+- }
+
+ return IRQ_HANDLED;
+ }
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 3134c1988db36..bb8d0a0f48ee0 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -4478,6 +4478,14 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
+ return 0;
+ }
+
++ if (adapter->failover_pending) {
++ adapter->init_done_rc = -EAGAIN;
++ netdev_dbg(netdev, "Failover pending, ignoring login response\n");
++ complete(&adapter->init_done);
++ /* login response buffer will be released on reset */
++ return 0;
++ }
++
+ netdev->mtu = adapter->req_mtu - ETH_HLEN;
+
+ netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+index 644d28b0692b3..c26652436c53a 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+@@ -84,7 +84,8 @@ static void rvu_setup_hw_capabilities(struct rvu *rvu)
+ */
+ int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
+ {
+- unsigned long timeout = jiffies + usecs_to_jiffies(10000);
++ unsigned long timeout = jiffies + usecs_to_jiffies(20000);
++ bool twice = false;
+ void __iomem *reg;
+ u64 reg_val;
+
+@@ -99,6 +100,15 @@ again:
+ usleep_range(1, 5);
+ goto again;
+ }
++ /* In scenarios where CPU is scheduled out before checking
++ * 'time_before' (above) and gets scheduled in such that
++ * jiffies are beyond timeout value, then check again if HW is
++ * done with the operation in the meantime.
++ */
++ if (!twice) {
++ twice = true;
++ goto again;
++ }
+ return -EBUSY;
+ }
+
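
Aside (not part of the upstream patch): the rvu_poll_reg() change above covers the case where the polling thread is scheduled out long enough for the deadline to lapse between the last register read and the timeout check, so it looks at the hardware one more time before declaring -EBUSY. A userspace sketch of the same poll-then-recheck shape (hw_done() is an invented stand-in for the MMIO read; timing simplified):

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static bool hw_done(void) { return true; } /* stand-in probe */

    static int poll_reg(double timeout_s)
    {
            struct timespec start, now;
            bool twice = false;

            clock_gettime(CLOCK_MONOTONIC, &start);
    again:
            do {
                    if (hw_done())
                            return 0;
                    clock_gettime(CLOCK_MONOTONIC, &now);
            } while (now.tv_sec - start.tv_sec +
                     (now.tv_nsec - start.tv_nsec) / 1e9 < timeout_s);

            if (!twice) {           /* one extra look after the deadline:
                                     * an expired timer does not prove the
                                     * operation failed */
                    twice = true;
                    goto again;
            }
            return -1;
    }

    int main(void)
    {
            printf("poll_reg: %d\n", poll_reg(0.02));
            return 0;
    }
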
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+index 3dfcb20e97c6f..857be86b4a11a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+@@ -1007,7 +1007,7 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
+ err = mlx5_core_alloc_pd(dev, &tracer->buff.pdn);
+ if (err) {
+ mlx5_core_warn(dev, "FWTracer: Failed to allocate PD %d\n", err);
+- return err;
++ goto err_cancel_work;
+ }
+
+ err = mlx5_fw_tracer_create_mkey(tracer);
+@@ -1031,6 +1031,7 @@ err_notifier_unregister:
+ mlx5_core_destroy_mkey(dev, &tracer->buff.mkey);
+ err_dealloc_pd:
+ mlx5_core_dealloc_pd(dev, tracer->buff.pdn);
++err_cancel_work:
+ cancel_work_sync(&tracer->read_fw_strings_work);
+ return err;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+index e6f782743fbe8..2fdea05eec1de 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+@@ -298,9 +298,6 @@ mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
+ {
+ struct mlx5e_rep_indr_block_priv *cb_priv;
+
+- /* All callback list access should be protected by RTNL. */
+- ASSERT_RTNL();
+-
+ list_for_each_entry(cb_priv,
+ &rpriv->uplink_priv.tc_indr_block_priv_list,
+ list)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index 1d4b4e6f6fb41..0ff034b0866e2 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -1675,14 +1675,13 @@ static int build_match_list(struct match_list *match_head,
+
+ curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
+ if (!curr_match) {
++ rcu_read_unlock();
+ free_match_list(match_head, ft_locked);
+- err = -ENOMEM;
+- goto out;
++ return -ENOMEM;
+ }
+ curr_match->g = g;
+ list_add_tail(&curr_match->list, &match_head->list);
+ }
+-out:
+ rcu_read_unlock();
+ return err;
+ }
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
+index e95969c462e46..3f34e6da72958 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
+@@ -1732,9 +1732,6 @@ nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
+ struct nfp_flower_indr_block_cb_priv *cb_priv;
+ struct nfp_flower_priv *priv = app->priv;
+
+- /* All callback list access should be protected by RTNL. */
+- ASSERT_RTNL();
+-
+ list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
+ if (cb_priv->netdev == netdev)
+ return cb_priv;
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+index caeef25c89bb1..2cd14ee95c1ff 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+@@ -3376,6 +3376,7 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
+ struct qed_nvm_image_att *p_image_att)
+ {
+ enum nvm_image_type type;
++ int rc;
+ u32 i;
+
+ /* Translate image_id into MFW definitions */
+@@ -3404,7 +3405,10 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
+ return -EINVAL;
+ }
+
+- qed_mcp_nvm_info_populate(p_hwfn);
++ rc = qed_mcp_nvm_info_populate(p_hwfn);
++ if (rc)
++ return rc;
++
+ for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
+ if (type == p_hwfn->nvm_info.image_att[i].image_type)
+ break;
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+index e6784023bce42..aa7ee43f92525 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+@@ -439,7 +439,6 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c, 1);
+ msleep(20);
+
+- qlcnic_rom_unlock(adapter);
+ /* big hammer don't reset CAM block on reset */
+ QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff);
+
+diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
+index 7c74318620b1d..ccdfa930130bc 100644
+--- a/drivers/net/ethernet/rdc/r6040.c
++++ b/drivers/net/ethernet/rdc/r6040.c
+@@ -119,6 +119,8 @@
+ #define PHY_ST 0x8A /* PHY status register */
+ #define MAC_SM 0xAC /* MAC status machine */
+ #define MAC_SM_RST 0x0002 /* MAC status machine reset */
++#define MD_CSC 0xb6 /* MDC speed control register */
++#define MD_CSC_DEFAULT 0x0030
+ #define MAC_ID 0xBE /* Identifier register */
+
+ #define TX_DCNT 0x80 /* TX descriptor count */
+@@ -355,8 +357,9 @@ static void r6040_reset_mac(struct r6040_private *lp)
+ {
+ void __iomem *ioaddr = lp->base;
+ int limit = MAC_DEF_TIMEOUT;
+- u16 cmd;
++ u16 cmd, md_csc;
+
++ md_csc = ioread16(ioaddr + MD_CSC);
+ iowrite16(MAC_RST, ioaddr + MCR1);
+ while (limit--) {
+ cmd = ioread16(ioaddr + MCR1);
+@@ -368,6 +371,10 @@ static void r6040_reset_mac(struct r6040_private *lp)
+ iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
+ iowrite16(0, ioaddr + MAC_SM);
+ mdelay(5);
++
++ /* Restore MDIO clock frequency */
++ if (md_csc != MD_CSC_DEFAULT)
++ iowrite16(md_csc, ioaddr + MD_CSC);
+ }
+
+ static void r6040_init_mac_regs(struct net_device *dev)
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index 5cab2d3c00236..8927d59977458 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -2533,6 +2533,7 @@ static netdev_tx_t sh_eth_start_xmit(struct sk_buff *skb,
+ else
+ txdesc->status |= cpu_to_le32(TD_TACT);
+
++ wmb(); /* cur_tx must be incremented after TACT bit was set */
+ mdp->cur_tx++;
+
+ if (!(sh_eth_read(ndev, EDTRR) & mdp->cd->edtrr_trns))
+diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
+index b3790aa952a15..0747866d60abc 100644
+--- a/drivers/net/ipa/ipa_table.c
++++ b/drivers/net/ipa/ipa_table.c
+@@ -451,7 +451,8 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
+ * table region determines the number of entries it has.
+ */
+ if (filter) {
+- count = hweight32(ipa->filter_map);
++ /* Include one extra "slot" to hold the filter map itself */
++ count = 1 + hweight32(ipa->filter_map);
+ hash_count = hash_mem->size ? count : 0;
+ } else {
+ count = mem->size / IPA_TABLE_ENTRY_SIZE;
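
Aside (not part of the upstream patch): the ipa_table_init_add() fix above sizes the filter table as one slot for the endpoint bitmap itself plus one slot per bit set in filter_map. hweight32() is a population count; a userspace sketch using the GCC/Clang __builtin_popcount (the mask value is invented):

    #include <stdio.h>

    /* Stand-in for the kernel's hweight32(): number of set bits. */
    static unsigned int hweight32(unsigned int x)
    {
            return (unsigned int)__builtin_popcount(x);
    }

    int main(void)
    {
            unsigned int filter_map = 0x36;  /* endpoints 1, 2, 4, 5 */

            /* One entry per filtering endpoint, plus one extra slot
             * for the bitmap itself, as in the fix above. */
            unsigned int count = 1 + hweight32(filter_map);

            printf("count = %u\n", count);   /* prints 5 */
            return 0;
    }
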
+diff --git a/drivers/net/phy/dp83640_reg.h b/drivers/net/phy/dp83640_reg.h
+index 21aa24c741b96..daae7fa58fb82 100644
+--- a/drivers/net/phy/dp83640_reg.h
++++ b/drivers/net/phy/dp83640_reg.h
+@@ -5,7 +5,7 @@
+ #ifndef HAVE_DP83640_REGISTERS
+ #define HAVE_DP83640_REGISTERS
+
+-#define PAGE0 0x0000
++/* #define PAGE0 0x0000 */
+ #define PHYCR2 0x001c /* PHY Control Register 2 */
+
+ #define PAGE4 0x0004
+diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
+index eb100eb33de3d..77ac5a721e7b6 100644
+--- a/drivers/net/usb/cdc_mbim.c
++++ b/drivers/net/usb/cdc_mbim.c
+@@ -653,6 +653,11 @@ static const struct usb_device_id mbim_devs[] = {
+ .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
+ },
+
++ /* Telit LN920 */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1061, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
++ .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
++ },
++
+ /* default entry */
+ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&cdc_mbim_info_zlp,
+diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
+index 5b3aff2c279f7..f269337c82c58 100644
+--- a/drivers/net/usb/hso.c
++++ b/drivers/net/usb/hso.c
+@@ -2537,13 +2537,17 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
+ if (!hso_net->mux_bulk_tx_buf)
+ goto err_free_tx_urb;
+
+- add_net_device(hso_dev);
++ result = add_net_device(hso_dev);
++ if (result) {
++ dev_err(&interface->dev, "Failed to add net device\n");
++ goto err_free_tx_buf;
++ }
+
+ /* registering our net device */
+ result = register_netdev(net);
+ if (result) {
+ dev_err(&interface->dev, "Failed to register device\n");
+- goto err_free_tx_buf;
++ goto err_rmv_ndev;
+ }
+
+ hso_log_port(hso_dev);
+@@ -2552,8 +2556,9 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
+
+ return hso_dev;
+
+-err_free_tx_buf:
++err_rmv_ndev:
+ remove_net_device(hso_dev);
++err_free_tx_buf:
+ kfree(hso_net->mux_bulk_tx_buf);
+ err_free_tx_urb:
+ usb_free_urb(hso_net->mux_bulk_tx_urb);
+diff --git a/drivers/ntb/test/ntb_msi_test.c b/drivers/ntb/test/ntb_msi_test.c
+index 7095ecd6223a7..4e18e08776c98 100644
+--- a/drivers/ntb/test/ntb_msi_test.c
++++ b/drivers/ntb/test/ntb_msi_test.c
+@@ -369,8 +369,10 @@ static int ntb_msit_probe(struct ntb_client *client, struct ntb_dev *ntb)
+ if (ret)
+ goto remove_dbgfs;
+
+- if (!nm->isr_ctx)
++ if (!nm->isr_ctx) {
++ ret = -ENOMEM;
+ goto remove_dbgfs;
++ }
+
+ ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
+
+diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
+index 89df1350fefd8..65e1e5cf1b29a 100644
+--- a/drivers/ntb/test/ntb_perf.c
++++ b/drivers/ntb/test/ntb_perf.c
+@@ -598,6 +598,7 @@ static int perf_setup_inbuf(struct perf_peer *peer)
+ return -ENOMEM;
+ }
+ if (!IS_ALIGNED(peer->inbuf_xlat, xlat_align)) {
++ ret = -EINVAL;
+ dev_err(&perf->ntb->dev, "Unaligned inbuf allocated\n");
+ goto err_free_inbuf;
+ }
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index c9a925999c6ea..a6b3b07627630 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -273,6 +273,12 @@ static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
+ } while (ret > 0);
+ }
+
++static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
++{
++ return !list_empty(&queue->send_list) ||
++ !llist_empty(&queue->req_list) || queue->more_requests;
++}
++
+ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
+ bool sync, bool last)
+ {
+@@ -293,9 +299,10 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
+ nvme_tcp_send_all(queue);
+ queue->more_requests = false;
+ mutex_unlock(&queue->send_mutex);
+- } else if (last) {
+- queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
+ }
++
++ if (last && nvme_tcp_queue_more(queue))
++ queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
+ }
+
+ static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
+@@ -890,12 +897,6 @@ done:
+ read_unlock_bh(&sk->sk_callback_lock);
+ }
+
+-static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
+-{
+- return !list_empty(&queue->send_list) ||
+- !llist_empty(&queue->req_list) || queue->more_requests;
+-}
+-
+ static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
+ {
+ queue->request = NULL;
+@@ -1132,8 +1133,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
+ pending = true;
+ else if (unlikely(result < 0))
+ break;
+- } else
+- pending = !llist_empty(&queue->req_list);
++ }
+
+ result = nvme_tcp_try_recv(queue);
+ if (result > 0)
+diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
+index d34ca0fda0f66..8a6d68e13f301 100644
+--- a/drivers/pci/controller/cadence/pci-j721e.c
++++ b/drivers/pci/controller/cadence/pci-j721e.c
+@@ -25,6 +25,7 @@
+ #define STATUS_REG_SYS_2 0x508
+ #define STATUS_CLR_REG_SYS_2 0x708
+ #define LINK_DOWN BIT(1)
++#define J7200_LINK_DOWN BIT(10)
+
+ #define J721E_PCIE_USER_CMD_STATUS 0x4
+ #define LINK_TRAINING_ENABLE BIT(0)
+@@ -54,6 +55,7 @@ struct j721e_pcie {
+ struct cdns_pcie *cdns_pcie;
+ void __iomem *user_cfg_base;
+ void __iomem *intd_cfg_base;
++ u32 linkdown_irq_regfield;
+ };
+
+ enum j721e_pcie_mode {
+@@ -63,7 +65,10 @@ enum j721e_pcie_mode {
+
+ struct j721e_pcie_data {
+ enum j721e_pcie_mode mode;
+- bool quirk_retrain_flag;
++ unsigned int quirk_retrain_flag:1;
++ unsigned int quirk_detect_quiet_flag:1;
++ u32 linkdown_irq_regfield;
++ unsigned int byte_access_allowed:1;
+ };
+
+ static inline u32 j721e_pcie_user_readl(struct j721e_pcie *pcie, u32 offset)
+@@ -95,12 +100,12 @@ static irqreturn_t j721e_pcie_link_irq_handler(int irq, void *priv)
+ u32 reg;
+
+ reg = j721e_pcie_intd_readl(pcie, STATUS_REG_SYS_2);
+- if (!(reg & LINK_DOWN))
++ if (!(reg & pcie->linkdown_irq_regfield))
+ return IRQ_NONE;
+
+ dev_err(dev, "LINK DOWN!\n");
+
+- j721e_pcie_intd_writel(pcie, STATUS_CLR_REG_SYS_2, LINK_DOWN);
++ j721e_pcie_intd_writel(pcie, STATUS_CLR_REG_SYS_2, pcie->linkdown_irq_regfield);
+ return IRQ_HANDLED;
+ }
+
+@@ -109,7 +114,7 @@ static void j721e_pcie_config_link_irq(struct j721e_pcie *pcie)
+ u32 reg;
+
+ reg = j721e_pcie_intd_readl(pcie, ENABLE_REG_SYS_2);
+- reg |= LINK_DOWN;
++ reg |= pcie->linkdown_irq_regfield;
+ j721e_pcie_intd_writel(pcie, ENABLE_REG_SYS_2, reg);
+ }
+
+@@ -272,10 +277,36 @@ static struct pci_ops cdns_ti_pcie_host_ops = {
+ static const struct j721e_pcie_data j721e_pcie_rc_data = {
+ .mode = PCI_MODE_RC,
+ .quirk_retrain_flag = true,
++ .byte_access_allowed = false,
++ .linkdown_irq_regfield = LINK_DOWN,
+ };
+
+ static const struct j721e_pcie_data j721e_pcie_ep_data = {
+ .mode = PCI_MODE_EP,
++ .linkdown_irq_regfield = LINK_DOWN,
++};
++
++static const struct j721e_pcie_data j7200_pcie_rc_data = {
++ .mode = PCI_MODE_RC,
++ .quirk_detect_quiet_flag = true,
++ .linkdown_irq_regfield = J7200_LINK_DOWN,
++ .byte_access_allowed = true,
++};
++
++static const struct j721e_pcie_data j7200_pcie_ep_data = {
++ .mode = PCI_MODE_EP,
++ .quirk_detect_quiet_flag = true,
++};
++
++static const struct j721e_pcie_data am64_pcie_rc_data = {
++ .mode = PCI_MODE_RC,
++ .linkdown_irq_regfield = J7200_LINK_DOWN,
++ .byte_access_allowed = true,
++};
++
++static const struct j721e_pcie_data am64_pcie_ep_data = {
++ .mode = PCI_MODE_EP,
++ .linkdown_irq_regfield = J7200_LINK_DOWN,
+ };
+
+ static const struct of_device_id of_j721e_pcie_match[] = {
+@@ -287,6 +318,22 @@ static const struct of_device_id of_j721e_pcie_match[] = {
+ .compatible = "ti,j721e-pcie-ep",
+ .data = &j721e_pcie_ep_data,
+ },
++ {
++ .compatible = "ti,j7200-pcie-host",
++ .data = &j7200_pcie_rc_data,
++ },
++ {
++ .compatible = "ti,j7200-pcie-ep",
++ .data = &j7200_pcie_ep_data,
++ },
++ {
++ .compatible = "ti,am64-pcie-host",
++ .data = &am64_pcie_rc_data,
++ },
++ {
++ .compatible = "ti,am64-pcie-ep",
++ .data = &am64_pcie_ep_data,
++ },
+ {},
+ };
+
+@@ -319,6 +366,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
+
+ pcie->dev = dev;
+ pcie->mode = mode;
++ pcie->linkdown_irq_regfield = data->linkdown_irq_regfield;
+
+ base = devm_platform_ioremap_resource_byname(pdev, "intd_cfg");
+ if (IS_ERR(base))
+@@ -378,9 +426,11 @@ static int j721e_pcie_probe(struct platform_device *pdev)
+ goto err_get_sync;
+ }
+
+- bridge->ops = &cdns_ti_pcie_host_ops;
++ if (!data->byte_access_allowed)
++ bridge->ops = &cdns_ti_pcie_host_ops;
+ rc = pci_host_bridge_priv(bridge);
+ rc->quirk_retrain_flag = data->quirk_retrain_flag;
++ rc->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
+
+ cdns_pcie = &rc->pcie;
+ cdns_pcie->dev = dev;
+@@ -430,6 +480,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
+ ret = -ENOMEM;
+ goto err_get_sync;
+ }
++ ep->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
+
+ cdns_pcie = &ep->pcie;
+ cdns_pcie->dev = dev;
+diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
+index 84cc58dc8512c..1af14474abcf1 100644
+--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
++++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
+@@ -578,6 +578,10 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
+ ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE;
+ /* Reserve region 0 for IRQs */
+ set_bit(0, &ep->ob_region_map);
++
++ if (ep->quirk_detect_quiet_flag)
++ cdns_pcie_detect_quiet_min_delay_set(&ep->pcie);
++
+ spin_lock_init(&ep->lock);
+
+ return 0;
+diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
+index 73dcf8cf98fbf..a40ed9e12b4bb 100644
+--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
++++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
+@@ -497,6 +497,9 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
+ return PTR_ERR(rc->cfg_base);
+ rc->cfg_res = res;
+
++ if (rc->quirk_detect_quiet_flag)
++ cdns_pcie_detect_quiet_min_delay_set(&rc->pcie);
++
+ ret = cdns_pcie_start_link(pcie);
+ if (ret) {
+ dev_err(dev, "Failed to start link\n");
+diff --git a/drivers/pci/controller/cadence/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c
+index 3c3646502d05c..52767f26048fd 100644
+--- a/drivers/pci/controller/cadence/pcie-cadence.c
++++ b/drivers/pci/controller/cadence/pcie-cadence.c
+@@ -7,6 +7,22 @@
+
+ #include "pcie-cadence.h"
+
++void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie)
++{
++ u32 delay = 0x3;
++ u32 ltssm_control_cap;
++
++ /*
++ * Set the LTSSM Detect Quiet state min. delay to 2ms.
++ */
++ ltssm_control_cap = cdns_pcie_readl(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP);
++ ltssm_control_cap = ((ltssm_control_cap &
++ ~CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK) |
++ CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay));
++
++ cdns_pcie_writel(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP, ltssm_control_cap);
++}
++
+ void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
+ u32 r, bool is_io,
+ u64 cpu_addr, u64 pci_addr, size_t size)
+diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
+index 6705a5fedfbb0..e0b59730bffb7 100644
+--- a/drivers/pci/controller/cadence/pcie-cadence.h
++++ b/drivers/pci/controller/cadence/pcie-cadence.h
+@@ -189,6 +189,14 @@
+ /* AXI link down register */
+ #define CDNS_PCIE_AT_LINKDOWN (CDNS_PCIE_AT_BASE + 0x0824)
+
++/* LTSSM Capabilities register */
++#define CDNS_PCIE_LTSSM_CONTROL_CAP (CDNS_PCIE_LM_BASE + 0x0054)
++#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK GENMASK(2, 1)
++#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT 1
++#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay) \
++ (((delay) << CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT) & \
++ CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK)
++
+ enum cdns_pcie_rp_bar {
+ RP_BAR_UNDEFINED = -1,
+ RP_BAR0,
+@@ -291,6 +299,7 @@ struct cdns_pcie {
+ * @avail_ib_bar: Satus of RP_BAR0, RP_BAR1 and RP_NO_BAR if it's free or
+ * available
+ * @quirk_retrain_flag: Retrain link as quirk for PCIe Gen2
++ * @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
+ */
+ struct cdns_pcie_rc {
+ struct cdns_pcie pcie;
+@@ -299,7 +308,8 @@ struct cdns_pcie_rc {
+ u32 vendor_id;
+ u32 device_id;
+ bool avail_ib_bar[CDNS_PCIE_RP_MAX_IB];
+- bool quirk_retrain_flag;
++ unsigned int quirk_retrain_flag:1;
++ unsigned int quirk_detect_quiet_flag:1;
+ };
+
+ /**
+@@ -330,6 +340,7 @@ struct cdns_pcie_epf {
+ * registers fields (RMW) accessible by both remote RC and EP to
+ * minimize time between read and write
+ * @epf: Structure to hold info about endpoint function
++ * @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
+ */
+ struct cdns_pcie_ep {
+ struct cdns_pcie pcie;
+@@ -344,6 +355,7 @@ struct cdns_pcie_ep {
+ /* protect writing to PCI_STATUS while raising legacy interrupts */
+ spinlock_t lock;
+ struct cdns_pcie_epf *epf;
++ unsigned int quirk_detect_quiet_flag:1;
+ };
+
+
+@@ -504,6 +516,9 @@ static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
+ return 0;
+ }
+ #endif
++
++void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie);
++
+ void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
+ u32 r, bool is_io,
+ u64 cpu_addr, u64 pci_addr, size_t size);
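
Aside (not part of the upstream patch): CDNS_PCIE_DETECT_QUIET_MIN_DELAY() above is the usual mask-and-shift field encoder, applied with a read-modify-write so only bits [2:1] of the LTSSM capability register change. A userspace sketch of the same idiom, with a local GENMASK equivalent and an invented register value:

    #include <stdint.h>
    #include <stdio.h>

    /* 32-bit stand-in for the kernel's GENMASK(h, l) */
    #define GENMASK32(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

    #define DELAY_MASK  GENMASK32(2, 1)    /* bits [2:1] -> 0x6 */
    #define DELAY_SHIFT 1
    #define DELAY(d)    (((d) << DELAY_SHIFT) & DELAY_MASK)

    int main(void)
    {
            uint32_t reg = 0xffff0000u;    /* pretend register contents */

            /* RMW: clear only the field, then OR in the encoded value */
            reg = (reg & ~DELAY_MASK) | DELAY(0x3);

            printf("reg = 0x%08x\n", reg); /* 0xffff0006: other bits kept */
            return 0;
    }
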
+diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
+index 506f6a294eac3..a5b677ec07690 100644
+--- a/drivers/pci/controller/dwc/pcie-tegra194.c
++++ b/drivers/pci/controller/dwc/pcie-tegra194.c
+@@ -515,19 +515,19 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
+ struct tegra_pcie_dw *pcie = arg;
+ struct dw_pcie_ep *ep = &pcie->pci.ep;
+ int spurious = 1;
+- u32 val, tmp;
++ u32 status_l0, status_l1, link_status;
+
+- val = appl_readl(pcie, APPL_INTR_STATUS_L0);
+- if (val & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
+- val = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
+- appl_writel(pcie, val, APPL_INTR_STATUS_L1_0_0);
++ status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
++ if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
++ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
++ appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);
+
+- if (val & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
++ if (status_l1 & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
+ pex_ep_event_hot_rst_done(pcie);
+
+- if (val & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
+- tmp = appl_readl(pcie, APPL_LINK_STATUS);
+- if (tmp & APPL_LINK_STATUS_RDLH_LINK_UP) {
++ if (status_l1 & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
++ link_status = appl_readl(pcie, APPL_LINK_STATUS);
++ if (link_status & APPL_LINK_STATUS_RDLH_LINK_UP) {
+ dev_dbg(pcie->dev, "Link is up with Host\n");
+ dw_pcie_ep_linkup(ep);
+ }
+@@ -536,11 +536,11 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
+ spurious = 0;
+ }
+
+- if (val & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
+- val = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
+- appl_writel(pcie, val, APPL_INTR_STATUS_L1_15);
++ if (status_l0 & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
++ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
++ appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_15);
+
+- if (val & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
++ if (status_l1 & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
+ return IRQ_WAKE_THREAD;
+
+ spurious = 0;
+@@ -548,8 +548,8 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
+
+ if (spurious) {
+ dev_warn(pcie->dev, "Random interrupt (STATUS = 0x%08X)\n",
+- val);
+- appl_writel(pcie, val, APPL_INTR_STATUS_L0);
++ status_l0);
++ appl_writel(pcie, status_l0, APPL_INTR_STATUS_L0);
+ }
+
+ return IRQ_HANDLED;
+@@ -1778,7 +1778,7 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
+ val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK);
+ val |= MSIX_ADDR_MATCH_LOW_OFF_EN;
+ dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val);
+- val = (lower_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
++ val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
+ dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val);
+
+ ret = dw_pcie_ep_init_complete(ep);
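
Aside (not part of the upstream patch): the one-line tegra194 fix above matters because a 64-bit MSI-X match address is split across a LOW/HIGH register pair; writing lower_32_bits() into the HIGH register silently dropped bits 63:32. A userspace sketch of the split (the address is invented):

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace equivalents of the kernel helpers */
    static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
    static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

    int main(void)
    {
            uint64_t msi_mem_phys = 0x00000001fecd0000ull;

            /* LOW gets bits 31:0, HIGH gets bits 63:32; using the low
             * half for both (the original bug) loses the upper bits. */
            printf("LOW  = 0x%08x\n", lower_32_bits(msi_mem_phys));
            printf("HIGH = 0x%08x\n", upper_32_bits(msi_mem_phys));
            return 0;
    }
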
+diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
+index 1a2af963599ca..b4eb75f25906e 100644
+--- a/drivers/pci/controller/pci-tegra.c
++++ b/drivers/pci/controller/pci-tegra.c
+@@ -2160,13 +2160,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
+ rp->np = port;
+
+ rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
+- if (IS_ERR(rp->base))
+- return PTR_ERR(rp->base);
++ if (IS_ERR(rp->base)) {
++ err = PTR_ERR(rp->base);
++ goto err_node_put;
++ }
+
+ label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index);
+ if (!label) {
+- dev_err(dev, "failed to create reset GPIO label\n");
+- return -ENOMEM;
++ err = -ENOMEM;
++ goto err_node_put;
+ }
+
+ /*
+@@ -2184,7 +2186,8 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
+ } else {
+ dev_err(dev, "failed to get reset GPIO: %ld\n",
+ PTR_ERR(rp->reset_gpio));
+- return PTR_ERR(rp->reset_gpio);
++ err = PTR_ERR(rp->reset_gpio);
++ goto err_node_put;
+ }
+ }
+
+diff --git a/drivers/pci/controller/pcie-iproc-bcma.c b/drivers/pci/controller/pcie-iproc-bcma.c
+index 56b8ee7bf3307..f918c713afb08 100644
+--- a/drivers/pci/controller/pcie-iproc-bcma.c
++++ b/drivers/pci/controller/pcie-iproc-bcma.c
+@@ -35,7 +35,6 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
+ {
+ struct device *dev = &bdev->dev;
+ struct iproc_pcie *pcie;
+- LIST_HEAD(resources);
+ struct pci_host_bridge *bridge;
+ int ret;
+
+@@ -60,19 +59,16 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
+ pcie->mem.end = bdev->addr_s[0] + SZ_128M - 1;
+ pcie->mem.name = "PCIe MEM space";
+ pcie->mem.flags = IORESOURCE_MEM;
+- pci_add_resource(&resources, &pcie->mem);
++ pci_add_resource(&bridge->windows, &pcie->mem);
++ ret = devm_request_pci_bus_resources(dev, &bridge->windows);
++ if (ret)
++ return ret;
+
+ pcie->map_irq = iproc_pcie_bcma_map_irq;
+
+- ret = iproc_pcie_setup(pcie, &resources);
+- if (ret) {
+- dev_err(dev, "PCIe controller setup failed\n");
+- pci_free_resource_list(&resources);
+- return ret;
+- }
+-
+ bcma_set_drvdata(bdev, pcie);
+- return 0;
++
++ return iproc_pcie_setup(pcie, &bridge->windows);
+ }
+
+ static void iproc_pcie_bcma_remove(struct bcma_device *bdev)
+diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c
+index b4a288e24aafb..c91d85b151290 100644
+--- a/drivers/pci/controller/pcie-rcar-ep.c
++++ b/drivers/pci/controller/pcie-rcar-ep.c
+@@ -492,9 +492,9 @@ static int rcar_pcie_ep_probe(struct platform_device *pdev)
+ pcie->dev = dev;
+
+ pm_runtime_enable(dev);
+- err = pm_runtime_get_sync(dev);
++ err = pm_runtime_resume_and_get(dev);
+ if (err < 0) {
+- dev_err(dev, "pm_runtime_get_sync failed\n");
++ dev_err(dev, "pm_runtime_resume_and_get failed\n");
+ goto err_pm_disable;
+ }
+
+diff --git a/drivers/pci/hotplug/TODO b/drivers/pci/hotplug/TODO
+index a32070be5adf9..cc6194aa24c15 100644
+--- a/drivers/pci/hotplug/TODO
++++ b/drivers/pci/hotplug/TODO
+@@ -40,9 +40,6 @@ ibmphp:
+
+ * The return value of pci_hp_register() is not checked.
+
+-* iounmap(io_mem) is called in the error path of ebda_rsrc_controller()
+- and once more in the error path of its caller ibmphp_access_ebda().
+-
+ * The various slot data structures are difficult to follow and need to be
+ simplified. A lot of functions are too large and too complex, they need
+ to be broken up into smaller, manageable pieces. Negative examples are
+diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
+index 11a2661dc0627..7fb75401ad8a7 100644
+--- a/drivers/pci/hotplug/ibmphp_ebda.c
++++ b/drivers/pci/hotplug/ibmphp_ebda.c
+@@ -714,8 +714,7 @@ static int __init ebda_rsrc_controller(void)
+ /* init hpc structure */
+ hpc_ptr = alloc_ebda_hpc(slot_num, bus_num);
+ if (!hpc_ptr) {
+- rc = -ENOMEM;
+- goto error_no_hpc;
++ return -ENOMEM;
+ }
+ hpc_ptr->ctlr_id = ctlr_id;
+ hpc_ptr->ctlr_relative_id = ctlr;
+@@ -910,8 +909,6 @@ error:
+ kfree(tmp_slot);
+ error_no_slot:
+ free_ebda_hpc(hpc_ptr);
+-error_no_hpc:
+- iounmap(io_mem);
+ return rc;
+ }
+
+diff --git a/drivers/pci/of.c b/drivers/pci/of.c
+index ac24cd5439a93..3f6ef2f45e57a 100644
+--- a/drivers/pci/of.c
++++ b/drivers/pci/of.c
+@@ -295,7 +295,7 @@ static int devm_of_pci_get_host_bridge_resources(struct device *dev,
+ /* Check for ranges property */
+ err = of_pci_range_parser_init(&parser, dev_node);
+ if (err)
+- goto failed;
++ return 0;
+
+ dev_dbg(dev, "Parsing ranges property...\n");
+ for_each_of_pci_range(&parser, &range) {
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index eae6a9fdd33d4..0d7109018a91f 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -265,7 +265,7 @@ static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
+
+ *endptr = strchrnul(path, ';');
+
+- wpath = kmemdup_nul(path, *endptr - path, GFP_KERNEL);
++ wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC);
+ if (!wpath)
+ return -ENOMEM;
+
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index bad294c352519..5d2acebc3e966 100644
+--- a/drivers/pci/quirks.c |
2723 |
++++ b/drivers/pci/quirks.c |
2724 |
+@@ -4626,6 +4626,18 @@ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags) |
2725 |
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); |
2726 |
+ } |
2727 |
+ |
2728 |
++/* |
2729 |
++ * Each of these NXP Root Ports is in a Root Complex with a unique segment |
2730 |
++ * number and does provide isolation features to disable peer transactions |
2731 |
++ * and validate bus numbers in requests, but does not provide an ACS |
2732 |
++ * capability. |
2733 |
++ */ |
2734 |
++static int pci_quirk_nxp_rp_acs(struct pci_dev *dev, u16 acs_flags) |
2735 |
++{ |
2736 |
++ return pci_acs_ctrl_enabled(acs_flags, |
2737 |
++ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); |
2738 |
++} |
2739 |
++ |
2740 |
+ static int pci_quirk_al_acs(struct pci_dev *dev, u16 acs_flags) |
2741 |
+ { |
2742 |
+ if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) |
2743 |
+@@ -4852,6 +4864,10 @@ static const struct pci_dev_acs_enabled { |
2744 |
+ { 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */ |
2745 |
+ /* Cavium ThunderX */ |
2746 |
+ { PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, pci_quirk_cavium_acs }, |
2747 |
++ /* Cavium multi-function devices */ |
2748 |
++ { PCI_VENDOR_ID_CAVIUM, 0xA026, pci_quirk_mf_endpoint_acs }, |
2749 |
++ { PCI_VENDOR_ID_CAVIUM, 0xA059, pci_quirk_mf_endpoint_acs }, |
2750 |
++ { PCI_VENDOR_ID_CAVIUM, 0xA060, pci_quirk_mf_endpoint_acs }, |
2751 |
+ /* APM X-Gene */ |
2752 |
+ { PCI_VENDOR_ID_AMCC, 0xE004, pci_quirk_xgene_acs }, |
2753 |
+ /* Ampere Computing */ |
2754 |
+@@ -4872,6 +4888,39 @@ static const struct pci_dev_acs_enabled { |
2755 |
+ { PCI_VENDOR_ID_ZHAOXIN, 0x3038, pci_quirk_mf_endpoint_acs }, |
2756 |
+ { PCI_VENDOR_ID_ZHAOXIN, 0x3104, pci_quirk_mf_endpoint_acs }, |
2757 |
+ { PCI_VENDOR_ID_ZHAOXIN, 0x9083, pci_quirk_mf_endpoint_acs }, |
2758 |
++ /* NXP root ports, xx=16, 12, or 08 cores */ |
2759 |
++ /* LX2xx0A : without security features + CAN-FD */ |
2760 |
++ { PCI_VENDOR_ID_NXP, 0x8d81, pci_quirk_nxp_rp_acs }, |
2761 |
++ { PCI_VENDOR_ID_NXP, 0x8da1, pci_quirk_nxp_rp_acs }, |
2762 |
++ { PCI_VENDOR_ID_NXP, 0x8d83, pci_quirk_nxp_rp_acs }, |
2763 |
++ /* LX2xx0C : security features + CAN-FD */ |
2764 |
++ { PCI_VENDOR_ID_NXP, 0x8d80, pci_quirk_nxp_rp_acs }, |
2765 |
++ { PCI_VENDOR_ID_NXP, 0x8da0, pci_quirk_nxp_rp_acs }, |
2766 |
++ { PCI_VENDOR_ID_NXP, 0x8d82, pci_quirk_nxp_rp_acs }, |
2767 |
++ /* LX2xx0E : security features + CAN */ |
2768 |
++ { PCI_VENDOR_ID_NXP, 0x8d90, pci_quirk_nxp_rp_acs }, |
2769 |
++ { PCI_VENDOR_ID_NXP, 0x8db0, pci_quirk_nxp_rp_acs }, |
2770 |
++ { PCI_VENDOR_ID_NXP, 0x8d92, pci_quirk_nxp_rp_acs }, |
2771 |
++ /* LX2xx0N : without security features + CAN */ |
2772 |
++ { PCI_VENDOR_ID_NXP, 0x8d91, pci_quirk_nxp_rp_acs }, |
2773 |
++ { PCI_VENDOR_ID_NXP, 0x8db1, pci_quirk_nxp_rp_acs }, |
2774 |
++ { PCI_VENDOR_ID_NXP, 0x8d93, pci_quirk_nxp_rp_acs }, |
2775 |
++ /* LX2xx2A : without security features + CAN-FD */ |
2776 |
++ { PCI_VENDOR_ID_NXP, 0x8d89, pci_quirk_nxp_rp_acs }, |
2777 |
++ { PCI_VENDOR_ID_NXP, 0x8da9, pci_quirk_nxp_rp_acs }, |
2778 |
++ { PCI_VENDOR_ID_NXP, 0x8d8b, pci_quirk_nxp_rp_acs }, |
2779 |
++ /* LX2xx2C : security features + CAN-FD */ |
2780 |
++ { PCI_VENDOR_ID_NXP, 0x8d88, pci_quirk_nxp_rp_acs }, |
2781 |
++ { PCI_VENDOR_ID_NXP, 0x8da8, pci_quirk_nxp_rp_acs }, |
2782 |
++ { PCI_VENDOR_ID_NXP, 0x8d8a, pci_quirk_nxp_rp_acs }, |
2783 |
++ /* LX2xx2E : security features + CAN */ |
2784 |
++ { PCI_VENDOR_ID_NXP, 0x8d98, pci_quirk_nxp_rp_acs }, |
2785 |
++ { PCI_VENDOR_ID_NXP, 0x8db8, pci_quirk_nxp_rp_acs }, |
2786 |
++ { PCI_VENDOR_ID_NXP, 0x8d9a, pci_quirk_nxp_rp_acs }, |
2787 |
++ /* LX2xx2N : without security features + CAN */ |
2788 |
++ { PCI_VENDOR_ID_NXP, 0x8d99, pci_quirk_nxp_rp_acs }, |
2789 |
++ { PCI_VENDOR_ID_NXP, 0x8db9, pci_quirk_nxp_rp_acs }, |
2790 |
++ { PCI_VENDOR_ID_NXP, 0x8d9b, pci_quirk_nxp_rp_acs }, |
2791 |
+ /* Zhaoxin Root/Downstream Ports */ |
2792 |
+ { PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs }, |
2793 |
+ { 0 } |
2794 |
+@@ -5346,7 +5395,7 @@ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, |
2795 |
+ PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda); |
2796 |
+ |
2797 |
+ /* |
2798 |
+- * Create device link for NVIDIA GPU with integrated USB xHCI Host |
2799 |
++ * Create device link for GPUs with integrated USB xHCI Host |
2800 |
+ * controller to VGA. |
2801 |
+ */ |
2802 |
+ static void quirk_gpu_usb(struct pci_dev *usb) |
2803 |
+@@ -5355,9 +5404,11 @@ static void quirk_gpu_usb(struct pci_dev *usb) |
2804 |
+ } |
2805 |
+ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, |
2806 |
+ PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb); |
2807 |
++DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID, |
2808 |
++ PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb); |
2809 |
+ |
2810 |
+ /* |
2811 |
+- * Create device link for NVIDIA GPU with integrated Type-C UCSI controller |
2812 |
++ * Create device link for GPUs with integrated Type-C UCSI controller |
2813 |
+ * to VGA. Currently there is no class code defined for UCSI device over PCI |
2814 |
+ * so using UNKNOWN class for now and it will be updated when UCSI |
2815 |
+ * over PCI gets a class code. |
2816 |
+@@ -5370,6 +5421,9 @@ static void quirk_gpu_usb_typec_ucsi(struct pci_dev *ucsi) |
2817 |
+ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, |
2818 |
+ PCI_CLASS_SERIAL_UNKNOWN, 8, |
2819 |
+ quirk_gpu_usb_typec_ucsi); |
2820 |
++DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID, |
2821 |
++ PCI_CLASS_SERIAL_UNKNOWN, 8, |
2822 |
++ quirk_gpu_usb_typec_ucsi); |
2823 |
+ |
2824 |
+ /* |
2825 |
+ * Enable the NVIDIA GPU integrated HDA controller if the BIOS left it |
2826 |
+diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c |
2827 |
+index cc5e84b80c699..faa3a4b8ed91d 100644 |
2828 |
+--- a/drivers/s390/char/sclp_early.c |
2829 |
++++ b/drivers/s390/char/sclp_early.c |
2830 |
+@@ -40,13 +40,14 @@ static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb) |
2831 |
+ sclp.has_gisaf = !!(sccb->fac118 & 0x08); |
2832 |
+ sclp.has_hvs = !!(sccb->fac119 & 0x80); |
2833 |
+ sclp.has_kss = !!(sccb->fac98 & 0x01); |
2834 |
+- sclp.has_sipl = !!(sccb->cbl & 0x4000); |
2835 |
+ if (sccb->fac85 & 0x02) |
2836 |
+ S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP; |
2837 |
+ if (sccb->fac91 & 0x40) |
2838 |
+ S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_GUEST; |
2839 |
+ if (sccb->cpuoff > 134) |
2840 |
+ sclp.has_diag318 = !!(sccb->byte_134 & 0x80); |
2841 |
++ if (sccb->cpuoff > 137) |
2842 |
++ sclp.has_sipl = !!(sccb->cbl & 0x4000); |
2843 |
+ sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2; |
2844 |
+ sclp.rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2; |
2845 |
+ sclp.rzm <<= 20; |
2846 |
+diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c |
2847 |
+index c8784dfafdd73..da02c3e96e7b2 100644 |
2848 |
+--- a/drivers/vhost/net.c |
2849 |
++++ b/drivers/vhost/net.c |
2850 |
+@@ -466,7 +466,7 @@ static void vhost_tx_batch(struct vhost_net *net, |
2851 |
+ .num = nvq->batched_xdp, |
2852 |
+ .ptr = nvq->xdp, |
2853 |
+ }; |
2854 |
+- int err; |
2855 |
++ int i, err; |
2856 |
+ |
2857 |
+ if (nvq->batched_xdp == 0) |
2858 |
+ goto signal_used; |
2859 |
+@@ -475,6 +475,15 @@ static void vhost_tx_batch(struct vhost_net *net, |
2860 |
+ err = sock->ops->sendmsg(sock, msghdr, 0); |
2861 |
+ if (unlikely(err < 0)) { |
2862 |
+ vq_err(&nvq->vq, "Fail to batch sending packets\n"); |
2863 |
++ |
2864 |
++ /* free pages owned by XDP; since this is an unlikely error path, |
2865 |
++ * keep it simple and avoid more complex bulk update for the |
2866 |
++ * used pages |
2867 |
++ */ |
2868 |
++ for (i = 0; i < nvq->batched_xdp; ++i) |
2869 |
++ put_page(virt_to_head_page(nvq->xdp[i].data)); |
2870 |
++ nvq->batched_xdp = 0; |
2871 |
++ nvq->done_idx = 0; |
2872 |
+ return; |
2873 |
+ } |
2874 |
+ |
2875 |
+diff --git a/drivers/video/backlight/ktd253-backlight.c b/drivers/video/backlight/ktd253-backlight.c |
2876 |
+index e3fee3f1f5828..9d355fd989d86 100644 |
2877 |
+--- a/drivers/video/backlight/ktd253-backlight.c |
2878 |
++++ b/drivers/video/backlight/ktd253-backlight.c |
2879 |
+@@ -25,6 +25,7 @@ |
2880 |
+ |
2881 |
+ #define KTD253_T_LOW_NS (200 + 10) /* Additional 10ns as safety factor */ |
2882 |
+ #define KTD253_T_HIGH_NS (200 + 10) /* Additional 10ns as safety factor */ |
2883 |
++#define KTD253_T_OFF_CRIT_NS 100000 /* 100 us, now it doesn't look good */ |
2884 |
+ #define KTD253_T_OFF_MS 3 |
2885 |
+ |
2886 |
+ struct ktd253_backlight { |
2887 |
+@@ -34,13 +35,50 @@ struct ktd253_backlight { |
2888 |
+ u16 ratio; |
2889 |
+ }; |
2890 |
+ |
2891 |
++static void ktd253_backlight_set_max_ratio(struct ktd253_backlight *ktd253) |
2892 |
++{ |
2893 |
++ gpiod_set_value_cansleep(ktd253->gpiod, 1); |
2894 |
++ ndelay(KTD253_T_HIGH_NS); |
2895 |
++ /* We always fall back to this when we power on */ |
2896 |
++} |
2897 |
++ |
2898 |
++static int ktd253_backlight_stepdown(struct ktd253_backlight *ktd253) |
2899 |
++{ |
2900 |
++ /* |
2901 |
++ * These GPIO operations absolutely can NOT sleep so no _cansleep |
2902 |
++ * suffixes, and no using GPIO expanders on slow buses for this! |
2903 |
++ * |
2904 |
++ * The maximum number of cycles of the loop is 32 so the time taken |
2905 |
++ * should nominally be: |
2906 |
++ * (T_LOW_NS + T_HIGH_NS + loop_time) * 32 |
2907 |
++ * |
2908 |
++ * Architectures do not always support ndelay() and we will get a few us |
2909 |
++ * instead. If we get to a critical time limit an interrupt has likely |
2910 |
++ * occured in the low part of the loop and we need to restart from the |
2911 |
++ * top so we have the backlight in a known state. |
2912 |
++ */ |
2913 |
++ u64 ns; |
2914 |
++ |
2915 |
++ ns = ktime_get_ns(); |
2916 |
++ gpiod_set_value(ktd253->gpiod, 0); |
2917 |
++ ndelay(KTD253_T_LOW_NS); |
2918 |
++ gpiod_set_value(ktd253->gpiod, 1); |
2919 |
++ ns = ktime_get_ns() - ns; |
2920 |
++ if (ns >= KTD253_T_OFF_CRIT_NS) { |
2921 |
++ dev_err(ktd253->dev, "PCM on backlight took too long (%llu ns)\n", ns); |
2922 |
++ return -EAGAIN; |
2923 |
++ } |
2924 |
++ ndelay(KTD253_T_HIGH_NS); |
2925 |
++ return 0; |
2926 |
++} |
2927 |
++ |
2928 |
+ static int ktd253_backlight_update_status(struct backlight_device *bl) |
2929 |
+ { |
2930 |
+ struct ktd253_backlight *ktd253 = bl_get_data(bl); |
2931 |
+ int brightness = backlight_get_brightness(bl); |
2932 |
+ u16 target_ratio; |
2933 |
+ u16 current_ratio = ktd253->ratio; |
2934 |
+- unsigned long flags; |
2935 |
++ int ret; |
2936 |
+ |
2937 |
+ dev_dbg(ktd253->dev, "new brightness/ratio: %d/32\n", brightness); |
2938 |
+ |
2939 |
+@@ -62,37 +100,34 @@ static int ktd253_backlight_update_status(struct backlight_device *bl) |
2940 |
+ } |
2941 |
+ |
2942 |
+ if (current_ratio == 0) { |
2943 |
+- gpiod_set_value_cansleep(ktd253->gpiod, 1); |
2944 |
+- ndelay(KTD253_T_HIGH_NS); |
2945 |
+- /* We always fall back to this when we power on */ |
2946 |
++ ktd253_backlight_set_max_ratio(ktd253); |
2947 |
+ current_ratio = KTD253_MAX_RATIO; |
2948 |
+ } |
2949 |
+ |
2950 |
+- /* |
2951 |
+- * WARNING: |
2952 |
+- * The loop to set the correct current level is performed |
2953 |
+- * with interrupts disabled as it is timing critical. |
2954 |
+- * The maximum number of cycles of the loop is 32 |
2955 |
+- * so the time taken will be (T_LOW_NS + T_HIGH_NS + loop_time) * 32, |
2956 |
+- */ |
2957 |
+- local_irq_save(flags); |
2958 |
+ while (current_ratio != target_ratio) { |
2959 |
+ /* |
2960 |
+ * These GPIO operations absolutely can NOT sleep so no |
2961 |
+ * _cansleep suffixes, and no using GPIO expanders on |
2962 |
+ * slow buses for this! |
2963 |
+ */ |
2964 |
+- gpiod_set_value(ktd253->gpiod, 0); |
2965 |
+- ndelay(KTD253_T_LOW_NS); |
2966 |
+- gpiod_set_value(ktd253->gpiod, 1); |
2967 |
+- ndelay(KTD253_T_HIGH_NS); |
2968 |
+- /* After 1/32 we loop back to 32/32 */ |
2969 |
+- if (current_ratio == KTD253_MIN_RATIO) |
2970 |
++ ret = ktd253_backlight_stepdown(ktd253); |
2971 |
++ if (ret == -EAGAIN) { |
2972 |
++ /* |
2973 |
++ * Something disturbed the backlight setting code when |
2974 |
++ * running so we need to bring the PWM back to a known |
2975 |
++ * state. This shouldn't happen too much. |
2976 |
++ */ |
2977 |
++ gpiod_set_value_cansleep(ktd253->gpiod, 0); |
2978 |
++ msleep(KTD253_T_OFF_MS); |
2979 |
++ ktd253_backlight_set_max_ratio(ktd253); |
2980 |
++ current_ratio = KTD253_MAX_RATIO; |
2981 |
++ } else if (current_ratio == KTD253_MIN_RATIO) { |
2982 |
++ /* After 1/32 we loop back to 32/32 */ |
2983 |
+ current_ratio = KTD253_MAX_RATIO; |
2984 |
+- else |
2985 |
++ } else { |
2986 |
+ current_ratio--; |
2987 |
++ } |
2988 |
+ } |
2989 |
+- local_irq_restore(flags); |
2990 |
+ ktd253->ratio = current_ratio; |
2991 |
+ |
2992 |
+ dev_dbg(ktd253->dev, "new ratio set to %d/32\n", target_ratio); |
2993 |
+diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c |
2994 |
+index 2946f3a63110c..2ee017442dfcd 100644 |
2995 |
+--- a/drivers/watchdog/watchdog_dev.c |
2996 |
++++ b/drivers/watchdog/watchdog_dev.c |
2997 |
+@@ -1164,7 +1164,10 @@ int watchdog_set_last_hw_keepalive(struct watchdog_device *wdd, |
2998 |
+ |
2999 |
+ wd_data->last_hw_keepalive = ktime_sub(now, ms_to_ktime(last_ping_ms)); |
3000 |
+ |
3001 |
+- return __watchdog_ping(wdd); |
3002 |
++ if (watchdog_hw_running(wdd) && handle_boot_enabled) |
3003 |
++ return __watchdog_ping(wdd); |
3004 |
++ |
3005 |
++ return 0; |
3006 |
+ } |
3007 |
+ EXPORT_SYMBOL_GPL(watchdog_set_last_hw_keepalive); |
3008 |
+ |
3009 |
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c |
3010 |
+index e025cd8f3f071..ef7df2141f34f 100644 |
3011 |
+--- a/fs/btrfs/disk-io.c |
3012 |
++++ b/fs/btrfs/disk-io.c |
3013 |
+@@ -3019,6 +3019,29 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device |
3014 |
+ */ |
3015 |
+ fs_info->compress_type = BTRFS_COMPRESS_ZLIB; |
3016 |
+ |
3017 |
++ /* |
3018 |
++ * Flag our filesystem as having big metadata blocks if they are bigger |
3019 |
++ * than the page size |
3020 |
++ */ |
3021 |
++ if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) { |
3022 |
++ if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA)) |
3023 |
++ btrfs_info(fs_info, |
3024 |
++ "flagging fs with big metadata feature"); |
3025 |
++ features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA; |
3026 |
++ } |
3027 |
++ |
3028 |
++ /* Set up fs_info before parsing mount options */ |
3029 |
++ nodesize = btrfs_super_nodesize(disk_super); |
3030 |
++ sectorsize = btrfs_super_sectorsize(disk_super); |
3031 |
++ stripesize = sectorsize; |
3032 |
++ fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids)); |
3033 |
++ fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids)); |
3034 |
++ |
3035 |
++ /* Cache block sizes */ |
3036 |
++ fs_info->nodesize = nodesize; |
3037 |
++ fs_info->sectorsize = sectorsize; |
3038 |
++ fs_info->stripesize = stripesize; |
3039 |
++ |
3040 |
+ ret = btrfs_parse_options(fs_info, options, sb->s_flags); |
3041 |
+ if (ret) { |
3042 |
+ err = ret; |
3043 |
+@@ -3045,28 +3068,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device |
3044 |
+ if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA) |
3045 |
+ btrfs_info(fs_info, "has skinny extents"); |
3046 |
+ |
3047 |
+- /* |
3048 |
+- * flag our filesystem as having big metadata blocks if |
3049 |
+- * they are bigger than the page size |
3050 |
+- */ |
3051 |
+- if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) { |
3052 |
+- if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA)) |
3053 |
+- btrfs_info(fs_info, |
3054 |
+- "flagging fs with big metadata feature"); |
3055 |
+- features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA; |
3056 |
+- } |
3057 |
+- |
3058 |
+- nodesize = btrfs_super_nodesize(disk_super); |
3059 |
+- sectorsize = btrfs_super_sectorsize(disk_super); |
3060 |
+- stripesize = sectorsize; |
3061 |
+- fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids)); |
3062 |
+- fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids)); |
3063 |
+- |
3064 |
+- /* Cache block sizes */ |
3065 |
+- fs_info->nodesize = nodesize; |
3066 |
+- fs_info->sectorsize = sectorsize; |
3067 |
+- fs_info->stripesize = stripesize; |
3068 |
+- |
3069 |
+ /* |
3070 |
+ * mixed block groups end up with duplicate but slightly offset |
3071 |
+ * extent buffers for the same range. It leads to corruptions |
3072 |
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c |
3073 |
+index 4140d5c3ab5a5..f943eea9fe4e1 100644 |
3074 |
+--- a/fs/fuse/dev.c |
3075 |
++++ b/fs/fuse/dev.c |
3076 |
+@@ -288,10 +288,10 @@ void fuse_request_end(struct fuse_req *req) |
3077 |
+ |
3078 |
+ /* |
3079 |
+ * test_and_set_bit() implies smp_mb() between bit |
3080 |
+- * changing and below intr_entry check. Pairs with |
3081 |
++ * changing and below FR_INTERRUPTED check. Pairs with |
3082 |
+ * smp_mb() from queue_interrupt(). |
3083 |
+ */ |
3084 |
+- if (!list_empty(&req->intr_entry)) { |
3085 |
++ if (test_bit(FR_INTERRUPTED, &req->flags)) { |
3086 |
+ spin_lock(&fiq->lock); |
3087 |
+ list_del_init(&req->intr_entry); |
3088 |
+ spin_unlock(&fiq->lock); |
3089 |
+diff --git a/fs/io_uring.c b/fs/io_uring.c |
3090 |
+index d0089039fee79..a8d07273ddc05 100644 |
3091 |
+--- a/fs/io_uring.c |
3092 |
++++ b/fs/io_uring.c |
3093 |
+@@ -3206,12 +3206,15 @@ static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter) |
3094 |
+ ret = nr; |
3095 |
+ break; |
3096 |
+ } |
3097 |
++ if (!iov_iter_is_bvec(iter)) { |
3098 |
++ iov_iter_advance(iter, nr); |
3099 |
++ } else { |
3100 |
++ req->rw.len -= nr; |
3101 |
++ req->rw.addr += nr; |
3102 |
++ } |
3103 |
+ ret += nr; |
3104 |
+ if (nr != iovec.iov_len) |
3105 |
+ break; |
3106 |
+- req->rw.len -= nr; |
3107 |
+- req->rw.addr += nr; |
3108 |
+- iov_iter_advance(iter, nr); |
3109 |
+ } |
3110 |
+ |
3111 |
+ return ret; |
3112 |
+diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h |
3113 |
+index 551093b74596b..1dafc7c7f5cfe 100644 |
3114 |
+--- a/include/linux/memory_hotplug.h |
3115 |
++++ b/include/linux/memory_hotplug.h |
3116 |
+@@ -359,8 +359,8 @@ extern void sparse_remove_section(struct mem_section *ms, |
3117 |
+ unsigned long map_offset, struct vmem_altmap *altmap); |
3118 |
+ extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, |
3119 |
+ unsigned long pnum); |
3120 |
+-extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn, |
3121 |
+- unsigned long nr_pages); |
3122 |
++extern struct zone *zone_for_pfn_range(int online_type, int nid, |
3123 |
++ unsigned long start_pfn, unsigned long nr_pages); |
3124 |
+ #endif /* CONFIG_MEMORY_HOTPLUG */ |
3125 |
+ |
3126 |
+ #endif /* __LINUX_MEMORY_HOTPLUG_H */ |
3127 |
+diff --git a/include/linux/pci.h b/include/linux/pci.h |
3128 |
+index 22207a79762c2..a55097b4d9927 100644 |
3129 |
+--- a/include/linux/pci.h |
3130 |
++++ b/include/linux/pci.h |
3131 |
+@@ -1713,8 +1713,9 @@ static inline void pci_disable_device(struct pci_dev *dev) { } |
3132 |
+ static inline int pcim_enable_device(struct pci_dev *pdev) { return -EIO; } |
3133 |
+ static inline int pci_assign_resource(struct pci_dev *dev, int i) |
3134 |
+ { return -EBUSY; } |
3135 |
+-static inline int __pci_register_driver(struct pci_driver *drv, |
3136 |
+- struct module *owner) |
3137 |
++static inline int __must_check __pci_register_driver(struct pci_driver *drv, |
3138 |
++ struct module *owner, |
3139 |
++ const char *mod_name) |
3140 |
+ { return 0; } |
3141 |
+ static inline int pci_register_driver(struct pci_driver *drv) |
3142 |
+ { return 0; } |
3143 |
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h |
3144 |
+index 1ab1e24bcbce5..635a9243cce0d 100644 |
3145 |
+--- a/include/linux/pci_ids.h |
3146 |
++++ b/include/linux/pci_ids.h |
3147 |
+@@ -2476,7 +2476,8 @@ |
3148 |
+ #define PCI_VENDOR_ID_TDI 0x192E |
3149 |
+ #define PCI_DEVICE_ID_TDI_EHCI 0x0101 |
3150 |
+ |
3151 |
+-#define PCI_VENDOR_ID_FREESCALE 0x1957 |
3152 |
++#define PCI_VENDOR_ID_FREESCALE 0x1957 /* duplicate: NXP */ |
3153 |
++#define PCI_VENDOR_ID_NXP 0x1957 /* duplicate: FREESCALE */ |
3154 |
+ #define PCI_DEVICE_ID_MPC8308 0xc006 |
3155 |
+ #define PCI_DEVICE_ID_MPC8315E 0x00b4 |
3156 |
+ #define PCI_DEVICE_ID_MPC8315 0x00b5 |
3157 |
+diff --git a/include/linux/sched.h b/include/linux/sched.h |
3158 |
+index 2660ee4b08adf..29c7ccd5ae42e 100644 |
3159 |
+--- a/include/linux/sched.h |
3160 |
++++ b/include/linux/sched.h |
3161 |
+@@ -1354,6 +1354,7 @@ struct task_struct { |
3162 |
+ mce_whole_page : 1, |
3163 |
+ __mce_reserved : 62; |
3164 |
+ struct callback_head mce_kill_me; |
3165 |
++ int mce_count; |
3166 |
+ #endif |
3167 |
+ |
3168 |
+ /* |
3169 |
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h |
3170 |
+index 0a1239819fd2a..acbf1875ad506 100644 |
3171 |
+--- a/include/linux/skbuff.h |
3172 |
++++ b/include/linux/skbuff.h |
3173 |
+@@ -1908,7 +1908,7 @@ static inline void __skb_insert(struct sk_buff *newsk, |
3174 |
+ WRITE_ONCE(newsk->prev, prev); |
3175 |
+ WRITE_ONCE(next->prev, newsk); |
3176 |
+ WRITE_ONCE(prev->next, newsk); |
3177 |
+- list->qlen++; |
3178 |
++ WRITE_ONCE(list->qlen, list->qlen + 1); |
3179 |
+ } |
3180 |
+ |
3181 |
+ static inline void __skb_queue_splice(const struct sk_buff_head *list, |
3182 |
+diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h |
3183 |
+index 9e7c2c6078456..69079fbf3ed2d 100644 |
3184 |
+--- a/include/uapi/linux/pkt_sched.h |
3185 |
++++ b/include/uapi/linux/pkt_sched.h |
3186 |
+@@ -826,6 +826,8 @@ struct tc_codel_xstats { |
3187 |
+ |
3188 |
+ /* FQ_CODEL */ |
3189 |
+ |
3190 |
++#define FQ_CODEL_QUANTUM_MAX (1 << 20) |
3191 |
++ |
3192 |
+ enum { |
3193 |
+ TCA_FQ_CODEL_UNSPEC, |
3194 |
+ TCA_FQ_CODEL_TARGET, |
3195 |
+diff --git a/kernel/events/core.c b/kernel/events/core.c |
3196 |
+index 7e0fdc19043e4..c677f934353af 100644 |
3197 |
+--- a/kernel/events/core.c |
3198 |
++++ b/kernel/events/core.c |
3199 |
+@@ -9973,7 +9973,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event) |
3200 |
+ return; |
3201 |
+ |
3202 |
+ if (ifh->nr_file_filters) { |
3203 |
+- mm = get_task_mm(event->ctx->task); |
3204 |
++ mm = get_task_mm(task); |
3205 |
+ if (!mm) |
3206 |
+ goto restart; |
3207 |
+ |
3208 |
+diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c |
3209 |
+index a82f03f385f89..0996d59750ff0 100644 |
3210 |
+--- a/kernel/trace/trace_boot.c |
3211 |
++++ b/kernel/trace/trace_boot.c |
3212 |
+@@ -205,12 +205,15 @@ trace_boot_init_one_event(struct trace_array *tr, struct xbc_node *gnode, |
3213 |
+ pr_err("Failed to apply filter: %s\n", buf); |
3214 |
+ } |
3215 |
+ |
3216 |
+- xbc_node_for_each_array_value(enode, "actions", anode, p) { |
3217 |
+- if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf)) |
3218 |
+- pr_err("action string is too long: %s\n", p); |
3219 |
+- else if (trigger_process_regex(file, buf) < 0) |
3220 |
+- pr_err("Failed to apply an action: %s\n", buf); |
3221 |
+- } |
3222 |
++ if (IS_ENABLED(CONFIG_HIST_TRIGGERS)) { |
3223 |
++ xbc_node_for_each_array_value(enode, "actions", anode, p) { |
3224 |
++ if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf)) |
3225 |
++ pr_err("action string is too long: %s\n", p); |
3226 |
++ else if (trigger_process_regex(file, buf) < 0) |
3227 |
++ pr_err("Failed to apply an action: %s\n", buf); |
3228 |
++ } |
3229 |
++ } else if (xbc_node_find_value(enode, "actions", NULL)) |
3230 |
++ pr_err("Failed to apply event actions because CONFIG_HIST_TRIGGERS is not set.\n"); |
3231 |
+ |
3232 |
+ if (xbc_node_find_value(enode, "enable", NULL)) { |
3233 |
+ if (trace_event_enable_disable(file, 1, 0) < 0) |
3234 |
+diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c |
3235 |
+index 68150b9cbde92..552dbc9d52260 100644 |
3236 |
+--- a/kernel/trace/trace_kprobe.c |
3237 |
++++ b/kernel/trace/trace_kprobe.c |
3238 |
+@@ -647,7 +647,11 @@ static int register_trace_kprobe(struct trace_kprobe *tk) |
3239 |
+ /* Register new event */ |
3240 |
+ ret = register_kprobe_event(tk); |
3241 |
+ if (ret) { |
3242 |
+- pr_warn("Failed to register probe event(%d)\n", ret); |
3243 |
++ if (ret == -EEXIST) { |
3244 |
++ trace_probe_log_set_index(0); |
3245 |
++ trace_probe_log_err(0, EVENT_EXIST); |
3246 |
++ } else |
3247 |
++ pr_warn("Failed to register probe event(%d)\n", ret); |
3248 |
+ goto end; |
3249 |
+ } |
3250 |
+ |
3251 |
+diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c |
3252 |
+index d2867ccc6acaa..1d31bc4acf7a5 100644 |
3253 |
+--- a/kernel/trace/trace_probe.c |
3254 |
++++ b/kernel/trace/trace_probe.c |
3255 |
+@@ -1029,11 +1029,36 @@ error: |
3256 |
+ return ret; |
3257 |
+ } |
3258 |
+ |
3259 |
++static struct trace_event_call * |
3260 |
++find_trace_event_call(const char *system, const char *event_name) |
3261 |
++{ |
3262 |
++ struct trace_event_call *tp_event; |
3263 |
++ const char *name; |
3264 |
++ |
3265 |
++ list_for_each_entry(tp_event, &ftrace_events, list) { |
3266 |
++ if (!tp_event->class->system || |
3267 |
++ strcmp(system, tp_event->class->system)) |
3268 |
++ continue; |
3269 |
++ name = trace_event_name(tp_event); |
3270 |
++ if (!name || strcmp(event_name, name)) |
3271 |
++ continue; |
3272 |
++ return tp_event; |
3273 |
++ } |
3274 |
++ |
3275 |
++ return NULL; |
3276 |
++} |
3277 |
++ |
3278 |
+ int trace_probe_register_event_call(struct trace_probe *tp) |
3279 |
+ { |
3280 |
+ struct trace_event_call *call = trace_probe_event_call(tp); |
3281 |
+ int ret; |
3282 |
+ |
3283 |
++ lockdep_assert_held(&event_mutex); |
3284 |
++ |
3285 |
++ if (find_trace_event_call(trace_probe_group_name(tp), |
3286 |
++ trace_probe_name(tp))) |
3287 |
++ return -EEXIST; |
3288 |
++ |
3289 |
+ ret = register_trace_event(&call->event); |
3290 |
+ if (!ret) |
3291 |
+ return -ENODEV; |
3292 |
+diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h |
3293 |
+index 2f703a20c724c..6d41e20c47ced 100644 |
3294 |
+--- a/kernel/trace/trace_probe.h |
3295 |
++++ b/kernel/trace/trace_probe.h |
3296 |
+@@ -398,6 +398,7 @@ extern int traceprobe_define_arg_fields(struct trace_event_call *event_call, |
3297 |
+ C(NO_EVENT_NAME, "Event name is not specified"), \ |
3298 |
+ C(EVENT_TOO_LONG, "Event name is too long"), \ |
3299 |
+ C(BAD_EVENT_NAME, "Event name must follow the same rules as C identifiers"), \ |
3300 |
++ C(EVENT_EXIST, "Given group/event name is already used by another event"), \ |
3301 |
+ C(RETVAL_ON_PROBE, "$retval is not available on probe"), \ |
3302 |
+ C(BAD_STACK_NUM, "Invalid stack number"), \ |
3303 |
+ C(BAD_ARG_NUM, "Invalid argument number"), \ |
3304 |
+diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c |
3305 |
+index 3cf7128e1ad30..0dd6e286e5196 100644 |
3306 |
+--- a/kernel/trace/trace_uprobe.c |
3307 |
++++ b/kernel/trace/trace_uprobe.c |
3308 |
+@@ -514,7 +514,11 @@ static int register_trace_uprobe(struct trace_uprobe *tu) |
3309 |
+ |
3310 |
+ ret = register_uprobe_event(tu); |
3311 |
+ if (ret) { |
3312 |
+- pr_warn("Failed to register probe event(%d)\n", ret); |
3313 |
++ if (ret == -EEXIST) { |
3314 |
++ trace_probe_log_set_index(0); |
3315 |
++ trace_probe_log_err(0, EVENT_EXIST); |
3316 |
++ } else |
3317 |
++ pr_warn("Failed to register probe event(%d)\n", ret); |
3318 |
+ goto end; |
3319 |
+ } |
3320 |
+ |
3321 |
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c |
3322 |
+index b9de2df5b8358..6275b1c05f111 100644 |
3323 |
+--- a/mm/memory_hotplug.c |
3324 |
++++ b/mm/memory_hotplug.c |
3325 |
+@@ -765,8 +765,8 @@ static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn |
3326 |
+ return movable_node_enabled ? movable_zone : kernel_zone; |
3327 |
+ } |
3328 |
+ |
3329 |
+-struct zone * zone_for_pfn_range(int online_type, int nid, unsigned start_pfn, |
3330 |
+- unsigned long nr_pages) |
3331 |
++struct zone *zone_for_pfn_range(int online_type, int nid, |
3332 |
++ unsigned long start_pfn, unsigned long nr_pages) |
3333 |
+ { |
3334 |
+ if (online_type == MMOP_ONLINE_KERNEL) |
3335 |
+ return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages); |
3336 |
+diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c |
3337 |
+index 79b6a04d8eb61..42dc080a4dbbc 100644 |
3338 |
+--- a/net/caif/chnl_net.c |
3339 |
++++ b/net/caif/chnl_net.c |
3340 |
+@@ -53,20 +53,6 @@ struct chnl_net { |
3341 |
+ enum caif_states state; |
3342 |
+ }; |
3343 |
+ |
3344 |
+-static void robust_list_del(struct list_head *delete_node) |
3345 |
+-{ |
3346 |
+- struct list_head *list_node; |
3347 |
+- struct list_head *n; |
3348 |
+- ASSERT_RTNL(); |
3349 |
+- list_for_each_safe(list_node, n, &chnl_net_list) { |
3350 |
+- if (list_node == delete_node) { |
3351 |
+- list_del(list_node); |
3352 |
+- return; |
3353 |
+- } |
3354 |
+- } |
3355 |
+- WARN_ON(1); |
3356 |
+-} |
3357 |
+- |
3358 |
+ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt) |
3359 |
+ { |
3360 |
+ struct sk_buff *skb; |
3361 |
+@@ -369,6 +355,7 @@ static int chnl_net_init(struct net_device *dev) |
3362 |
+ ASSERT_RTNL(); |
3363 |
+ priv = netdev_priv(dev); |
3364 |
+ strncpy(priv->name, dev->name, sizeof(priv->name)); |
3365 |
++ INIT_LIST_HEAD(&priv->list_field); |
3366 |
+ return 0; |
3367 |
+ } |
3368 |
+ |
3369 |
+@@ -377,7 +364,7 @@ static void chnl_net_uninit(struct net_device *dev) |
3370 |
+ struct chnl_net *priv; |
3371 |
+ ASSERT_RTNL(); |
3372 |
+ priv = netdev_priv(dev); |
3373 |
+- robust_list_del(&priv->list_field); |
3374 |
++ list_del_init(&priv->list_field); |
3375 |
+ } |
3376 |
+ |
3377 |
+ static const struct net_device_ops netdev_ops = { |
3378 |
+@@ -542,7 +529,7 @@ static void __exit chnl_exit_module(void) |
3379 |
+ rtnl_lock(); |
3380 |
+ list_for_each_safe(list_node, _tmp, &chnl_net_list) { |
3381 |
+ dev = list_entry(list_node, struct chnl_net, list_field); |
3382 |
+- list_del(list_node); |
3383 |
++ list_del_init(list_node); |
3384 |
+ delete_device(dev); |
3385 |
+ } |
3386 |
+ rtnl_unlock(); |
3387 |
+diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c |
3388 |
+index c5c74a34d139d..91e7a22026971 100644 |
3389 |
+--- a/net/dccp/minisocks.c |
3390 |
++++ b/net/dccp/minisocks.c |
3391 |
+@@ -94,6 +94,8 @@ struct sock *dccp_create_openreq_child(const struct sock *sk, |
3392 |
+ newdp->dccps_role = DCCP_ROLE_SERVER; |
3393 |
+ newdp->dccps_hc_rx_ackvec = NULL; |
3394 |
+ newdp->dccps_service_list = NULL; |
3395 |
++ newdp->dccps_hc_rx_ccid = NULL; |
3396 |
++ newdp->dccps_hc_tx_ccid = NULL; |
3397 |
+ newdp->dccps_service = dreq->dreq_service; |
3398 |
+ newdp->dccps_timestamp_echo = dreq->dreq_timestamp_echo; |
3399 |
+ newdp->dccps_timestamp_time = dreq->dreq_timestamp_time; |
3400 |
+diff --git a/net/dsa/slave.c b/net/dsa/slave.c |
3401 |
+index 9281c9c6a253e..65b125bb3b860 100644 |
3402 |
+--- a/net/dsa/slave.c |
3403 |
++++ b/net/dsa/slave.c |
3404 |
+@@ -1728,13 +1728,11 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev) |
3405 |
+ * use the switch internal MDIO bus instead |
3406 |
+ */ |
3407 |
+ ret = dsa_slave_phy_connect(slave_dev, dp->index); |
3408 |
+- if (ret) { |
3409 |
+- netdev_err(slave_dev, |
3410 |
+- "failed to connect to port %d: %d\n", |
3411 |
+- dp->index, ret); |
3412 |
+- phylink_destroy(dp->pl); |
3413 |
+- return ret; |
3414 |
+- } |
3415 |
++ } |
3416 |
++ if (ret) { |
3417 |
++ netdev_err(slave_dev, "failed to connect to PHY: %pe\n", |
3418 |
++ ERR_PTR(ret)); |
3419 |
++ phylink_destroy(dp->pl); |
3420 |
+ } |
3421 |
+ |
3422 |
+ return ret; |
3423 |
+diff --git a/net/dsa/tag_rtl4_a.c b/net/dsa/tag_rtl4_a.c |
3424 |
+index e9176475bac89..24375ebd684e8 100644 |
3425 |
+--- a/net/dsa/tag_rtl4_a.c |
3426 |
++++ b/net/dsa/tag_rtl4_a.c |
3427 |
+@@ -54,9 +54,10 @@ static struct sk_buff *rtl4a_tag_xmit(struct sk_buff *skb, |
3428 |
+ p = (__be16 *)tag; |
3429 |
+ *p = htons(RTL4_A_ETHERTYPE); |
3430 |
+ |
3431 |
+- out = (RTL4_A_PROTOCOL_RTL8366RB << 12) | (2 << 8); |
3432 |
+- /* The lower bits is the port number */ |
3433 |
+- out |= (u8)dp->index; |
3434 |
++ out = (RTL4_A_PROTOCOL_RTL8366RB << RTL4_A_PROTOCOL_SHIFT) | (2 << 8); |
3435 |
++ /* The lower bits indicate the port number */ |
3436 |
++ out |= BIT(dp->index); |
3437 |
++ |
3438 |
+ p = (__be16 *)(tag + 2); |
3439 |
+ *p = htons(out); |
3440 |
+ |
3441 |
+diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c |
3442 |
+index 97b402b2d6fbd..80d2a00d30977 100644 |
3443 |
+--- a/net/ethtool/ioctl.c |
3444 |
++++ b/net/ethtool/ioctl.c |
3445 |
+@@ -906,7 +906,7 @@ static int ethtool_rxnfc_copy_to_user(void __user *useraddr, |
3446 |
+ rule_buf); |
3447 |
+ useraddr += offsetof(struct compat_ethtool_rxnfc, rule_locs); |
3448 |
+ } else { |
3449 |
+- ret = copy_to_user(useraddr, &rxnfc, size); |
3450 |
++ ret = copy_to_user(useraddr, rxnfc, size); |
3451 |
+ useraddr += offsetof(struct ethtool_rxnfc, rule_locs); |
3452 |
+ } |
3453 |
+ |
3454 |
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c |
3455 |
+index a0829495b211e..a9cc05043fa47 100644 |
3456 |
+--- a/net/ipv4/ip_gre.c |
3457 |
++++ b/net/ipv4/ip_gre.c |
3458 |
+@@ -468,8 +468,6 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev, |
3459 |
+ |
3460 |
+ static int gre_handle_offloads(struct sk_buff *skb, bool csum) |
3461 |
+ { |
3462 |
+- if (csum && skb_checksum_start(skb) < skb->data) |
3463 |
+- return -EINVAL; |
3464 |
+ return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE); |
3465 |
+ } |
3466 |
+ |
3467 |
+@@ -627,15 +625,20 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb, |
3468 |
+ } |
3469 |
+ |
3470 |
+ if (dev->header_ops) { |
3471 |
++ const int pull_len = tunnel->hlen + sizeof(struct iphdr); |
3472 |
++ |
3473 |
+ if (skb_cow_head(skb, 0)) |
3474 |
+ goto free_skb; |
3475 |
+ |
3476 |
+ tnl_params = (const struct iphdr *)skb->data; |
3477 |
+ |
3478 |
++ if (pull_len > skb_transport_offset(skb)) |
3479 |
++ goto free_skb; |
3480 |
++ |
3481 |
+ /* Pull skb since ip_tunnel_xmit() needs skb->data pointing |
3482 |
+ * to gre header. |
3483 |
+ */ |
3484 |
+- skb_pull(skb, tunnel->hlen + sizeof(struct iphdr)); |
3485 |
++ skb_pull(skb, pull_len); |
3486 |
+ skb_reset_mac_header(skb); |
3487 |
+ } else { |
3488 |
+ if (skb_cow_head(skb, dev->needed_headroom)) |
3489 |
+diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c |
3490 |
+index f2d313c5900df..1075cc2136ac6 100644 |
3491 |
+--- a/net/ipv4/nexthop.c |
3492 |
++++ b/net/ipv4/nexthop.c |
3493 |
+@@ -1303,6 +1303,7 @@ static int nh_create_ipv4(struct net *net, struct nexthop *nh, |
3494 |
+ .fc_gw4 = cfg->gw.ipv4, |
3495 |
+ .fc_gw_family = cfg->gw.ipv4 ? AF_INET : 0, |
3496 |
+ .fc_flags = cfg->nh_flags, |
3497 |
++ .fc_nlinfo = cfg->nlinfo, |
3498 |
+ .fc_encap = cfg->nh_encap, |
3499 |
+ .fc_encap_type = cfg->nh_encap_type, |
3500 |
+ }; |
3501 |
+@@ -1341,6 +1342,7 @@ static int nh_create_ipv6(struct net *net, struct nexthop *nh, |
3502 |
+ .fc_ifindex = cfg->nh_ifindex, |
3503 |
+ .fc_gateway = cfg->gw.ipv6, |
3504 |
+ .fc_flags = cfg->nh_flags, |
3505 |
++ .fc_nlinfo = cfg->nlinfo, |
3506 |
+ .fc_encap = cfg->nh_encap, |
3507 |
+ .fc_encap_type = cfg->nh_encap_type, |
3508 |
+ .fc_is_fdb = cfg->nh_fdb, |
3509 |
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c |
3510 |
+index ac8d38e044002..991e3434957b8 100644 |
3511 |
+--- a/net/ipv4/tcp_input.c |
3512 |
++++ b/net/ipv4/tcp_input.c |
3513 |
+@@ -1314,7 +1314,7 @@ static u8 tcp_sacktag_one(struct sock *sk, |
3514 |
+ if (dup_sack && (sacked & TCPCB_RETRANS)) { |
3515 |
+ if (tp->undo_marker && tp->undo_retrans > 0 && |
3516 |
+ after(end_seq, tp->undo_marker)) |
3517 |
+- tp->undo_retrans--; |
3518 |
++ tp->undo_retrans = max_t(int, 0, tp->undo_retrans - pcount); |
3519 |
+ if ((sacked & TCPCB_SACKED_ACKED) && |
3520 |
+ before(start_seq, state->reord)) |
3521 |
+ state->reord = start_seq; |
3522 |
+diff --git a/net/ipv4/udp_tunnel_nic.c b/net/ipv4/udp_tunnel_nic.c |
3523 |
+index 0d122edc368dd..b91003538d87a 100644 |
3524 |
+--- a/net/ipv4/udp_tunnel_nic.c |
3525 |
++++ b/net/ipv4/udp_tunnel_nic.c |
3526 |
+@@ -935,7 +935,7 @@ static int __init udp_tunnel_nic_init_module(void) |
3527 |
+ { |
3528 |
+ int err; |
3529 |
+ |
3530 |
+- udp_tunnel_nic_workqueue = alloc_workqueue("udp_tunnel_nic", 0, 0); |
3531 |
++ udp_tunnel_nic_workqueue = alloc_ordered_workqueue("udp_tunnel_nic", 0); |
3532 |
+ if (!udp_tunnel_nic_workqueue) |
3533 |
+ return -ENOMEM; |
3534 |
+ |
3535 |
+diff --git a/net/ipv6/netfilter/nf_socket_ipv6.c b/net/ipv6/netfilter/nf_socket_ipv6.c |
3536 |
+index 6fd54744cbc38..aa5bb8789ba0b 100644 |
3537 |
+--- a/net/ipv6/netfilter/nf_socket_ipv6.c |
3538 |
++++ b/net/ipv6/netfilter/nf_socket_ipv6.c |
3539 |
+@@ -99,7 +99,7 @@ struct sock *nf_sk_lookup_slow_v6(struct net *net, const struct sk_buff *skb, |
3540 |
+ { |
3541 |
+ __be16 dport, sport; |
3542 |
+ const struct in6_addr *daddr = NULL, *saddr = NULL; |
3543 |
+- struct ipv6hdr *iph = ipv6_hdr(skb); |
3544 |
++ struct ipv6hdr *iph = ipv6_hdr(skb), ipv6_var; |
3545 |
+ struct sk_buff *data_skb = NULL; |
3546 |
+ int doff = 0; |
3547 |
+ int thoff = 0, tproto; |
3548 |
+@@ -129,8 +129,6 @@ struct sock *nf_sk_lookup_slow_v6(struct net *net, const struct sk_buff *skb, |
3549 |
+ thoff + sizeof(*hp); |
3550 |
+ |
3551 |
+ } else if (tproto == IPPROTO_ICMPV6) { |
3552 |
+- struct ipv6hdr ipv6_var; |
3553 |
+- |
3554 |
+ if (extract_icmp6_fields(skb, thoff, &tproto, &saddr, &daddr, |
3555 |
+ &sport, &dport, &ipv6_var)) |
3556 |
+ return NULL; |
3557 |
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c |
3558 |
+index 203890e378cb0..561b6d67ab8b9 100644 |
3559 |
+--- a/net/l2tp/l2tp_core.c |
3560 |
++++ b/net/l2tp/l2tp_core.c |
3561 |
+@@ -869,8 +869,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb) |
3562 |
+ } |
3563 |
+ |
3564 |
+ if (tunnel->version == L2TP_HDR_VER_3 && |
3565 |
+- l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) |
3566 |
++ l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) { |
3567 |
++ l2tp_session_dec_refcount(session); |
3568 |
+ goto invalid; |
3569 |
++ } |
3570 |
+ |
3571 |
+ l2tp_recv_common(session, skb, ptr, optr, hdrflags, length); |
3572 |
+ l2tp_session_dec_refcount(session); |
3573 |
+diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c |
3574 |
+index b3f4a334f9d78..94001eb51ffe4 100644 |
3575 |
+--- a/net/netfilter/nf_conntrack_proto_dccp.c |
3576 |
++++ b/net/netfilter/nf_conntrack_proto_dccp.c |
3577 |
+@@ -397,6 +397,7 @@ dccp_new(struct nf_conn *ct, const struct sk_buff *skb, |
3578 |
+ msg = "not picking up existing connection "; |
3579 |
+ goto out_invalid; |
3580 |
+ } |
3581 |
++ break; |
3582 |
+ case CT_DCCP_REQUEST: |
3583 |
+ break; |
3584 |
+ case CT_DCCP_INVALID: |
3585 |
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c |
3586 |
+index 2b5f97e1d40b9..c605a3e713e76 100644 |
3587 |
+--- a/net/netfilter/nf_tables_api.c |
3588 |
++++ b/net/netfilter/nf_tables_api.c |
3589 |
+@@ -8394,6 +8394,7 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx, |
3590 |
+ data->verdict.chain); |
3591 |
+ if (err < 0) |
3592 |
+ return err; |
3593 |
++ break; |
3594 |
+ default: |
3595 |
+ break; |
3596 |
+ } |
3597 |
+diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c |
3598 |
+index 70d46e0bbf064..7fcb73ac2e6ed 100644 |
3599 |
+--- a/net/netfilter/nft_ct.c |
3600 |
++++ b/net/netfilter/nft_ct.c |
3601 |
+@@ -41,6 +41,7 @@ struct nft_ct_helper_obj { |
3602 |
+ #ifdef CONFIG_NF_CONNTRACK_ZONES |
3603 |
+ static DEFINE_PER_CPU(struct nf_conn *, nft_ct_pcpu_template); |
3604 |
+ static unsigned int nft_ct_pcpu_template_refcnt __read_mostly; |
3605 |
++static DEFINE_MUTEX(nft_ct_pcpu_mutex); |
3606 |
+ #endif |
3607 |
+ |
3608 |
+ static u64 nft_ct_get_eval_counter(const struct nf_conn_counter *c, |
3609 |
+@@ -526,8 +527,11 @@ static void __nft_ct_set_destroy(const struct nft_ctx *ctx, struct nft_ct *priv) |
3610 |
+ #endif |
3611 |
+ #ifdef CONFIG_NF_CONNTRACK_ZONES |
3612 |
+ case NFT_CT_ZONE: |
3613 |
++ mutex_lock(&nft_ct_pcpu_mutex); |
3614 |
+ if (--nft_ct_pcpu_template_refcnt == 0) |
3615 |
+ nft_ct_tmpl_put_pcpu(); |
3616 |
++ mutex_unlock(&nft_ct_pcpu_mutex); |
3617 |
++ break; |
3618 |
+ #endif |
3619 |
+ default: |
3620 |
+ break; |
3621 |
+@@ -564,9 +568,13 @@ static int nft_ct_set_init(const struct nft_ctx *ctx, |
3622 |
+ #endif |
3623 |
+ #ifdef CONFIG_NF_CONNTRACK_ZONES |
3624 |
+ case NFT_CT_ZONE: |
3625 |
+- if (!nft_ct_tmpl_alloc_pcpu()) |
3626 |
++ mutex_lock(&nft_ct_pcpu_mutex); |
3627 |
++ if (!nft_ct_tmpl_alloc_pcpu()) { |
3628 |
++ mutex_unlock(&nft_ct_pcpu_mutex); |
3629 |
+ return -ENOMEM; |
3630 |
++ } |
3631 |
+ nft_ct_pcpu_template_refcnt++; |
3632 |
++ mutex_unlock(&nft_ct_pcpu_mutex); |
3633 |
+ len = sizeof(u16); |
3634 |
+ break; |
3635 |
+ #endif |
3636 |
+diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c |
3637 |
+index bbd5f87536006..99e8db2621984 100644 |
3638 |
+--- a/net/sched/sch_fq_codel.c |
3639 |
++++ b/net/sched/sch_fq_codel.c |
3640 |
+@@ -369,6 +369,7 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt, |
3641 |
+ { |
3642 |
+ struct fq_codel_sched_data *q = qdisc_priv(sch); |
3643 |
+ struct nlattr *tb[TCA_FQ_CODEL_MAX + 1]; |
3644 |
++ u32 quantum = 0; |
3645 |
+ int err; |
3646 |
+ |
3647 |
+ if (!opt) |
3648 |
+@@ -386,6 +387,13 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt, |
3649 |
+ q->flows_cnt > 65536) |
3650 |
+ return -EINVAL; |
3651 |
+ } |
3652 |
++ if (tb[TCA_FQ_CODEL_QUANTUM]) { |
3653 |
++ quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM])); |
3654 |
++ if (quantum > FQ_CODEL_QUANTUM_MAX) { |
3655 |
++ NL_SET_ERR_MSG(extack, "Invalid quantum"); |
3656 |
++ return -EINVAL; |
3657 |
++ } |
3658 |
++ } |
3659 |
+ sch_tree_lock(sch); |
3660 |
+ |
3661 |
+ if (tb[TCA_FQ_CODEL_TARGET]) { |
3662 |
+@@ -412,8 +420,8 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt, |
3663 |
+ if (tb[TCA_FQ_CODEL_ECN]) |
3664 |
+ q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]); |
3665 |
+ |
3666 |
+- if (tb[TCA_FQ_CODEL_QUANTUM]) |
3667 |
+- q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM])); |
3668 |
++ if (quantum) |
3669 |
++ q->quantum = quantum; |
3670 |
+ |
3671 |
+ if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]) |
3672 |
+ q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])); |
3673 |
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c |
3674 |
+index 963047c57c27b..ce957ee5383c4 100644 |
3675 |
+--- a/net/tipc/socket.c |
3676 |
++++ b/net/tipc/socket.c |
3677 |
+@@ -1980,10 +1980,12 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m, |
3678 |
+ tipc_node_distr_xmit(sock_net(sk), &xmitq); |
3679 |
+ } |
3680 |
+ |
3681 |
+- if (!skb_cb->bytes_read) |
3682 |
+- tsk_advance_rx_queue(sk); |
3683 |
++ if (skb_cb->bytes_read) |
3684 |
++ goto exit; |
3685 |
++ |
3686 |
++ tsk_advance_rx_queue(sk); |
3687 |
+ |
3688 |
+- if (likely(!connected) || skb_cb->bytes_read) |
3689 |
++ if (likely(!connected)) |
3690 |
+ goto exit; |
3691 |
+ |
3692 |
+ /* Send connection flow control advertisement when applicable */ |
3693 |
+@@ -2420,7 +2422,7 @@ static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) |
3694 |
+ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk, |
3695 |
+ u32 dport, struct sk_buff_head *xmitq) |
3696 |
+ { |
3697 |
+- unsigned long time_limit = jiffies + 2; |
3698 |
++ unsigned long time_limit = jiffies + usecs_to_jiffies(20000); |
3699 |
+ struct sk_buff *skb; |
3700 |
+ unsigned int lim; |
3701 |
+ atomic_t *dcnt; |
3702 |
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c |
3703 |
+index 37ffa7725cee2..d5c0ae34b1e45 100644 |
3704 |
+--- a/net/unix/af_unix.c |
3705 |
++++ b/net/unix/af_unix.c |
3706 |
+@@ -2769,7 +2769,7 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock, |
3707 |
+ |
3708 |
+ other = unix_peer(sk); |
3709 |
+ if (other && unix_peer(other) != sk && |
3710 |
+- unix_recvq_full(other) && |
3711 |
++ unix_recvq_full_lockless(other) && |
3712 |
+ unix_dgram_peer_wake_me(sk, other)) |
3713 |
+ writable = 0; |
3714 |
+ |
3715 |
+diff --git a/scripts/clang-tools/gen_compile_commands.py b/scripts/clang-tools/gen_compile_commands.py |
3716 |
+index 8ddb5d099029f..8bf55bb4f515c 100755 |
3717 |
+--- a/scripts/clang-tools/gen_compile_commands.py |
3718 |
++++ b/scripts/clang-tools/gen_compile_commands.py |
3719 |
+@@ -13,6 +13,7 @@ import logging |
3720 |
+ import os |
3721 |
+ import re |
3722 |
+ import subprocess |
3723 |
++import sys |
3724 |
+ |
3725 |
+ _DEFAULT_OUTPUT = 'compile_commands.json' |
3726 |
+ _DEFAULT_LOG_LEVEL = 'WARNING' |
3727 |
+diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config |
3728 |
+index 2abbd75fbf2e3..014b959575cae 100644 |
3729 |
+--- a/tools/perf/Makefile.config |
3730 |
++++ b/tools/perf/Makefile.config |
3731 |
+@@ -127,10 +127,10 @@ FEATURE_CHECK_LDFLAGS-libunwind = $(LIBUNWIND_LDFLAGS) $(LIBUNWIND_LIBS) |
3732 |
+ FEATURE_CHECK_CFLAGS-libunwind-debug-frame = $(LIBUNWIND_CFLAGS) |
3733 |
+ FEATURE_CHECK_LDFLAGS-libunwind-debug-frame = $(LIBUNWIND_LDFLAGS) $(LIBUNWIND_LIBS) |
3734 |
+ |
3735 |
+-FEATURE_CHECK_LDFLAGS-libunwind-arm = -lunwind -lunwind-arm |
3736 |
+-FEATURE_CHECK_LDFLAGS-libunwind-aarch64 = -lunwind -lunwind-aarch64 |
3737 |
+-FEATURE_CHECK_LDFLAGS-libunwind-x86 = -lunwind -llzma -lunwind-x86 |
3738 |
+-FEATURE_CHECK_LDFLAGS-libunwind-x86_64 = -lunwind -llzma -lunwind-x86_64 |
3739 |
++FEATURE_CHECK_LDFLAGS-libunwind-arm += -lunwind -lunwind-arm |
3740 |
++FEATURE_CHECK_LDFLAGS-libunwind-aarch64 += -lunwind -lunwind-aarch64 |
3741 |
++FEATURE_CHECK_LDFLAGS-libunwind-x86 += -lunwind -llzma -lunwind-x86 |
3742 |
++FEATURE_CHECK_LDFLAGS-libunwind-x86_64 += -lunwind -llzma -lunwind-x86_64 |
3743 |
+ |
3744 |
+ FEATURE_CHECK_LDFLAGS-libcrypto = -lcrypto |
3745 |
+ |
3746 |
+diff --git a/tools/perf/bench/inject-buildid.c b/tools/perf/bench/inject-buildid.c |
3747 |
+index 280227e3ffd7a..f4ec01da8da68 100644 |
3748 |
+--- a/tools/perf/bench/inject-buildid.c |
3749 |
++++ b/tools/perf/bench/inject-buildid.c |
3750 |
+@@ -133,7 +133,7 @@ static u64 dso_map_addr(struct bench_dso *dso) |
3751 |
+ return 0x400000ULL + dso->ino * 8192ULL; |
3752 |
+ } |
3753 |
+ |
3754 |
+-static u32 synthesize_attr(struct bench_data *data) |
3755 |
++static ssize_t synthesize_attr(struct bench_data *data) |
3756 |
+ { |
3757 |
+ union perf_event event; |
3758 |
+ |
3759 |
+@@ -151,7 +151,7 @@ static u32 synthesize_attr(struct bench_data *data) |
3760 |
+ return writen(data->input_pipe[1], &event, event.header.size); |
3761 |
+ } |
3762 |
+ |
3763 |
+-static u32 synthesize_fork(struct bench_data *data) |
3764 |
++static ssize_t synthesize_fork(struct bench_data *data) |
3765 |
+ { |
3766 |
+ union perf_event event; |
3767 |
+ |
3768 |
+@@ -169,8 +169,7 @@ static u32 synthesize_fork(struct bench_data *data) |
3769 |
+ return writen(data->input_pipe[1], &event, event.header.size); |
3770 |
+ } |
3771 |
+ |
3772 |
+-static u32 synthesize_mmap(struct bench_data *data, struct bench_dso *dso, |
3773 |
+- u64 timestamp) |
3774 |
++static ssize_t synthesize_mmap(struct bench_data *data, struct bench_dso *dso, u64 timestamp) |
3775 |
+ { |
3776 |
+ union perf_event event; |
3777 |
+ size_t len = offsetof(struct perf_record_mmap2, filename); |
3778 |
+@@ -198,23 +197,25 @@ static u32 synthesize_mmap(struct bench_data *data, struct bench_dso *dso, |
3779 |
+ |
3780 |
+ if (len > sizeof(event.mmap2)) { |
3781 |
+ /* write mmap2 event first */ |
3782 |
+- writen(data->input_pipe[1], &event, len - bench_id_hdr_size); |
3783 |
++ if (writen(data->input_pipe[1], &event, len - bench_id_hdr_size) < 0) |
3784 |
++ return -1; |
3785 |
+ /* zero-fill sample id header */ |
3786 |
+ memset(id_hdr_ptr, 0, bench_id_hdr_size); |
3787 |
+ /* put timestamp in the right position */ |
3788 |
+ ts_idx = (bench_id_hdr_size / sizeof(u64)) - 2; |
3789 |
+ id_hdr_ptr[ts_idx] = timestamp; |
3790 |
+- writen(data->input_pipe[1], id_hdr_ptr, bench_id_hdr_size); |
3791 |
+- } else { |
3792 |
+- ts_idx = (len / sizeof(u64)) - 2; |
3793 |
+- id_hdr_ptr[ts_idx] = timestamp; |
3794 |
+- writen(data->input_pipe[1], &event, len); |
3795 |
++ if (writen(data->input_pipe[1], id_hdr_ptr, bench_id_hdr_size) < 0) |
3796 |
++ return -1; |
3797 |
++ |
3798 |
++ return len; |
3799 |
+ } |
3800 |
+- return len; |
3801 |
++ |
3802 |
++ ts_idx = (len / sizeof(u64)) - 2; |
3803 |
++ id_hdr_ptr[ts_idx] = timestamp; |
3804 |
++ return writen(data->input_pipe[1], &event, len); |
3805 |
+ } |
3806 |
+ |
3807 |
+-static u32 synthesize_sample(struct bench_data *data, struct bench_dso *dso, |
3808 |
+- u64 timestamp) |
3809 |
++static ssize_t synthesize_sample(struct bench_data *data, struct bench_dso *dso, u64 timestamp) |
3810 |
+ { |
3811 |
+ union perf_event event; |
3812 |
+ struct perf_sample sample = { |
3813 |
+@@ -233,7 +234,7 @@ static u32 synthesize_sample(struct bench_data *data, struct bench_dso *dso, |
3814 |
+ return writen(data->input_pipe[1], &event, event.header.size); |
3815 |
+ } |
3816 |
+ |
3817 |
+-static u32 synthesize_flush(struct bench_data *data) |
3818 |
++static ssize_t synthesize_flush(struct bench_data *data) |
3819 |
+ { |
3820 |
+ struct perf_event_header header = { |
3821 |
+ .size = sizeof(header), |
3822 |
+@@ -348,14 +349,16 @@ static int inject_build_id(struct bench_data *data, u64 *max_rss) |
3823 |
+ int status; |
3824 |
+ unsigned int i, k; |
3825 |
+ struct rusage rusage; |
3826 |
+- u64 len = 0; |
3827 |
+ |
3828 |
+ /* this makes the child to run */ |
3829 |
+ if (perf_header__write_pipe(data->input_pipe[1]) < 0) |
3830 |
+ return -1; |
3831 |
+ |
3832 |
+- len += synthesize_attr(data); |
3833 |
+- len += synthesize_fork(data); |
3834 |
++ if (synthesize_attr(data) < 0) |
3835 |
++ return -1; |
3836 |
++ |
3837 |
++ if (synthesize_fork(data) < 0) |
3838 |
++ return -1; |
3839 |
+ |
3840 |
+ for (i = 0; i < nr_mmaps; i++) { |
3841 |
+ int idx = rand() % (nr_dsos - 1); |
3842 |
+@@ -363,13 +366,18 @@ static int inject_build_id(struct bench_data *data, u64 *max_rss) |
3843 |
+ u64 timestamp = rand() % 1000000; |
3844 |
+ |
3845 |
+ pr_debug2(" [%d] injecting: %s\n", i+1, dso->name); |
3846 |
+- len += synthesize_mmap(data, dso, timestamp); |
3847 |
++ if (synthesize_mmap(data, dso, timestamp) < 0) |
3848 |
++ return -1; |
3849 |
+ |
3850 |
+- for (k = 0; k < nr_samples; k++) |
3851 |
+- len += synthesize_sample(data, dso, timestamp + k * 1000); |
3852 |
++ for (k = 0; k < nr_samples; k++) { |
3853 |
++ if (synthesize_sample(data, dso, timestamp + k * 1000) < 0) |
3854 |
++ return -1; |
3855 |
++ } |
3856 |
+ |
3857 |
+- if ((i + 1) % 10 == 0) |
3858 |
+- len += synthesize_flush(data); |
3859 |
++ if ((i + 1) % 10 == 0) { |
3860 |
++ if (synthesize_flush(data) < 0) |
3861 |
++ return -1; |
3862 |
++ } |
3863 |
+ } |
3864 |
+ |
3865 |
+ /* tihs makes the child to finish */ |
3866 |
+diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c |
3867 |
+index 74bf480aa4f05..df515cd8d0184 100644 |
3868 |
+--- a/tools/perf/util/machine.c |
3869 |
++++ b/tools/perf/util/machine.c |
3870 |
+@@ -2100,6 +2100,7 @@ static int add_callchain_ip(struct thread *thread, |
3871 |
+ |
3872 |
+ al.filtered = 0; |
3873 |
+ al.sym = NULL; |
3874 |
++ al.srcline = NULL; |
3875 |
+ if (!cpumode) { |
3876 |
+ thread__find_cpumode_addr_location(thread, ip, &al); |
3877 |
+ } else { |
3878 |
+diff --git a/tools/testing/selftests/net/altnames.sh b/tools/testing/selftests/net/altnames.sh |
3879 |
+index 4254ddc3f70b5..1ef9e4159bba8 100755 |
3880 |
+--- a/tools/testing/selftests/net/altnames.sh |
3881 |
++++ b/tools/testing/selftests/net/altnames.sh |
3882 |
+@@ -45,7 +45,7 @@ altnames_test() |
3883 |
+ check_err $? "Got unexpected long alternative name from link show JSON" |
3884 |
+ |
3885 |
+ ip link property del $DUMMY_DEV altname $SHORT_NAME |
3886 |
+- check_err $? "Failed to add short alternative name" |
3887 |
++ check_err $? "Failed to delete short alternative name" |
3888 |
+ |
3889 |
+ ip -j -p link show $SHORT_NAME &>/dev/null |
3890 |
+ check_fail $? "Unexpected success while trying to do link show with deleted short alternative name" |
3891 |
+diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh |
3892 |
+index 2f649b431456a..8fcb289278182 100755 |
3893 |
+--- a/tools/testing/selftests/net/mptcp/simult_flows.sh |
3894 |
++++ b/tools/testing/selftests/net/mptcp/simult_flows.sh |
3895 |
+@@ -21,8 +21,8 @@ usage() { |
3896 |
+ |
3897 |
+ cleanup() |
3898 |
+ { |
3899 |
+- rm -f "$cin" "$cout" |
3900 |
+- rm -f "$sin" "$sout" |
3901 |
++ rm -f "$cout" "$sout" |
3902 |
++ rm -f "$large" "$small" |
3903 |
+ rm -f "$capout" |
3904 |
+ |
3905 |
+ local netns |