commit: aec749e76d0ff102281e67c557ca37523aeaaaec
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Feb 3 21:21:20 2018 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Feb 3 21:21:20 2018 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=aec749e7

Linux patch 4.14.17

 0000_README              |    8 +
 1016_linux-4.14.17.patch | 5960 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5968 insertions(+)

diff --git a/0000_README b/0000_README
index 8311794..a02a4f5 100644
--- a/0000_README
+++ b/0000_README
@@ -107,6 +107,14 @@ Patch: 1015_linux-4.14.16.patch
From: http://www.kernel.org
Desc: Linux 4.14.16

+Patch: 1016_linux-4.14.17.patch
+From: http://www.kernel.org
+Desc: Linux 4.14.17
+
+Patch: 1017_linux-4.14.18.patch
+From: http://www.kernel.org
+Desc: Linux 4.14.18
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1016_linux-4.14.17.patch b/1016_linux-4.14.17.patch
new file mode 100644
index 0000000..8d3dee3
--- /dev/null
+++ b/1016_linux-4.14.17.patch
@@ -0,0 +1,5960 @@
+diff --git a/Makefile b/Makefile
+index 90a4bffa8446..7ed993896dd5 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 16
++SUBLEVEL = 17
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi
+index dff66974feed..d5f5e92e7488 100644
+--- a/arch/arm/boot/dts/bcm-nsp.dtsi
++++ b/arch/arm/boot/dts/bcm-nsp.dtsi
+@@ -85,7 +85,7 @@
+ 		timer@20200 {
+ 			compatible = "arm,cortex-a9-global-timer";
+ 			reg = <0x20200 0x100>;
+-			interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
+ 			clocks = <&periph_clk>;
+ 		};
+
+@@ -93,7 +93,7 @@
+ 			compatible = "arm,cortex-a9-twd-timer";
+ 			reg = <0x20600 0x20>;
+ 			interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) |
+-						  IRQ_TYPE_LEVEL_HIGH)>;
++						  IRQ_TYPE_EDGE_RISING)>;
+ 			clocks = <&periph_clk>;
+ 		};
+
+diff --git a/arch/arm/boot/dts/bcm958623hr.dts b/arch/arm/boot/dts/bcm958623hr.dts
+index 3bc50849d013..b8bde13de90a 100644
+--- a/arch/arm/boot/dts/bcm958623hr.dts
++++ b/arch/arm/boot/dts/bcm958623hr.dts
+@@ -141,10 +141,6 @@
+ 	status = "okay";
+ };
+
+-&sata {
+-	status = "okay";
+-};
+-
+ &qspi {
+ 	bspi-sel = <0>;
+ 	flash: m25p80@0 {
+diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts
+index d94d14b3c745..6a44b8021702 100644
+--- a/arch/arm/boot/dts/bcm958625hr.dts
++++ b/arch/arm/boot/dts/bcm958625hr.dts
+@@ -177,10 +177,6 @@
+ 	status = "okay";
+ };
+
+-&sata {
+-	status = "okay";
+-};
+-
+ &srab {
+ 	compatible = "brcm,bcm58625-srab", "brcm,nsp-srab";
+ 	status = "okay";
+diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
+index d535edc01434..75fdeaa8c62f 100644
+--- a/arch/mips/kvm/mips.c
++++ b/arch/mips/kvm/mips.c
+@@ -445,10 +445,8 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ {
+ 	int r = -EINTR;
+-	sigset_t sigsaved;
+
+-	if (vcpu->sigset_active)
+-		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
++	kvm_sigset_activate(vcpu);
+
+ 	if (vcpu->mmio_needed) {
+ 		if (!vcpu->mmio_is_write)
+@@ -480,8 +478,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ 	local_irq_enable();
+
+ out:
+-	if (vcpu->sigset_active)
+-		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
++	kvm_sigset_deactivate(vcpu);
+
+ 	return r;
+ }
+diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
+index ee279c7f4802..2b02d51d14d8 100644
+--- a/arch/powerpc/kvm/powerpc.c
++++ b/arch/powerpc/kvm/powerpc.c
+@@ -1407,7 +1407,6 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ {
+ 	int r;
+-	sigset_t sigsaved;
+
+ 	if (vcpu->mmio_needed) {
+ 		vcpu->mmio_needed = 0;
+@@ -1448,16 +1447,14 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ #endif
+ 	}
+
+-	if (vcpu->sigset_active)
+-		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
++	kvm_sigset_activate(vcpu);
+
+ 	if (run->immediate_exit)
+ 		r = -EINTR;
+ 	else
+ 		r = kvmppc_vcpu_run(run, vcpu);
+
+-	if (vcpu->sigset_active)
+-		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
++	kvm_sigset_deactivate(vcpu);
+
+ 	return r;
+ }
+diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
+index 43607bb12cc2..a6cc744ff5fb 100644
+--- a/arch/s390/include/asm/mmu_context.h
++++ b/arch/s390/include/asm/mmu_context.h
+@@ -28,7 +28,7 @@ static inline int init_new_context(struct task_struct *tsk,
+ #ifdef CONFIG_PGSTE
+ 	mm->context.alloc_pgste = page_table_allocate_pgste ||
+ 		test_thread_flag(TIF_PGSTE) ||
+-		current->mm->context.alloc_pgste;
++		(current->mm && current->mm->context.alloc_pgste);
+ 	mm->context.has_pgste = 0;
+ 	mm->context.use_skey = 0;
+ 	mm->context.use_cmma = 0;
+diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
+index 55de4eb73604..de0a8b17bcaa 100644
+--- a/arch/s390/include/asm/topology.h
++++ b/arch/s390/include/asm/topology.h
+@@ -51,6 +51,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu);
+ static inline void topology_init_early(void) { }
+ static inline void topology_schedule_update(void) { }
+ static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
++static inline int topology_cpu_dedicated(int cpu_nr) { return 0; }
+ static inline void topology_expect_change(void) { }
+
+ #endif /* CONFIG_SCHED_TOPOLOGY */
+diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
+index 092c4154abd7..7ffaf9fd6d19 100644
+--- a/arch/s390/kernel/smp.c
++++ b/arch/s390/kernel/smp.c
+@@ -54,6 +54,7 @@
+ #include <asm/sigp.h>
+ #include <asm/idle.h>
+ #include <asm/nmi.h>
++#include <asm/topology.h>
+ #include "entry.h"
+
+ enum {
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 6c88cb18ace2..6e3d80b2048e 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -3378,7 +3378,6 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+ {
+ 	int rc;
+-	sigset_t sigsaved;
+
+ 	if (kvm_run->immediate_exit)
+ 		return -EINTR;
+@@ -3388,8 +3387,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+ 		return 0;
+ 	}
+
+-	if (vcpu->sigset_active)
+-		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
++	kvm_sigset_activate(vcpu);
+
+ 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
+ 		kvm_s390_vcpu_start(vcpu);
+@@ -3423,8 +3421,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+ 	disable_cpu_timer_accounting(vcpu);
+ 	store_regs(vcpu, kvm_run);
+
+-	if (vcpu->sigset_active)
+-		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
++	kvm_sigset_deactivate(vcpu);
+
+ 	vcpu->stat.exit_userspace++;
+ 	return rc;
+diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S |
232 |
+index 3d09e3aca18d..12e8484a8ee7 100644 |
233 |
+--- a/arch/x86/crypto/aesni-intel_asm.S |
234 |
++++ b/arch/x86/crypto/aesni-intel_asm.S |
235 |
+@@ -90,30 +90,6 @@ SHIFT_MASK: .octa 0x0f0e0d0c0b0a09080706050403020100 |
236 |
+ ALL_F: .octa 0xffffffffffffffffffffffffffffffff |
237 |
+ .octa 0x00000000000000000000000000000000 |
238 |
+ |
239 |
+-.section .rodata |
240 |
+-.align 16 |
241 |
+-.type aad_shift_arr, @object |
242 |
+-.size aad_shift_arr, 272 |
243 |
+-aad_shift_arr: |
244 |
+- .octa 0xffffffffffffffffffffffffffffffff |
245 |
+- .octa 0xffffffffffffffffffffffffffffff0C |
246 |
+- .octa 0xffffffffffffffffffffffffffff0D0C |
247 |
+- .octa 0xffffffffffffffffffffffffff0E0D0C |
248 |
+- .octa 0xffffffffffffffffffffffff0F0E0D0C |
249 |
+- .octa 0xffffffffffffffffffffff0C0B0A0908 |
250 |
+- .octa 0xffffffffffffffffffff0D0C0B0A0908 |
251 |
+- .octa 0xffffffffffffffffff0E0D0C0B0A0908 |
252 |
+- .octa 0xffffffffffffffff0F0E0D0C0B0A0908 |
253 |
+- .octa 0xffffffffffffff0C0B0A090807060504 |
254 |
+- .octa 0xffffffffffff0D0C0B0A090807060504 |
255 |
+- .octa 0xffffffffff0E0D0C0B0A090807060504 |
256 |
+- .octa 0xffffffff0F0E0D0C0B0A090807060504 |
257 |
+- .octa 0xffffff0C0B0A09080706050403020100 |
258 |
+- .octa 0xffff0D0C0B0A09080706050403020100 |
259 |
+- .octa 0xff0E0D0C0B0A09080706050403020100 |
260 |
+- .octa 0x0F0E0D0C0B0A09080706050403020100 |
261 |
+- |
262 |
+- |
263 |
+ .text |
264 |
+ |
265 |
+ |
266 |
+@@ -257,6 +233,37 @@ aad_shift_arr: |
267 |
+ pxor \TMP1, \GH # result is in TMP1 |
268 |
+ .endm |
269 |
+ |
270 |
++# Reads DLEN bytes starting at DPTR and stores in XMMDst |
271 |
++# where 0 < DLEN < 16 |
272 |
++# Clobbers %rax, DLEN and XMM1 |
273 |
++.macro READ_PARTIAL_BLOCK DPTR DLEN XMM1 XMMDst |
274 |
++ cmp $8, \DLEN |
275 |
++ jl _read_lt8_\@ |
276 |
++ mov (\DPTR), %rax |
277 |
++ MOVQ_R64_XMM %rax, \XMMDst |
278 |
++ sub $8, \DLEN |
279 |
++ jz _done_read_partial_block_\@ |
280 |
++ xor %eax, %eax |
281 |
++_read_next_byte_\@: |
282 |
++ shl $8, %rax |
283 |
++ mov 7(\DPTR, \DLEN, 1), %al |
284 |
++ dec \DLEN |
285 |
++ jnz _read_next_byte_\@ |
286 |
++ MOVQ_R64_XMM %rax, \XMM1 |
287 |
++ pslldq $8, \XMM1 |
288 |
++ por \XMM1, \XMMDst |
289 |
++ jmp _done_read_partial_block_\@ |
290 |
++_read_lt8_\@: |
291 |
++ xor %eax, %eax |
292 |
++_read_next_byte_lt8_\@: |
293 |
++ shl $8, %rax |
294 |
++ mov -1(\DPTR, \DLEN, 1), %al |
295 |
++ dec \DLEN |
296 |
++ jnz _read_next_byte_lt8_\@ |
297 |
++ MOVQ_R64_XMM %rax, \XMMDst |
298 |
++_done_read_partial_block_\@: |
299 |
++.endm |
300 |
++ |
301 |
+ /* |
302 |
+ * if a = number of total plaintext bytes |
303 |
+ * b = floor(a/16) |
304 |
+@@ -273,62 +280,30 @@ aad_shift_arr: |
305 |
+ XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation |
306 |
+ MOVADQ SHUF_MASK(%rip), %xmm14 |
307 |
+ mov arg7, %r10 # %r10 = AAD |
308 |
+- mov arg8, %r12 # %r12 = aadLen |
309 |
+- mov %r12, %r11 |
310 |
++ mov arg8, %r11 # %r11 = aadLen |
311 |
+ pxor %xmm\i, %xmm\i |
312 |
+ pxor \XMM2, \XMM2 |
313 |
+ |
314 |
+ cmp $16, %r11 |
315 |
+- jl _get_AAD_rest8\num_initial_blocks\operation |
316 |
++ jl _get_AAD_rest\num_initial_blocks\operation |
317 |
+ _get_AAD_blocks\num_initial_blocks\operation: |
318 |
+ movdqu (%r10), %xmm\i |
319 |
+ PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data |
320 |
+ pxor %xmm\i, \XMM2 |
321 |
+ GHASH_MUL \XMM2, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 |
322 |
+ add $16, %r10 |
323 |
+- sub $16, %r12 |
324 |
+ sub $16, %r11 |
325 |
+ cmp $16, %r11 |
326 |
+ jge _get_AAD_blocks\num_initial_blocks\operation |
327 |
+ |
328 |
+ movdqu \XMM2, %xmm\i |
329 |
++ |
330 |
++ /* read the last <16B of AAD */ |
331 |
++_get_AAD_rest\num_initial_blocks\operation: |
332 |
+ cmp $0, %r11 |
333 |
+ je _get_AAD_done\num_initial_blocks\operation |
334 |
+ |
335 |
+- pxor %xmm\i,%xmm\i |
336 |
+- |
337 |
+- /* read the last <16B of AAD. since we have at least 4B of |
338 |
+- data right after the AAD (the ICV, and maybe some CT), we can |
339 |
+- read 4B/8B blocks safely, and then get rid of the extra stuff */ |
340 |
+-_get_AAD_rest8\num_initial_blocks\operation: |
341 |
+- cmp $4, %r11 |
342 |
+- jle _get_AAD_rest4\num_initial_blocks\operation |
343 |
+- movq (%r10), \TMP1 |
344 |
+- add $8, %r10 |
345 |
+- sub $8, %r11 |
346 |
+- pslldq $8, \TMP1 |
347 |
+- psrldq $8, %xmm\i |
348 |
+- pxor \TMP1, %xmm\i |
349 |
+- jmp _get_AAD_rest8\num_initial_blocks\operation |
350 |
+-_get_AAD_rest4\num_initial_blocks\operation: |
351 |
+- cmp $0, %r11 |
352 |
+- jle _get_AAD_rest0\num_initial_blocks\operation |
353 |
+- mov (%r10), %eax |
354 |
+- movq %rax, \TMP1 |
355 |
+- add $4, %r10 |
356 |
+- sub $4, %r10 |
357 |
+- pslldq $12, \TMP1 |
358 |
+- psrldq $4, %xmm\i |
359 |
+- pxor \TMP1, %xmm\i |
360 |
+-_get_AAD_rest0\num_initial_blocks\operation: |
361 |
+- /* finalize: shift out the extra bytes we read, and align |
362 |
+- left. since pslldq can only shift by an immediate, we use |
363 |
+- vpshufb and an array of shuffle masks */ |
364 |
+- movq %r12, %r11 |
365 |
+- salq $4, %r11 |
366 |
+- movdqu aad_shift_arr(%r11), \TMP1 |
367 |
+- PSHUFB_XMM \TMP1, %xmm\i |
368 |
+-_get_AAD_rest_final\num_initial_blocks\operation: |
369 |
++ READ_PARTIAL_BLOCK %r10, %r11, \TMP1, %xmm\i |
370 |
+ PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data |
371 |
+ pxor \XMM2, %xmm\i |
372 |
+ GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 |
373 |
+@@ -532,62 +507,30 @@ _initial_blocks_done\num_initial_blocks\operation: |
374 |
+ XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation |
375 |
+ MOVADQ SHUF_MASK(%rip), %xmm14 |
376 |
+ mov arg7, %r10 # %r10 = AAD |
377 |
+- mov arg8, %r12 # %r12 = aadLen |
378 |
+- mov %r12, %r11 |
379 |
++ mov arg8, %r11 # %r11 = aadLen |
380 |
+ pxor %xmm\i, %xmm\i |
381 |
+ pxor \XMM2, \XMM2 |
382 |
+ |
383 |
+ cmp $16, %r11 |
384 |
+- jl _get_AAD_rest8\num_initial_blocks\operation |
385 |
++ jl _get_AAD_rest\num_initial_blocks\operation |
386 |
+ _get_AAD_blocks\num_initial_blocks\operation: |
387 |
+ movdqu (%r10), %xmm\i |
388 |
+ PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data |
389 |
+ pxor %xmm\i, \XMM2 |
390 |
+ GHASH_MUL \XMM2, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 |
391 |
+ add $16, %r10 |
392 |
+- sub $16, %r12 |
393 |
+ sub $16, %r11 |
394 |
+ cmp $16, %r11 |
395 |
+ jge _get_AAD_blocks\num_initial_blocks\operation |
396 |
+ |
397 |
+ movdqu \XMM2, %xmm\i |
398 |
++ |
399 |
++ /* read the last <16B of AAD */ |
400 |
++_get_AAD_rest\num_initial_blocks\operation: |
401 |
+ cmp $0, %r11 |
402 |
+ je _get_AAD_done\num_initial_blocks\operation |
403 |
+ |
404 |
+- pxor %xmm\i,%xmm\i |
405 |
+- |
406 |
+- /* read the last <16B of AAD. since we have at least 4B of |
407 |
+- data right after the AAD (the ICV, and maybe some PT), we can |
408 |
+- read 4B/8B blocks safely, and then get rid of the extra stuff */ |
409 |
+-_get_AAD_rest8\num_initial_blocks\operation: |
410 |
+- cmp $4, %r11 |
411 |
+- jle _get_AAD_rest4\num_initial_blocks\operation |
412 |
+- movq (%r10), \TMP1 |
413 |
+- add $8, %r10 |
414 |
+- sub $8, %r11 |
415 |
+- pslldq $8, \TMP1 |
416 |
+- psrldq $8, %xmm\i |
417 |
+- pxor \TMP1, %xmm\i |
418 |
+- jmp _get_AAD_rest8\num_initial_blocks\operation |
419 |
+-_get_AAD_rest4\num_initial_blocks\operation: |
420 |
+- cmp $0, %r11 |
421 |
+- jle _get_AAD_rest0\num_initial_blocks\operation |
422 |
+- mov (%r10), %eax |
423 |
+- movq %rax, \TMP1 |
424 |
+- add $4, %r10 |
425 |
+- sub $4, %r10 |
426 |
+- pslldq $12, \TMP1 |
427 |
+- psrldq $4, %xmm\i |
428 |
+- pxor \TMP1, %xmm\i |
429 |
+-_get_AAD_rest0\num_initial_blocks\operation: |
430 |
+- /* finalize: shift out the extra bytes we read, and align |
431 |
+- left. since pslldq can only shift by an immediate, we use |
432 |
+- vpshufb and an array of shuffle masks */ |
433 |
+- movq %r12, %r11 |
434 |
+- salq $4, %r11 |
435 |
+- movdqu aad_shift_arr(%r11), \TMP1 |
436 |
+- PSHUFB_XMM \TMP1, %xmm\i |
437 |
+-_get_AAD_rest_final\num_initial_blocks\operation: |
438 |
++ READ_PARTIAL_BLOCK %r10, %r11, \TMP1, %xmm\i |
439 |
+ PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data |
440 |
+ pxor \XMM2, %xmm\i |
441 |
+ GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 |
442 |
+@@ -1386,14 +1329,6 @@ _esb_loop_\@: |
443 |
+ * |
444 |
+ * AAD Format with 64-bit Extended Sequence Number |
445 |
+ * |
446 |
+-* aadLen: |
447 |
+-* from the definition of the spec, aadLen can only be 8 or 12 bytes. |
448 |
+-* The code supports 16 too but for other sizes, the code will fail. |
449 |
+-* |
450 |
+-* TLen: |
451 |
+-* from the definition of the spec, TLen can only be 8, 12 or 16 bytes. |
452 |
+-* For other sizes, the code will fail. |
453 |
+-* |
454 |
+ * poly = x^128 + x^127 + x^126 + x^121 + 1 |
455 |
+ * |
456 |
+ *****************************************************************************/ |
457 |
+@@ -1487,19 +1422,16 @@ _zero_cipher_left_decrypt: |
458 |
+ PSHUFB_XMM %xmm10, %xmm0 |
459 |
+ |
460 |
+ ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Yn) |
461 |
+- sub $16, %r11 |
462 |
+- add %r13, %r11 |
463 |
+- movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block |
464 |
+- lea SHIFT_MASK+16(%rip), %r12 |
465 |
+- sub %r13, %r12 |
466 |
+-# adjust the shuffle mask pointer to be able to shift 16-%r13 bytes |
467 |
+-# (%r13 is the number of bytes in plaintext mod 16) |
468 |
+- movdqu (%r12), %xmm2 # get the appropriate shuffle mask |
469 |
+- PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes |
470 |
+ |
471 |
++ lea (%arg3,%r11,1), %r10 |
472 |
++ mov %r13, %r12 |
473 |
++ READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1 |
474 |
++ |
475 |
++ lea ALL_F+16(%rip), %r12 |
476 |
++ sub %r13, %r12 |
477 |
+ movdqa %xmm1, %xmm2 |
478 |
+ pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn) |
479 |
+- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1 |
480 |
++ movdqu (%r12), %xmm1 |
481 |
+ # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0 |
482 |
+ pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0 |
483 |
+ pand %xmm1, %xmm2 |
484 |
+@@ -1508,9 +1440,6 @@ _zero_cipher_left_decrypt: |
485 |
+ |
486 |
+ pxor %xmm2, %xmm8 |
487 |
+ GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6 |
488 |
+- # GHASH computation for the last <16 byte block |
489 |
+- sub %r13, %r11 |
490 |
+- add $16, %r11 |
491 |
+ |
492 |
+ # output %r13 bytes |
493 |
+ MOVQ_R64_XMM %xmm0, %rax |
494 |
+@@ -1664,14 +1593,6 @@ ENDPROC(aesni_gcm_dec) |
495 |
+ * |
496 |
+ * AAD Format with 64-bit Extended Sequence Number |
497 |
+ * |
498 |
+-* aadLen: |
499 |
+-* from the definition of the spec, aadLen can only be 8 or 12 bytes. |
500 |
+-* The code supports 16 too but for other sizes, the code will fail. |
501 |
+-* |
502 |
+-* TLen: |
503 |
+-* from the definition of the spec, TLen can only be 8, 12 or 16 bytes. |
504 |
+-* For other sizes, the code will fail. |
505 |
+-* |
506 |
+ * poly = x^128 + x^127 + x^126 + x^121 + 1 |
507 |
+ ***************************************************************************/ |
508 |
+ ENTRY(aesni_gcm_enc) |
509 |
+@@ -1764,19 +1685,16 @@ _zero_cipher_left_encrypt: |
510 |
+ movdqa SHUF_MASK(%rip), %xmm10 |
511 |
+ PSHUFB_XMM %xmm10, %xmm0 |
512 |
+ |
513 |
+- |
514 |
+ ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # Encrypt(K, Yn) |
515 |
+- sub $16, %r11 |
516 |
+- add %r13, %r11 |
517 |
+- movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks |
518 |
+- lea SHIFT_MASK+16(%rip), %r12 |
519 |
++ |
520 |
++ lea (%arg3,%r11,1), %r10 |
521 |
++ mov %r13, %r12 |
522 |
++ READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1 |
523 |
++ |
524 |
++ lea ALL_F+16(%rip), %r12 |
525 |
+ sub %r13, %r12 |
526 |
+- # adjust the shuffle mask pointer to be able to shift 16-r13 bytes |
527 |
+- # (%r13 is the number of bytes in plaintext mod 16) |
528 |
+- movdqu (%r12), %xmm2 # get the appropriate shuffle mask |
529 |
+- PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte |
530 |
+ pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn) |
531 |
+- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1 |
532 |
++ movdqu (%r12), %xmm1 |
533 |
+ # get the appropriate mask to mask out top 16-r13 bytes of xmm0 |
534 |
+ pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0 |
535 |
+ movdqa SHUF_MASK(%rip), %xmm10 |
536 |
+@@ -1785,9 +1703,6 @@ _zero_cipher_left_encrypt: |
537 |
+ pxor %xmm0, %xmm8 |
538 |
+ GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6 |
539 |
+ # GHASH computation for the last <16 byte block |
540 |
+- sub %r13, %r11 |
541 |
+- add $16, %r11 |
542 |
+- |
543 |
+ movdqa SHUF_MASK(%rip), %xmm10 |
544 |
+ PSHUFB_XMM %xmm10, %xmm0 |
545 |
+ |
546 |
+diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c |
547 |
+index 5c15d6b57329..c690ddc78c03 100644 |
548 |
+--- a/arch/x86/crypto/aesni-intel_glue.c |
549 |
++++ b/arch/x86/crypto/aesni-intel_glue.c |
550 |
+@@ -28,6 +28,7 @@ |
551 |
+ #include <crypto/cryptd.h> |
552 |
+ #include <crypto/ctr.h> |
553 |
+ #include <crypto/b128ops.h> |
554 |
++#include <crypto/gcm.h> |
555 |
+ #include <crypto/xts.h> |
556 |
+ #include <asm/cpu_device_id.h> |
557 |
+ #include <asm/fpu/api.h> |
558 |
+@@ -689,8 +690,8 @@ static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key, |
559 |
+ rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len); |
560 |
+ } |
561 |
+ |
562 |
+-static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key, |
563 |
+- unsigned int key_len) |
564 |
++static int gcmaes_wrapper_set_key(struct crypto_aead *parent, const u8 *key, |
565 |
++ unsigned int key_len) |
566 |
+ { |
567 |
+ struct cryptd_aead **ctx = crypto_aead_ctx(parent); |
568 |
+ struct cryptd_aead *cryptd_tfm = *ctx; |
569 |
+@@ -715,8 +716,8 @@ static int common_rfc4106_set_authsize(struct crypto_aead *aead, |
570 |
+ |
571 |
+ /* This is the Integrity Check Value (aka the authentication tag length and can |
572 |
+ * be 8, 12 or 16 bytes long. */ |
573 |
+-static int rfc4106_set_authsize(struct crypto_aead *parent, |
574 |
+- unsigned int authsize) |
575 |
++static int gcmaes_wrapper_set_authsize(struct crypto_aead *parent, |
576 |
++ unsigned int authsize) |
577 |
+ { |
578 |
+ struct cryptd_aead **ctx = crypto_aead_ctx(parent); |
579 |
+ struct cryptd_aead *cryptd_tfm = *ctx; |
580 |
+@@ -823,7 +824,7 @@ static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen, |
581 |
+ if (sg_is_last(req->src) && |
582 |
+ (!PageHighMem(sg_page(req->src)) || |
583 |
+ req->src->offset + req->src->length <= PAGE_SIZE) && |
584 |
+- sg_is_last(req->dst) && |
585 |
++ sg_is_last(req->dst) && req->dst->length && |
586 |
+ (!PageHighMem(sg_page(req->dst)) || |
587 |
+ req->dst->offset + req->dst->length <= PAGE_SIZE)) { |
588 |
+ one_entry_in_sg = 1; |
589 |
+@@ -928,7 +929,7 @@ static int helper_rfc4106_decrypt(struct aead_request *req) |
590 |
+ aes_ctx); |
591 |
+ } |
592 |
+ |
593 |
+-static int rfc4106_encrypt(struct aead_request *req) |
594 |
++static int gcmaes_wrapper_encrypt(struct aead_request *req) |
595 |
+ { |
596 |
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
597 |
+ struct cryptd_aead **ctx = crypto_aead_ctx(tfm); |
598 |
+@@ -944,7 +945,7 @@ static int rfc4106_encrypt(struct aead_request *req) |
599 |
+ return crypto_aead_encrypt(req); |
600 |
+ } |
601 |
+ |
602 |
+-static int rfc4106_decrypt(struct aead_request *req) |
603 |
++static int gcmaes_wrapper_decrypt(struct aead_request *req) |
604 |
+ { |
605 |
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
606 |
+ struct cryptd_aead **ctx = crypto_aead_ctx(tfm); |
607 |
+@@ -1115,7 +1116,7 @@ static int generic_gcmaes_decrypt(struct aead_request *req) |
608 |
+ { |
609 |
+ __be32 counter = cpu_to_be32(1); |
610 |
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
611 |
+- struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); |
612 |
++ struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm); |
613 |
+ void *aes_ctx = &(ctx->aes_key_expanded); |
614 |
+ u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); |
615 |
+ |
616 |
+@@ -1126,12 +1127,36 @@ static int generic_gcmaes_decrypt(struct aead_request *req) |
617 |
+ aes_ctx); |
618 |
+ } |
619 |
+ |
620 |
++static int generic_gcmaes_init(struct crypto_aead *aead) |
621 |
++{ |
622 |
++ struct cryptd_aead *cryptd_tfm; |
623 |
++ struct cryptd_aead **ctx = crypto_aead_ctx(aead); |
624 |
++ |
625 |
++ cryptd_tfm = cryptd_alloc_aead("__driver-generic-gcm-aes-aesni", |
626 |
++ CRYPTO_ALG_INTERNAL, |
627 |
++ CRYPTO_ALG_INTERNAL); |
628 |
++ if (IS_ERR(cryptd_tfm)) |
629 |
++ return PTR_ERR(cryptd_tfm); |
630 |
++ |
631 |
++ *ctx = cryptd_tfm; |
632 |
++ crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base)); |
633 |
++ |
634 |
++ return 0; |
635 |
++} |
636 |
++ |
637 |
++static void generic_gcmaes_exit(struct crypto_aead *aead) |
638 |
++{ |
639 |
++ struct cryptd_aead **ctx = crypto_aead_ctx(aead); |
640 |
++ |
641 |
++ cryptd_free_aead(*ctx); |
642 |
++} |
643 |
++ |
644 |
+ static struct aead_alg aesni_aead_algs[] = { { |
645 |
+ .setkey = common_rfc4106_set_key, |
646 |
+ .setauthsize = common_rfc4106_set_authsize, |
647 |
+ .encrypt = helper_rfc4106_encrypt, |
648 |
+ .decrypt = helper_rfc4106_decrypt, |
649 |
+- .ivsize = 8, |
650 |
++ .ivsize = GCM_RFC4106_IV_SIZE, |
651 |
+ .maxauthsize = 16, |
652 |
+ .base = { |
653 |
+ .cra_name = "__gcm-aes-aesni", |
654 |
+@@ -1145,11 +1170,11 @@ static struct aead_alg aesni_aead_algs[] = { { |
655 |
+ }, { |
656 |
+ .init = rfc4106_init, |
657 |
+ .exit = rfc4106_exit, |
658 |
+- .setkey = rfc4106_set_key, |
659 |
+- .setauthsize = rfc4106_set_authsize, |
660 |
+- .encrypt = rfc4106_encrypt, |
661 |
+- .decrypt = rfc4106_decrypt, |
662 |
+- .ivsize = 8, |
663 |
++ .setkey = gcmaes_wrapper_set_key, |
664 |
++ .setauthsize = gcmaes_wrapper_set_authsize, |
665 |
++ .encrypt = gcmaes_wrapper_encrypt, |
666 |
++ .decrypt = gcmaes_wrapper_decrypt, |
667 |
++ .ivsize = GCM_RFC4106_IV_SIZE, |
668 |
+ .maxauthsize = 16, |
669 |
+ .base = { |
670 |
+ .cra_name = "rfc4106(gcm(aes))", |
671 |
+@@ -1165,7 +1190,26 @@ static struct aead_alg aesni_aead_algs[] = { { |
672 |
+ .setauthsize = generic_gcmaes_set_authsize, |
673 |
+ .encrypt = generic_gcmaes_encrypt, |
674 |
+ .decrypt = generic_gcmaes_decrypt, |
675 |
+- .ivsize = 12, |
676 |
++ .ivsize = GCM_AES_IV_SIZE, |
677 |
++ .maxauthsize = 16, |
678 |
++ .base = { |
679 |
++ .cra_name = "__generic-gcm-aes-aesni", |
680 |
++ .cra_driver_name = "__driver-generic-gcm-aes-aesni", |
681 |
++ .cra_priority = 0, |
682 |
++ .cra_flags = CRYPTO_ALG_INTERNAL, |
683 |
++ .cra_blocksize = 1, |
684 |
++ .cra_ctxsize = sizeof(struct generic_gcmaes_ctx), |
685 |
++ .cra_alignmask = AESNI_ALIGN - 1, |
686 |
++ .cra_module = THIS_MODULE, |
687 |
++ }, |
688 |
++}, { |
689 |
++ .init = generic_gcmaes_init, |
690 |
++ .exit = generic_gcmaes_exit, |
691 |
++ .setkey = gcmaes_wrapper_set_key, |
692 |
++ .setauthsize = gcmaes_wrapper_set_authsize, |
693 |
++ .encrypt = gcmaes_wrapper_encrypt, |
694 |
++ .decrypt = gcmaes_wrapper_decrypt, |
695 |
++ .ivsize = GCM_AES_IV_SIZE, |
696 |
+ .maxauthsize = 16, |
697 |
+ .base = { |
698 |
+ .cra_name = "gcm(aes)", |
699 |
+@@ -1173,8 +1217,7 @@ static struct aead_alg aesni_aead_algs[] = { { |
700 |
+ .cra_priority = 400, |
701 |
+ .cra_flags = CRYPTO_ALG_ASYNC, |
702 |
+ .cra_blocksize = 1, |
703 |
+- .cra_ctxsize = sizeof(struct generic_gcmaes_ctx), |
704 |
+- .cra_alignmask = AESNI_ALIGN - 1, |
705 |
++ .cra_ctxsize = sizeof(struct cryptd_aead *), |
706 |
+ .cra_module = THIS_MODULE, |
707 |
+ }, |
708 |
+ } }; |
709 |
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h |
710 |
+index eb38ac9d9a31..4f8b80199672 100644 |
711 |
+--- a/arch/x86/include/asm/kvm_host.h |
712 |
++++ b/arch/x86/include/asm/kvm_host.h |
713 |
+@@ -1156,7 +1156,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2, |
714 |
+ static inline int emulate_instruction(struct kvm_vcpu *vcpu, |
715 |
+ int emulation_type) |
716 |
+ { |
717 |
+- return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0); |
718 |
++ return x86_emulate_instruction(vcpu, 0, |
719 |
++ emulation_type | EMULTYPE_NO_REEXECUTE, NULL, 0); |
720 |
+ } |
721 |
+ |
722 |
+ void kvm_enable_efer_bits(u64); |
723 |
+diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h |
724 |
+index b20f9d623f9c..8f09012b92e7 100644 |
725 |
+--- a/arch/x86/include/asm/segment.h |
726 |
++++ b/arch/x86/include/asm/segment.h |
727 |
+@@ -236,11 +236,23 @@ |
728 |
+ */ |
729 |
+ #define EARLY_IDT_HANDLER_SIZE 9 |
730 |
+ |
731 |
++/* |
732 |
++ * xen_early_idt_handler_array is for Xen pv guests: for each entry in |
733 |
++ * early_idt_handler_array it contains a prequel in the form of |
734 |
++ * pop %rcx; pop %r11; jmp early_idt_handler_array[i]; summing up to |
735 |
++ * max 8 bytes. |
736 |
++ */ |
737 |
++#define XEN_EARLY_IDT_HANDLER_SIZE 8 |
738 |
++ |
739 |
+ #ifndef __ASSEMBLY__ |
740 |
+ |
741 |
+ extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE]; |
742 |
+ extern void early_ignore_irq(void); |
743 |
+ |
744 |
++#if defined(CONFIG_X86_64) && defined(CONFIG_XEN_PV) |
745 |
++extern const char xen_early_idt_handler_array[NUM_EXCEPTION_VECTORS][XEN_EARLY_IDT_HANDLER_SIZE]; |
746 |
++#endif |
747 |
++ |
748 |
+ /* |
749 |
+ * Load a segment. Fall back on loading the zero segment if something goes |
750 |
+ * wrong. This variant assumes that loading zero fully clears the segment. |
751 |
+diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h |
752 |
+index cdc70a3a6583..c2cea6651279 100644 |
753 |
+--- a/arch/x86/kvm/cpuid.h |
754 |
++++ b/arch/x86/kvm/cpuid.h |
755 |
+@@ -44,7 +44,7 @@ static const struct cpuid_reg reverse_cpuid[] = { |
756 |
+ [CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX}, |
757 |
+ [CPUID_1_ECX] = { 1, 0, CPUID_ECX}, |
758 |
+ [CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX}, |
759 |
+- [CPUID_8000_0001_ECX] = {0xc0000001, 0, CPUID_ECX}, |
760 |
++ [CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX}, |
761 |
+ [CPUID_7_0_EBX] = { 7, 0, CPUID_EBX}, |
762 |
+ [CPUID_D_1_EAX] = { 0xd, 1, CPUID_EAX}, |
763 |
+ [CPUID_F_0_EDX] = { 0xf, 0, CPUID_EDX}, |
764 |
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c |
765 |
+index 7bbb5da2b49d..eca6a89f2326 100644 |
766 |
+--- a/arch/x86/kvm/emulate.c |
767 |
++++ b/arch/x86/kvm/emulate.c |
768 |
+@@ -4023,6 +4023,26 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt) |
769 |
+ fxstate_size(ctxt)); |
770 |
+ } |
771 |
+ |
772 |
++/* |
773 |
++ * FXRSTOR might restore XMM registers not provided by the guest. Fill |
774 |
++ * in the host registers (via FXSAVE) instead, so they won't be modified. |
775 |
++ * (preemption has to stay disabled until FXRSTOR). |
776 |
++ * |
777 |
++ * Use noinline to keep the stack for other functions called by callers small. |
778 |
++ */ |
779 |
++static noinline int fxregs_fixup(struct fxregs_state *fx_state, |
780 |
++ const size_t used_size) |
781 |
++{ |
782 |
++ struct fxregs_state fx_tmp; |
783 |
++ int rc; |
784 |
++ |
785 |
++ rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp)); |
786 |
++ memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size, |
787 |
++ __fxstate_size(16) - used_size); |
788 |
++ |
789 |
++ return rc; |
790 |
++} |
791 |
++ |
792 |
+ static int em_fxrstor(struct x86_emulate_ctxt *ctxt) |
793 |
+ { |
794 |
+ struct fxregs_state fx_state; |
795 |
+@@ -4033,19 +4053,19 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt) |
796 |
+ if (rc != X86EMUL_CONTINUE) |
797 |
+ return rc; |
798 |
+ |
799 |
++ size = fxstate_size(ctxt); |
800 |
++ rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size); |
801 |
++ if (rc != X86EMUL_CONTINUE) |
802 |
++ return rc; |
803 |
++ |
804 |
+ ctxt->ops->get_fpu(ctxt); |
805 |
+ |
806 |
+- size = fxstate_size(ctxt); |
807 |
+ if (size < __fxstate_size(16)) { |
808 |
+- rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state)); |
809 |
++ rc = fxregs_fixup(&fx_state, size); |
810 |
+ if (rc != X86EMUL_CONTINUE) |
811 |
+ goto out; |
812 |
+ } |
813 |
+ |
814 |
+- rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size); |
815 |
+- if (rc != X86EMUL_CONTINUE) |
816 |
+- goto out; |
817 |
+- |
818 |
+ if (fx_state.mxcsr >> 16) { |
819 |
+ rc = emulate_gp(ctxt, 0); |
820 |
+ goto out; |
821 |
+@@ -5009,6 +5029,8 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) |
822 |
+ bool op_prefix = false; |
823 |
+ bool has_seg_override = false; |
824 |
+ struct opcode opcode; |
825 |
++ u16 dummy; |
826 |
++ struct desc_struct desc; |
827 |
+ |
828 |
+ ctxt->memop.type = OP_NONE; |
829 |
+ ctxt->memopp = NULL; |
830 |
+@@ -5027,6 +5049,11 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) |
831 |
+ switch (mode) { |
832 |
+ case X86EMUL_MODE_REAL: |
833 |
+ case X86EMUL_MODE_VM86: |
834 |
++ def_op_bytes = def_ad_bytes = 2; |
835 |
++ ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS); |
836 |
++ if (desc.d) |
837 |
++ def_op_bytes = def_ad_bytes = 4; |
838 |
++ break; |
839 |
+ case X86EMUL_MODE_PROT16: |
840 |
+ def_op_bytes = def_ad_bytes = 2; |
841 |
+ break; |
842 |
+diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c |
843 |
+index bdff437acbcb..9d270ba9643c 100644 |
844 |
+--- a/arch/x86/kvm/ioapic.c |
845 |
++++ b/arch/x86/kvm/ioapic.c |
846 |
+@@ -257,8 +257,7 @@ void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors) |
847 |
+ index == RTC_GSI) { |
848 |
+ if (kvm_apic_match_dest(vcpu, NULL, 0, |
849 |
+ e->fields.dest_id, e->fields.dest_mode) || |
850 |
+- (e->fields.trig_mode == IOAPIC_EDGE_TRIG && |
851 |
+- kvm_apic_pending_eoi(vcpu, e->fields.vector))) |
852 |
++ kvm_apic_pending_eoi(vcpu, e->fields.vector)) |
853 |
+ __set_bit(e->fields.vector, |
854 |
+ ioapic_handled_vectors); |
855 |
+ } |
856 |
+@@ -277,6 +276,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) |
857 |
+ { |
858 |
+ unsigned index; |
859 |
+ bool mask_before, mask_after; |
860 |
++ int old_remote_irr, old_delivery_status; |
861 |
+ union kvm_ioapic_redirect_entry *e; |
862 |
+ |
863 |
+ switch (ioapic->ioregsel) { |
864 |
+@@ -299,14 +299,28 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) |
865 |
+ return; |
866 |
+ e = &ioapic->redirtbl[index]; |
867 |
+ mask_before = e->fields.mask; |
868 |
++ /* Preserve read-only fields */ |
869 |
++ old_remote_irr = e->fields.remote_irr; |
870 |
++ old_delivery_status = e->fields.delivery_status; |
871 |
+ if (ioapic->ioregsel & 1) { |
872 |
+ e->bits &= 0xffffffff; |
873 |
+ e->bits |= (u64) val << 32; |
874 |
+ } else { |
875 |
+ e->bits &= ~0xffffffffULL; |
876 |
+ e->bits |= (u32) val; |
877 |
+- e->fields.remote_irr = 0; |
878 |
+ } |
879 |
++ e->fields.remote_irr = old_remote_irr; |
880 |
++ e->fields.delivery_status = old_delivery_status; |
881 |
++ |
882 |
++ /* |
883 |
++ * Some OSes (Linux, Xen) assume that Remote IRR bit will |
884 |
++ * be cleared by IOAPIC hardware when the entry is configured |
885 |
++ * as edge-triggered. This behavior is used to simulate an |
886 |
++ * explicit EOI on IOAPICs that don't have the EOI register. |
887 |
++ */ |
888 |
++ if (e->fields.trig_mode == IOAPIC_EDGE_TRIG) |
889 |
++ e->fields.remote_irr = 0; |
890 |
++ |
891 |
+ mask_after = e->fields.mask; |
892 |
+ if (mask_before != mask_after) |
893 |
+ kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after); |
894 |
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c |
895 |
+index ef16cf0f7cfd..a45063a9219c 100644 |
896 |
+--- a/arch/x86/kvm/vmx.c |
897 |
++++ b/arch/x86/kvm/vmx.c |
898 |
+@@ -5606,7 +5606,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) |
899 |
+ vmcs_write64(GUEST_IA32_DEBUGCTL, 0); |
900 |
+ } |
901 |
+ |
902 |
+- vmcs_writel(GUEST_RFLAGS, 0x02); |
903 |
++ kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); |
904 |
+ kvm_rip_write(vcpu, 0xfff0); |
905 |
+ |
906 |
+ vmcs_writel(GUEST_GDTR_BASE, 0); |
907 |
+@@ -5919,10 +5919,6 @@ static int handle_exception(struct kvm_vcpu *vcpu) |
908 |
+ return 1; /* already handled by vmx_vcpu_run() */ |
909 |
+ |
910 |
+ if (is_invalid_opcode(intr_info)) { |
911 |
+- if (is_guest_mode(vcpu)) { |
912 |
+- kvm_queue_exception(vcpu, UD_VECTOR); |
913 |
+- return 1; |
914 |
+- } |
915 |
+ er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD); |
916 |
+ if (er == EMULATE_USER_EXIT) |
917 |
+ return 0; |
918 |
+@@ -6608,7 +6604,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) |
919 |
+ if (kvm_test_request(KVM_REQ_EVENT, vcpu)) |
920 |
+ return 1; |
921 |
+ |
922 |
+- err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE); |
923 |
++ err = emulate_instruction(vcpu, 0); |
924 |
+ |
925 |
+ if (err == EMULATE_USER_EXIT) { |
926 |
+ ++vcpu->stat.mmio_exits; |
927 |
+@@ -11115,13 +11111,12 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) |
928 |
+ { |
929 |
+ struct vcpu_vmx *vmx = to_vmx(vcpu); |
930 |
+ unsigned long exit_qual; |
931 |
+- |
932 |
+- if (kvm_event_needs_reinjection(vcpu)) |
933 |
+- return -EBUSY; |
934 |
++ bool block_nested_events = |
935 |
++ vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu); |
936 |
+ |
937 |
+ if (vcpu->arch.exception.pending && |
938 |
+ nested_vmx_check_exception(vcpu, &exit_qual)) { |
939 |
+- if (vmx->nested.nested_run_pending) |
940 |
++ if (block_nested_events) |
941 |
+ return -EBUSY; |
942 |
+ nested_vmx_inject_exception_vmexit(vcpu, exit_qual); |
943 |
+ vcpu->arch.exception.pending = false; |
944 |
+@@ -11130,14 +11125,14 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) |
945 |
+ |
946 |
+ if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) && |
947 |
+ vmx->nested.preemption_timer_expired) { |
948 |
+- if (vmx->nested.nested_run_pending) |
949 |
++ if (block_nested_events) |
950 |
+ return -EBUSY; |
951 |
+ nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0); |
952 |
+ return 0; |
953 |
+ } |
954 |
+ |
955 |
+ if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) { |
956 |
+- if (vmx->nested.nested_run_pending) |
957 |
++ if (block_nested_events) |
958 |
+ return -EBUSY; |
959 |
+ nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, |
960 |
+ NMI_VECTOR | INTR_TYPE_NMI_INTR | |
961 |
+@@ -11153,7 +11148,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) |
962 |
+ |
963 |
+ if ((kvm_cpu_has_interrupt(vcpu) || external_intr) && |
964 |
+ nested_exit_on_intr(vcpu)) { |
965 |
+- if (vmx->nested.nested_run_pending) |
966 |
++ if (block_nested_events) |
967 |
+ return -EBUSY; |
968 |
+ nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0); |
969 |
+ return 0; |
970 |
+@@ -11340,6 +11335,24 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, |
971 |
+ kvm_clear_interrupt_queue(vcpu); |
972 |
+ } |
973 |
+ |
974 |
++static void load_vmcs12_mmu_host_state(struct kvm_vcpu *vcpu, |
975 |
++ struct vmcs12 *vmcs12) |
976 |
++{ |
977 |
++ u32 entry_failure_code; |
978 |
++ |
979 |
++ nested_ept_uninit_mmu_context(vcpu); |
980 |
++ |
981 |
++ /* |
982 |
++ * Only PDPTE load can fail as the value of cr3 was checked on entry and |
983 |
++ * couldn't have changed. |
984 |
++ */ |
985 |
++ if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code)) |
986 |
++ nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL); |
987 |
++ |
988 |
++ if (!enable_ept) |
989 |
++ vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault; |
990 |
++} |
991 |
++ |
992 |
+ /* |
993 |
+ * A part of what we need to when the nested L2 guest exits and we want to |
994 |
+ * run its L1 parent, is to reset L1's guest state to the host state specified |
995 |
+@@ -11353,7 +11366,6 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, |
996 |
+ struct vmcs12 *vmcs12) |
997 |
+ { |
998 |
+ struct kvm_segment seg; |
999 |
+- u32 entry_failure_code; |
1000 |
+ |
1001 |
+ if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) |
1002 |
+ vcpu->arch.efer = vmcs12->host_ia32_efer; |
1003 |
+@@ -11380,17 +11392,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, |
1004 |
+ vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); |
1005 |
+ vmx_set_cr4(vcpu, vmcs12->host_cr4); |
1006 |
+ |
1007 |
+- nested_ept_uninit_mmu_context(vcpu); |
1008 |
+- |
1009 |
+- /* |
1010 |
+- * Only PDPTE load can fail as the value of cr3 was checked on entry and |
1011 |
+- * couldn't have changed. |
1012 |
+- */ |
1013 |
+- if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code)) |
1014 |
+- nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL); |
1015 |
+- |
1016 |
+- if (!enable_ept) |
1017 |
+- vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault; |
1018 |
++ load_vmcs12_mmu_host_state(vcpu, vmcs12); |
1019 |
+ |
1020 |
+ if (enable_vpid) { |
1021 |
+ /* |
1022 |
+@@ -11616,6 +11618,9 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, |
1023 |
+ * accordingly. |
1024 |
+ */ |
1025 |
+ nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); |
1026 |
++ |
1027 |
++ load_vmcs12_mmu_host_state(vcpu, vmcs12); |
1028 |
++ |
1029 |
+ /* |
1030 |
+ * The emulated instruction was already skipped in |
1031 |
+ * nested_vmx_run, but the updated RIP was never |
1032 |
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c |
1033 |
+index 575c8953cc9a..8c28023a43b1 100644 |
1034 |
+--- a/arch/x86/kvm/x86.c |
1035 |
++++ b/arch/x86/kvm/x86.c |
1036 |
+@@ -1795,10 +1795,13 @@ u64 get_kvmclock_ns(struct kvm *kvm) |
1037 |
+ /* both __this_cpu_read() and rdtsc() should be on the same cpu */ |
1038 |
+ get_cpu(); |
1039 |
+ |
1040 |
+- kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL, |
1041 |
+- &hv_clock.tsc_shift, |
1042 |
+- &hv_clock.tsc_to_system_mul); |
1043 |
+- ret = __pvclock_read_cycles(&hv_clock, rdtsc()); |
1044 |
++ if (__this_cpu_read(cpu_tsc_khz)) { |
1045 |
++ kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL, |
1046 |
++ &hv_clock.tsc_shift, |
1047 |
++ &hv_clock.tsc_to_system_mul); |
1048 |
++ ret = __pvclock_read_cycles(&hv_clock, rdtsc()); |
1049 |
++ } else |
1050 |
++ ret = ktime_get_boot_ns() + ka->kvmclock_offset; |
1051 |
+ |
1052 |
+ put_cpu(); |
1053 |
+ |
1054 |
+@@ -5416,7 +5419,7 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu) |
1055 |
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
1056 |
+ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; |
1057 |
+ vcpu->run->internal.ndata = 0; |
1058 |
+- r = EMULATE_FAIL; |
1059 |
++ r = EMULATE_USER_EXIT; |
1060 |
+ } |
1061 |
+ kvm_queue_exception(vcpu, UD_VECTOR); |
1062 |
+ |
1063 |
+@@ -7242,12 +7245,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) |
1064 |
+ { |
1065 |
+ struct fpu *fpu = ¤t->thread.fpu; |
1066 |
+ int r; |
1067 |
+- sigset_t sigsaved; |
1068 |
+ |
1069 |
+ fpu__initialize(fpu); |
1070 |
+ |
1071 |
+- if (vcpu->sigset_active) |
1072 |
+- sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); |
1073 |
++ kvm_sigset_activate(vcpu); |
1074 |
+ |
1075 |
+ if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { |
1076 |
+ if (kvm_run->immediate_exit) { |
1077 |
+@@ -7290,8 +7291,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) |
1078 |
+ |
1079 |
+ out: |
1080 |
+ post_kvm_run_save(vcpu); |
1081 |
+- if (vcpu->sigset_active) |
1082 |
+- sigprocmask(SIG_SETMASK, &sigsaved, NULL); |
1083 |
++ kvm_sigset_deactivate(vcpu); |
1084 |
+ |
1085 |
+ return r; |
1086 |
+ } |
1087 |
+diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c |
1088 |
+index 30bc4812ceb8..9fe656c42aa5 100644 |
1089 |
+--- a/arch/x86/mm/extable.c |
1090 |
++++ b/arch/x86/mm/extable.c |
1091 |
+@@ -1,6 +1,7 @@ |
1092 |
+ #include <linux/extable.h> |
1093 |
+ #include <linux/uaccess.h> |
1094 |
+ #include <linux/sched/debug.h> |
1095 |
++#include <xen/xen.h> |
1096 |
+ |
1097 |
+ #include <asm/fpu/internal.h> |
1098 |
+ #include <asm/traps.h> |
1099 |
+@@ -212,8 +213,9 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr) |
1100 |
+ * Old CPUs leave the high bits of CS on the stack |
1101 |
+ * undefined. I'm not sure which CPUs do this, but at least |
1102 |
+ * the 486 DX works this way. |
1103 |
++ * Xen pv domains are not using the default __KERNEL_CS. |
1104 |
+ */ |
1105 |
+- if (regs->cs != __KERNEL_CS) |
1106 |
++ if (!xen_pv_domain() && regs->cs != __KERNEL_CS) |
1107 |
+ goto fail; |
1108 |
+ |
1109 |
+ /* |
1110 |
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c |
1111 |
+index ae3a071e1d0f..899a22a02e95 100644 |
1112 |
+--- a/arch/x86/xen/enlighten_pv.c |
1113 |
++++ b/arch/x86/xen/enlighten_pv.c |
1114 |
+@@ -622,7 +622,7 @@ static struct trap_array_entry trap_array[] = { |
1115 |
+ { simd_coprocessor_error, xen_simd_coprocessor_error, false }, |
1116 |
+ }; |
1117 |
+ |
1118 |
+-static bool get_trap_addr(void **addr, unsigned int ist) |
1119 |
++static bool __ref get_trap_addr(void **addr, unsigned int ist) |
1120 |
+ { |
1121 |
+ unsigned int nr; |
1122 |
+ bool ist_okay = false; |
1123 |
+@@ -644,6 +644,14 @@ static bool get_trap_addr(void **addr, unsigned int ist) |
1124 |
+ } |
1125 |
+ } |
1126 |
+ |
1127 |
++ if (nr == ARRAY_SIZE(trap_array) && |
1128 |
++ *addr >= (void *)early_idt_handler_array[0] && |
1129 |
++ *addr < (void *)early_idt_handler_array[NUM_EXCEPTION_VECTORS]) { |
1130 |
++ nr = (*addr - (void *)early_idt_handler_array[0]) / |
1131 |
++ EARLY_IDT_HANDLER_SIZE; |
1132 |
++ *addr = (void *)xen_early_idt_handler_array[nr]; |
1133 |
++ } |
1134 |
++ |
1135 |
+ if (WARN_ON(ist != 0 && !ist_okay)) |
1136 |
+ return false; |
1137 |
+ |
1138 |
+@@ -1261,6 +1269,21 @@ asmlinkage __visible void __init xen_start_kernel(void) |
1139 |
+ xen_setup_gdt(0); |
1140 |
+ |
1141 |
+ xen_init_irq_ops(); |
1142 |
++ |
1143 |
++ /* Let's presume PV guests always boot on vCPU with id 0. */ |
1144 |
++ per_cpu(xen_vcpu_id, 0) = 0; |
1145 |
++ |
1146 |
++ /* |
1147 |
++ * Setup xen_vcpu early because idt_setup_early_handler needs it for |
1148 |
++ * local_irq_disable(), irqs_disabled(). |
1149 |
++ * |
1150 |
++ * Don't do the full vcpu_info placement stuff until we have |
1151 |
++ * the cpu_possible_mask and a non-dummy shared_info. |
1152 |
++ */ |
1153 |
++ xen_vcpu_info_reset(0); |
1154 |
++ |
1155 |
++ idt_setup_early_handler(); |
1156 |
++ |
1157 |
+ xen_init_capabilities(); |
1158 |
+ |
1159 |
+ #ifdef CONFIG_X86_LOCAL_APIC |
1160 |
+@@ -1294,18 +1317,6 @@ asmlinkage __visible void __init xen_start_kernel(void) |
1161 |
+ */ |
1162 |
+ acpi_numa = -1; |
1163 |
+ #endif |
1164 |
+- /* Let's presume PV guests always boot on vCPU with id 0. */ |
1165 |
+- per_cpu(xen_vcpu_id, 0) = 0; |
1166 |
+- |
1167 |
+- /* |
1168 |
+- * Setup xen_vcpu early because start_kernel needs it for |
1169 |
+- * local_irq_disable(), irqs_disabled(). |
1170 |
+- * |
1171 |
+- * Don't do the full vcpu_info placement stuff until we have |
1172 |
+- * the cpu_possible_mask and a non-dummy shared_info. |
1173 |
+- */ |
1174 |
+- xen_vcpu_info_reset(0); |
1175 |
+- |
1176 |
+ WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_pv, xen_cpu_dead_pv)); |
1177 |
+ |
1178 |
+ local_irq_disable(); |
1179 |
+diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S |
1180 |
+index 8a10c9a9e2b5..417b339e5c8e 100644 |
1181 |
+--- a/arch/x86/xen/xen-asm_64.S |
1182 |
++++ b/arch/x86/xen/xen-asm_64.S |
1183 |
+@@ -15,6 +15,7 @@ |
1184 |
+ |
1185 |
+ #include <xen/interface/xen.h> |
1186 |
+ |
1187 |
++#include <linux/init.h> |
1188 |
+ #include <linux/linkage.h> |
1189 |
+ |
1190 |
+ .macro xen_pv_trap name |
1191 |
+@@ -54,6 +55,19 @@ xen_pv_trap entry_INT80_compat |
1192 |
+ #endif |
1193 |
+ xen_pv_trap hypervisor_callback |
1194 |
+ |
1195 |
++ __INIT |
1196 |
++ENTRY(xen_early_idt_handler_array) |
1197 |
++ i = 0 |
1198 |
++ .rept NUM_EXCEPTION_VECTORS |
1199 |
++ pop %rcx |
1200 |
++ pop %r11 |
1201 |
++ jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE |
1202 |
++ i = i + 1 |
1203 |
++ .fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc |
1204 |
++ .endr |
1205 |
++END(xen_early_idt_handler_array) |
1206 |
++ __FINIT |
1207 |
++ |
1208 |
+ hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32 |
1209 |
+ /* |
1210 |
+ * Xen64 iret frame: |
1211 |
+diff --git a/crypto/Kconfig b/crypto/Kconfig |
1212 |
+index ac5fb37e6f4b..42212b60a0ee 100644 |
1213 |
+--- a/crypto/Kconfig |
1214 |
++++ b/crypto/Kconfig |
1215 |
+@@ -130,7 +130,7 @@ config CRYPTO_DH |
1216 |
+ |
1217 |
+ config CRYPTO_ECDH |
1218 |
+ tristate "ECDH algorithm" |
1219 |
+- select CRYTPO_KPP |
1220 |
++ select CRYPTO_KPP |
1221 |
+ select CRYPTO_RNG_DEFAULT |
1222 |
+ help |
1223 |
+ Generic implementation of the ECDH algorithm |
1224 |
+diff --git a/crypto/af_alg.c b/crypto/af_alg.c |
1225 |
+index 6ec360213107..53b7fa4cf4ab 100644 |
1226 |
+--- a/crypto/af_alg.c |
1227 |
++++ b/crypto/af_alg.c |
1228 |
+@@ -150,7 +150,7 @@ EXPORT_SYMBOL_GPL(af_alg_release_parent); |
1229 |
+ |
1230 |
+ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) |
1231 |
+ { |
1232 |
+- const u32 forbidden = CRYPTO_ALG_INTERNAL; |
1233 |
++ const u32 allowed = CRYPTO_ALG_KERN_DRIVER_ONLY; |
1234 |
+ struct sock *sk = sock->sk; |
1235 |
+ struct alg_sock *ask = alg_sk(sk); |
1236 |
+ struct sockaddr_alg *sa = (void *)uaddr; |
1237 |
+@@ -158,6 +158,10 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) |
1238 |
+ void *private; |
1239 |
+ int err; |
1240 |
+ |
1241 |
++ /* If caller uses non-allowed flag, return error. */ |
1242 |
++ if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed)) |
1243 |
++ return -EINVAL; |
1244 |
++ |
1245 |
+ if (sock->state == SS_CONNECTED) |
1246 |
+ return -EINVAL; |
1247 |
+ |
1248 |
+@@ -176,9 +180,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) |
1249 |
+ if (IS_ERR(type)) |
1250 |
+ return PTR_ERR(type); |
1251 |
+ |
1252 |
+- private = type->bind(sa->salg_name, |
1253 |
+- sa->salg_feat & ~forbidden, |
1254 |
+- sa->salg_mask & ~forbidden); |
1255 |
++ private = type->bind(sa->salg_name, sa->salg_feat, sa->salg_mask); |
1256 |
+ if (IS_ERR(private)) { |
1257 |
+ module_put(type->owner); |
1258 |
+ return PTR_ERR(private); |
1259 |
+diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c |
1260 |
+index 7e8ed96236ce..a68be626017c 100644 |
1261 |
+--- a/crypto/sha3_generic.c |
1262 |
++++ b/crypto/sha3_generic.c |
1263 |
+@@ -18,6 +18,7 @@ |
1264 |
+ #include <linux/types.h> |
1265 |
+ #include <crypto/sha3.h> |
1266 |
+ #include <asm/byteorder.h> |
1267 |
++#include <asm/unaligned.h> |
1268 |
+ |
1269 |
+ #define KECCAK_ROUNDS 24 |
1270 |
+ |
1271 |
+@@ -149,7 +150,7 @@ static int sha3_update(struct shash_desc *desc, const u8 *data, |
1272 |
+ unsigned int i; |
1273 |
+ |
1274 |
+ for (i = 0; i < sctx->rsizw; i++) |
1275 |
+- sctx->st[i] ^= ((u64 *) src)[i]; |
1276 |
++ sctx->st[i] ^= get_unaligned_le64(src + 8 * i); |
1277 |
+ keccakf(sctx->st); |
1278 |
+ |
1279 |
+ done += sctx->rsiz; |
1280 |
+@@ -174,7 +175,7 @@ static int sha3_final(struct shash_desc *desc, u8 *out) |
1281 |
+ sctx->buf[sctx->rsiz - 1] |= 0x80; |
1282 |
+ |
1283 |
+ for (i = 0; i < sctx->rsizw; i++) |
1284 |
+- sctx->st[i] ^= ((u64 *) sctx->buf)[i]; |
1285 |
++ sctx->st[i] ^= get_unaligned_le64(sctx->buf + 8 * i); |
1286 |
+ |
1287 |
+ keccakf(sctx->st); |
1288 |
+ |
1289 |
+diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c |
1290 |
+index 24418932612e..a041689e5701 100644 |
1291 |
+--- a/drivers/acpi/device_sysfs.c |
1292 |
++++ b/drivers/acpi/device_sysfs.c |
1293 |
+@@ -146,6 +146,10 @@ static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias, |
1294 |
+ int count; |
1295 |
+ struct acpi_hardware_id *id; |
1296 |
+ |
1297 |
++ /* Avoid unnecessarily loading modules for non present devices. */ |
1298 |
++ if (!acpi_device_is_present(acpi_dev)) |
1299 |
++ return 0; |
1300 |
++ |
1301 |
+ /* |
1302 |
+ * Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should |
1303 |
+ * be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the |
1304 |
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c |
1305 |
+index a340766b51fe..2ef8bd29e188 100644 |
1306 |
+--- a/drivers/android/binder.c |
1307 |
++++ b/drivers/android/binder.c |
1308 |
+@@ -4302,6 +4302,18 @@ static int binder_thread_release(struct binder_proc *proc, |
1309 |
+ if (t) |
1310 |
+ spin_lock(&t->lock); |
1311 |
+ } |
1312 |
++ |
1313 |
++ /* |
1314 |
++ * If this thread used poll, make sure we remove the waitqueue |
1315 |
++ * from any epoll data structures holding it with POLLFREE. |
1316 |
++ * waitqueue_active() is safe to use here because we're holding |
1317 |
++ * the inner lock. |
1318 |
++ */ |
1319 |
++ if ((thread->looper & BINDER_LOOPER_STATE_POLL) && |
1320 |
++ waitqueue_active(&thread->wait)) { |
1321 |
++ wake_up_poll(&thread->wait, POLLHUP | POLLFREE); |
1322 |
++ } |
1323 |
++ |
1324 |
+ binder_inner_proc_unlock(thread->proc); |
1325 |
+ |
1326 |
+ if (send_reply) |
1327 |
+diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c |
1328 |
+index c2819a3d58a6..6cb148268676 100644 |
1329 |
+--- a/drivers/android/binder_alloc.c |
1330 |
++++ b/drivers/android/binder_alloc.c |
1331 |
+@@ -668,7 +668,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc, |
1332 |
+ goto err_already_mapped; |
1333 |
+ } |
1334 |
+ |
1335 |
+- area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP); |
1336 |
++ area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC); |
1337 |
+ if (area == NULL) { |
1338 |
+ ret = -ENOMEM; |
1339 |
+ failure_string = "get_vm_area"; |
1340 |
+diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig |
1341 |
+index d7d21118d3e0..2c2ed9cf8796 100644 |
1342 |
+--- a/drivers/auxdisplay/Kconfig |
1343 |
++++ b/drivers/auxdisplay/Kconfig |
1344 |
+@@ -136,6 +136,7 @@ config CFAG12864B_RATE |
1345 |
+ |
1346 |
+ config IMG_ASCII_LCD |
1347 |
+ tristate "Imagination Technologies ASCII LCD Display" |
1348 |
++ depends on HAS_IOMEM |
1349 |
+ default y if MIPS_MALTA || MIPS_SEAD3 |
1350 |
+ select SYSCON |
1351 |
+ help |
1352 |
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c |
1353 |
+index 85de67334695..a2a0dce5114e 100644 |
1354 |
+--- a/drivers/block/loop.c |
1355 |
++++ b/drivers/block/loop.c |
1356 |
+@@ -1576,9 +1576,8 @@ static int lo_open(struct block_device *bdev, fmode_t mode) |
1357 |
+ return err; |
1358 |
+ } |
1359 |
+ |
1360 |
+-static void lo_release(struct gendisk *disk, fmode_t mode) |
1361 |
++static void __lo_release(struct loop_device *lo) |
1362 |
+ { |
1363 |
+- struct loop_device *lo = disk->private_data; |
1364 |
+ int err; |
1365 |
+ |
1366 |
+ if (atomic_dec_return(&lo->lo_refcnt)) |
1367 |
+@@ -1605,6 +1604,13 @@ static void lo_release(struct gendisk *disk, fmode_t mode) |
1368 |
+ mutex_unlock(&lo->lo_ctl_mutex); |
1369 |
+ } |
1370 |
+ |
1371 |
++static void lo_release(struct gendisk *disk, fmode_t mode) |
1372 |
++{ |
1373 |
++ mutex_lock(&loop_index_mutex); |
1374 |
++ __lo_release(disk->private_data); |
1375 |
++ mutex_unlock(&loop_index_mutex); |
1376 |
++} |
1377 |
++ |
1378 |
+ static const struct block_device_operations lo_fops = { |
1379 |
+ .owner = THIS_MODULE, |
1380 |
+ .open = lo_open, |
1381 |
+diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c |
1382 |
+index 4d55af5c6e5b..69dfa1d3f453 100644 |
1383 |
+--- a/drivers/block/null_blk.c |
1384 |
++++ b/drivers/block/null_blk.c |
1385 |
+@@ -467,7 +467,6 @@ static void nullb_device_release(struct config_item *item) |
1386 |
+ { |
1387 |
+ struct nullb_device *dev = to_nullb_device(item); |
1388 |
+ |
1389 |
+- badblocks_exit(&dev->badblocks); |
1390 |
+ null_free_device_storage(dev, false); |
1391 |
+ null_free_dev(dev); |
1392 |
+ } |
1393 |
+@@ -578,6 +577,10 @@ static struct nullb_device *null_alloc_dev(void) |
1394 |
+ |
1395 |
+ static void null_free_dev(struct nullb_device *dev) |
1396 |
+ { |
1397 |
++ if (!dev) |
1398 |
++ return; |
1399 |
++ |
1400 |
++ badblocks_exit(&dev->badblocks); |
1401 |
+ kfree(dev); |
1402 |
+ } |
1403 |
+ |
1404 |
+diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig |
1405 |
+index 4ebae43118ef..d8addbce40bc 100644 |
1406 |
+--- a/drivers/cpufreq/Kconfig |
1407 |
++++ b/drivers/cpufreq/Kconfig |
1408 |
+@@ -275,6 +275,7 @@ config BMIPS_CPUFREQ |
1409 |
+ |
1410 |
+ config LOONGSON2_CPUFREQ |
1411 |
+ tristate "Loongson2 CPUFreq Driver" |
1412 |
++ depends on LEMOTE_MACH2F |
1413 |
+ help |
1414 |
+ This option adds a CPUFreq driver for loongson processors which |
1415 |
+ support software configurable cpu frequency. |
1416 |
+@@ -287,6 +288,7 @@ config LOONGSON2_CPUFREQ |
1417 |
+ |
1418 |
+ config LOONGSON1_CPUFREQ |
1419 |
+ tristate "Loongson1 CPUFreq Driver" |
1420 |
++ depends on LOONGSON1_LS1B |
1421 |
+ help |
1422 |
+ This option adds a CPUFreq driver for loongson1 processors which |
1423 |
+ support software configurable cpu frequency. |
1424 |
+diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c |
1425 |
+index 3980f946874f..0626b33d2886 100644 |
1426 |
+--- a/drivers/crypto/inside-secure/safexcel_hash.c |
1427 |
++++ b/drivers/crypto/inside-secure/safexcel_hash.c |
1428 |
+@@ -33,6 +33,8 @@ struct safexcel_ahash_req { |
1429 |
+ bool finish; |
1430 |
+ bool hmac; |
1431 |
+ |
1432 |
++ int nents; |
1433 |
++ |
1434 |
+ u8 state_sz; /* expected sate size, only set once */ |
1435 |
+ u32 state[SHA256_DIGEST_SIZE / sizeof(u32)]; |
1436 |
+ |
1437 |
+@@ -151,8 +153,10 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, |
1438 |
+ result_sz = crypto_ahash_digestsize(ahash); |
1439 |
+ memcpy(sreq->state, areq->result, result_sz); |
1440 |
+ |
1441 |
+- dma_unmap_sg(priv->dev, areq->src, |
1442 |
+- sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE); |
1443 |
++ if (sreq->nents) { |
1444 |
++ dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE); |
1445 |
++ sreq->nents = 0; |
1446 |
++ } |
1447 |
+ |
1448 |
+ safexcel_free_context(priv, async, sreq->state_sz); |
1449 |
+ |
1450 |
+@@ -177,7 +181,7 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring, |
1451 |
+ struct safexcel_command_desc *cdesc, *first_cdesc = NULL; |
1452 |
+ struct safexcel_result_desc *rdesc; |
1453 |
+ struct scatterlist *sg; |
1454 |
+- int i, nents, queued, len, cache_len, extra, n_cdesc = 0, ret = 0; |
1455 |
++ int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0; |
1456 |
+ |
1457 |
+ queued = len = req->len - req->processed; |
1458 |
+ if (queued < crypto_ahash_blocksize(ahash)) |
1459 |
+@@ -185,17 +189,31 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring, |
1460 |
+ else |
1461 |
+ cache_len = queued - areq->nbytes; |
1462 |
+ |
1463 |
+- /* |
1464 |
+- * If this is not the last request and the queued data does not fit |
1465 |
+- * into full blocks, cache it for the next send() call. |
1466 |
+- */ |
1467 |
+- extra = queued & (crypto_ahash_blocksize(ahash) - 1); |
1468 |
+- if (!req->last_req && extra) { |
1469 |
+- sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), |
1470 |
+- req->cache_next, extra, areq->nbytes - extra); |
1471 |
+- |
1472 |
+- queued -= extra; |
1473 |
+- len -= extra; |
1474 |
++ if (!req->last_req) { |
1475 |
++ /* If this is not the last request and the queued data does not |
1476 |
++ * fit into full blocks, cache it for the next send() call. |
1477 |
++ */ |
1478 |
++ extra = queued & (crypto_ahash_blocksize(ahash) - 1); |
1479 |
++ if (!extra) |
1480 |
++ /* If this is not the last request and the queued data |
1481 |
++ * is a multiple of a block, cache the last one for now. |
1482 |
++ */ |
1483 |
++ extra = queued - crypto_ahash_blocksize(ahash); |
1484 |
++ |
1485 |
++ if (extra) { |
1486 |
++ sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), |
1487 |
++ req->cache_next, extra, |
1488 |
++ areq->nbytes - extra); |
1489 |
++ |
1490 |
++ queued -= extra; |
1491 |
++ len -= extra; |
1492 |
++ |
1493 |
++ if (!queued) { |
1494 |
++ *commands = 0; |
1495 |
++ *results = 0; |
1496 |
++ return 0; |
1497 |
++ } |
1498 |
++ } |
1499 |
+ } |
1500 |
+ |
1501 |
+ spin_lock_bh(&priv->ring[ring].egress_lock); |
1502 |
+@@ -233,15 +251,15 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring, |
1503 |
+ } |
1504 |
+ |
1505 |
+ /* Now handle the current ahash request buffer(s) */ |
1506 |
+- nents = dma_map_sg(priv->dev, areq->src, |
1507 |
+- sg_nents_for_len(areq->src, areq->nbytes), |
1508 |
+- DMA_TO_DEVICE); |
1509 |
+- if (!nents) { |
1510 |
++ req->nents = dma_map_sg(priv->dev, areq->src, |
1511 |
++ sg_nents_for_len(areq->src, areq->nbytes), |
1512 |
++ DMA_TO_DEVICE); |
1513 |
++ if (!req->nents) { |
1514 |
+ ret = -ENOMEM; |
1515 |
+ goto cdesc_rollback; |
1516 |
+ } |
1517 |
+ |
1518 |
+- for_each_sg(areq->src, sg, nents, i) { |
1519 |
++ for_each_sg(areq->src, sg, req->nents, i) { |
1520 |
+ int sglen = sg_dma_len(sg); |
1521 |
+ |
1522 |
+ /* Do not overflow the request */ |
1523 |
+diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig |
1524 |
+index 2b4c39fdfa91..86210f75d233 100644 |
1525 |
+--- a/drivers/firmware/efi/Kconfig |
1526 |
++++ b/drivers/firmware/efi/Kconfig |
1527 |
+@@ -159,7 +159,10 @@ config RESET_ATTACK_MITIGATION |
1528 |
+ using the TCG Platform Reset Attack Mitigation specification. This |
1529 |
+ protects against an attacker forcibly rebooting the system while it |
1530 |
+ still contains secrets in RAM, booting another OS and extracting the |
1531 |
+- secrets. |
1532 |
++ secrets. This should only be enabled when userland is configured to |
1533 |
++ clear the MemoryOverwriteRequest flag on clean shutdown after secrets |
1534 |
++ have been evicted, since otherwise it will trigger even on clean |
1535 |
++ reboots. |
1536 |
+ |
1537 |
+ endmenu |
1538 |
+ |
1539 |
+diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c |
1540 |
+index f33d4a5fe671..af0baf8da295 100644 |
1541 |
+--- a/drivers/gpio/gpio-ath79.c |
1542 |
++++ b/drivers/gpio/gpio-ath79.c |
1543 |
+@@ -323,3 +323,6 @@ static struct platform_driver ath79_gpio_driver = { |
1544 |
+ }; |
1545 |
+ |
1546 |
+ module_platform_driver(ath79_gpio_driver); |
1547 |
++ |
1548 |
++MODULE_DESCRIPTION("Atheros AR71XX/AR724X/AR913X GPIO API support"); |
1549 |
++MODULE_LICENSE("GPL v2"); |
1550 |
+diff --git a/drivers/gpio/gpio-iop.c b/drivers/gpio/gpio-iop.c |
1551 |
+index 98c7ff2a76e7..8d62db447ec1 100644 |
1552 |
+--- a/drivers/gpio/gpio-iop.c |
1553 |
++++ b/drivers/gpio/gpio-iop.c |
1554 |
+@@ -58,3 +58,7 @@ static int __init iop3xx_gpio_init(void) |
1555 |
+ return platform_driver_register(&iop3xx_gpio_driver); |
1556 |
+ } |
1557 |
+ arch_initcall(iop3xx_gpio_init); |
1558 |
++ |
1559 |
++MODULE_DESCRIPTION("GPIO handling for Intel IOP3xx processors"); |
1560 |
++MODULE_AUTHOR("Lennert Buytenhek <buytenh@××××××××××.org>"); |
1561 |
++MODULE_LICENSE("GPL"); |
1562 |
+diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c |
1563 |
+index 16cbc5702865..491b0974c0fe 100644 |
1564 |
+--- a/drivers/gpio/gpio-stmpe.c |
1565 |
++++ b/drivers/gpio/gpio-stmpe.c |
1566 |
+@@ -190,6 +190,16 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d) |
1567 |
+ }; |
1568 |
+ int i, j; |
1569 |
+ |
1570 |
++ /* |
1571 |
++ * STMPE1600: to be able to get IRQ from pins, |
1572 |
++ * a read must be done on GPMR register, or a write in |
1573 |
++ * GPSR or GPCR registers |
1574 |
++ */ |
1575 |
++ if (stmpe->partnum == STMPE1600) { |
1576 |
++ stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_LSB]); |
1577 |
++ stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_CSB]); |
1578 |
++ } |
1579 |
++ |
1580 |
+ for (i = 0; i < CACHE_NR_REGS; i++) { |
1581 |
+ /* STMPE801 and STMPE1600 don't have RE and FE registers */ |
1582 |
+ if ((stmpe->partnum == STMPE801 || |
1583 |
+@@ -227,21 +237,11 @@ static void stmpe_gpio_irq_unmask(struct irq_data *d) |
1584 |
+ { |
1585 |
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d); |
1586 |
+ struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(gc); |
1587 |
+- struct stmpe *stmpe = stmpe_gpio->stmpe; |
1588 |
+ int offset = d->hwirq; |
1589 |
+ int regoffset = offset / 8; |
1590 |
+ int mask = BIT(offset % 8); |
1591 |
+ |
1592 |
+ stmpe_gpio->regs[REG_IE][regoffset] |= mask; |
1593 |
+- |
1594 |
+- /* |
1595 |
+- * STMPE1600 workaround: to be able to get IRQ from pins, |
1596 |
+- * a read must be done on GPMR register, or a write in |
1597 |
+- * GPSR or GPCR registers |
1598 |
+- */ |
1599 |
+- if (stmpe->partnum == STMPE1600) |
1600 |
+- stmpe_reg_read(stmpe, |
1601 |
+- stmpe->regs[STMPE_IDX_GPMR_LSB + regoffset]); |
1602 |
+ } |
1603 |
+ |
1604 |
+ static void stmpe_dbg_show_one(struct seq_file *s, |
1605 |
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c |
1606 |
+index eb80dac4e26a..bdd68ff197dc 100644 |
1607 |
+--- a/drivers/gpio/gpiolib.c |
1608 |
++++ b/drivers/gpio/gpiolib.c |
1609 |
+@@ -723,6 +723,9 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p) |
1610 |
+ struct gpioevent_data ge; |
1611 |
+ int ret, level; |
1612 |
+ |
1613 |
++ /* Do not leak kernel stack to userspace */ |
1614 |
++ memset(&ge, 0, sizeof(ge)); |
1615 |
++ |
1616 |
+ ge.timestamp = ktime_get_real_ns(); |
1617 |
+ level = gpiod_get_value_cansleep(le->desc); |
1618 |
+ |
1619 |
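The gpiolib hunk above zeroes the event structure before filling it; struct gpioevent_data has padding, and any bytes the handler does not explicitly set would otherwise leak kernel stack contents to userspace when the event is read. A kernel-style sketch of the pattern with an invented event layout (the real driver pushes the event into a kfifo rather than calling copy_to_user() directly):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct example_event {
	__u64 timestamp;
	__u32 id;
	/* 4 bytes of implicit padding end the struct */
};

int example_report(struct example_event __user *dst, __u64 ts, __u32 id)
{
	struct example_event ev;

	memset(&ev, 0, sizeof(ev));	/* clear padding and any unset fields */
	ev.timestamp = ts;
	ev.id = id;

	return copy_to_user(dst, &ev, sizeof(ev)) ? -EFAULT : 0;
}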
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c |
1620 |
+index b9dbbf9cb8b0..bdabaa3399db 100644 |
1621 |
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c |
1622 |
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c |
1623 |
+@@ -369,29 +369,50 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd) |
1624 |
+ { |
1625 |
+ struct amdgpu_device *adev = get_amdgpu_device(kgd); |
1626 |
+ struct cik_sdma_rlc_registers *m; |
1627 |
++ unsigned long end_jiffies; |
1628 |
+ uint32_t sdma_base_addr; |
1629 |
++ uint32_t data; |
1630 |
+ |
1631 |
+ m = get_sdma_mqd(mqd); |
1632 |
+ sdma_base_addr = get_sdma_base_addr(m); |
1633 |
+ |
1634 |
+- WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR, |
1635 |
+- m->sdma_rlc_virtual_addr); |
1636 |
++ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, |
1637 |
++ m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)); |
1638 |
+ |
1639 |
+- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, |
1640 |
+- m->sdma_rlc_rb_base); |
1641 |
++ end_jiffies = msecs_to_jiffies(2000) + jiffies; |
1642 |
++ while (true) { |
1643 |
++ data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); |
1644 |
++ if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) |
1645 |
++ break; |
1646 |
++ if (time_after(jiffies, end_jiffies)) |
1647 |
++ return -ETIME; |
1648 |
++ usleep_range(500, 1000); |
1649 |
++ } |
1650 |
++ if (m->sdma_engine_id) { |
1651 |
++ data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL); |
1652 |
++ data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL, |
1653 |
++ RESUME_CTX, 0); |
1654 |
++ WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data); |
1655 |
++ } else { |
1656 |
++ data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL); |
1657 |
++ data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL, |
1658 |
++ RESUME_CTX, 0); |
1659 |
++ WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data); |
1660 |
++ } |
1661 |
+ |
1662 |
++ WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, |
1663 |
++ m->sdma_rlc_doorbell); |
1664 |
++ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0); |
1665 |
++ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0); |
1666 |
++ WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR, |
1667 |
++ m->sdma_rlc_virtual_addr); |
1668 |
++ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base); |
1669 |
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI, |
1670 |
+ m->sdma_rlc_rb_base_hi); |
1671 |
+- |
1672 |
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, |
1673 |
+ m->sdma_rlc_rb_rptr_addr_lo); |
1674 |
+- |
1675 |
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, |
1676 |
+ m->sdma_rlc_rb_rptr_addr_hi); |
1677 |
+- |
1678 |
+- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, |
1679 |
+- m->sdma_rlc_doorbell); |
1680 |
+- |
1681 |
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, |
1682 |
+ m->sdma_rlc_rb_cntl); |
1683 |
+ |
1684 |
+@@ -564,9 +585,9 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, |
1685 |
+ } |
1686 |
+ |
1687 |
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0); |
1688 |
+- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0); |
1689 |
+- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0); |
1690 |
+- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0); |
1691 |
++ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, |
1692 |
++ RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) | |
1693 |
++ SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK); |
1694 |
+ |
1695 |
+ return 0; |
1696 |
+ } |
1697 |
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c |
1698 |
+index 60d8bedb694d..b5aa8e6f8e0b 100644 |
1699 |
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c |
1700 |
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c |
1701 |
+@@ -403,6 +403,10 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p, |
1702 |
+ if (candidate->robj == validated) |
1703 |
+ break; |
1704 |
+ |
1705 |
++ /* We can't move pinned BOs here */ |
1706 |
++ if (bo->pin_count) |
1707 |
++ continue; |
1708 |
++ |
1709 |
+ other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); |
1710 |
+ |
1711 |
+ /* Check if this BO is in one of the domains we need space for */ |
1712 |
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c |
1713 |
+index 44ffd23348fc..164fa4b1f9a9 100644 |
1714 |
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c |
1715 |
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c |
1716 |
+@@ -205,8 +205,8 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd, |
1717 |
+ struct cik_sdma_rlc_registers *m; |
1718 |
+ |
1719 |
+ m = get_sdma_mqd(mqd); |
1720 |
+- m->sdma_rlc_rb_cntl = ffs(q->queue_size / sizeof(unsigned int)) << |
1721 |
+- SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT | |
1722 |
++ m->sdma_rlc_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1) |
1723 |
++ << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT | |
1724 |
+ q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT | |
1725 |
+ 1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT | |
1726 |
+ 6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT; |
1727 |
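The one-line kfd_mqd_manager_cik.c change above matters because the SDMA RB_SIZE field expects log2 of the ring size in dwords. ffs() returns the 1-based position of the lowest set bit, so for a power-of-two count it yields log2(n) + 1, and the old code therefore programmed a ring twice as large as was allocated. A small userspace check of that arithmetic (the queue sizes are made-up examples):

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int sizes[] = { 4096, 65536, 1 << 20 };	/* bytes */

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned int dwords = sizes[i] / sizeof(unsigned int);
		int old_field = ffs(dwords);		/* off by one: encodes double the size */
		int new_field = ffs(dwords) - 1;	/* log2(dwords) */

		printf("%8u bytes = %6u dwords: old RB_SIZE %2d, fixed RB_SIZE %2d\n",
		       sizes[i], dwords, old_field, new_field);
	}
	return 0;
}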
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c |
1728 |
+index 03bec765b03d..f9a1a4db9be7 100644 |
1729 |
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c |
1730 |
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c |
1731 |
+@@ -184,6 +184,24 @@ int pqm_create_queue(struct process_queue_manager *pqm, |
1732 |
+ |
1733 |
+ switch (type) { |
1734 |
+ case KFD_QUEUE_TYPE_SDMA: |
1735 |
++ if (dev->dqm->queue_count >= |
1736 |
++ CIK_SDMA_QUEUES_PER_ENGINE * CIK_SDMA_ENGINE_NUM) { |
1737 |
++ pr_err("Over-subscription is not allowed for SDMA.\n"); |
1738 |
++ retval = -EPERM; |
1739 |
++ goto err_create_queue; |
1740 |
++ } |
1741 |
++ |
1742 |
++ retval = create_cp_queue(pqm, dev, &q, properties, f, *qid); |
1743 |
++ if (retval != 0) |
1744 |
++ goto err_create_queue; |
1745 |
++ pqn->q = q; |
1746 |
++ pqn->kq = NULL; |
1747 |
++ retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, |
1748 |
++ &q->properties.vmid); |
1749 |
++ pr_debug("DQM returned %d for create_queue\n", retval); |
1750 |
++ print_queue(q); |
1751 |
++ break; |
1752 |
++ |
1753 |
+ case KFD_QUEUE_TYPE_COMPUTE: |
1754 |
+ /* check if there is over subscription */ |
1755 |
+ if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) && |
1756 |
+diff --git a/drivers/gpu/drm/bridge/lvds-encoder.c b/drivers/gpu/drm/bridge/lvds-encoder.c |
1757 |
+index 0903ba574f61..75b0d3f6e4de 100644 |
1758 |
+--- a/drivers/gpu/drm/bridge/lvds-encoder.c |
1759 |
++++ b/drivers/gpu/drm/bridge/lvds-encoder.c |
1760 |
+@@ -13,13 +13,37 @@ |
1761 |
+ |
1762 |
+ #include <linux/of_graph.h> |
1763 |
+ |
1764 |
++struct lvds_encoder { |
1765 |
++ struct drm_bridge bridge; |
1766 |
++ struct drm_bridge *panel_bridge; |
1767 |
++}; |
1768 |
++ |
1769 |
++static int lvds_encoder_attach(struct drm_bridge *bridge) |
1770 |
++{ |
1771 |
++ struct lvds_encoder *lvds_encoder = container_of(bridge, |
1772 |
++ struct lvds_encoder, |
1773 |
++ bridge); |
1774 |
++ |
1775 |
++ return drm_bridge_attach(bridge->encoder, lvds_encoder->panel_bridge, |
1776 |
++ bridge); |
1777 |
++} |
1778 |
++ |
1779 |
++static struct drm_bridge_funcs funcs = { |
1780 |
++ .attach = lvds_encoder_attach, |
1781 |
++}; |
1782 |
++ |
1783 |
+ static int lvds_encoder_probe(struct platform_device *pdev) |
1784 |
+ { |
1785 |
+ struct device_node *port; |
1786 |
+ struct device_node *endpoint; |
1787 |
+ struct device_node *panel_node; |
1788 |
+ struct drm_panel *panel; |
1789 |
+- struct drm_bridge *bridge; |
1790 |
++ struct lvds_encoder *lvds_encoder; |
1791 |
++ |
1792 |
++ lvds_encoder = devm_kzalloc(&pdev->dev, sizeof(*lvds_encoder), |
1793 |
++ GFP_KERNEL); |
1794 |
++ if (!lvds_encoder) |
1795 |
++ return -ENOMEM; |
1796 |
+ |
1797 |
+ /* Locate the panel DT node. */ |
1798 |
+ port = of_graph_get_port_by_id(pdev->dev.of_node, 1); |
1799 |
+@@ -49,20 +73,30 @@ static int lvds_encoder_probe(struct platform_device *pdev) |
1800 |
+ return -EPROBE_DEFER; |
1801 |
+ } |
1802 |
+ |
1803 |
+- bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_LVDS); |
1804 |
+- if (IS_ERR(bridge)) |
1805 |
+- return PTR_ERR(bridge); |
1806 |
++ lvds_encoder->panel_bridge = |
1807 |
++ devm_drm_panel_bridge_add(&pdev->dev, |
1808 |
++ panel, DRM_MODE_CONNECTOR_LVDS); |
1809 |
++ if (IS_ERR(lvds_encoder->panel_bridge)) |
1810 |
++ return PTR_ERR(lvds_encoder->panel_bridge); |
1811 |
++ |
1812 |
++ /* The panel_bridge bridge is attached to the panel's of_node, |
1813 |
++ * but we need a bridge attached to our of_node for our user |
1814 |
++ * to look up. |
1815 |
++ */ |
1816 |
++ lvds_encoder->bridge.of_node = pdev->dev.of_node; |
1817 |
++ lvds_encoder->bridge.funcs = &funcs; |
1818 |
++ drm_bridge_add(&lvds_encoder->bridge); |
1819 |
+ |
1820 |
+- platform_set_drvdata(pdev, bridge); |
1821 |
++ platform_set_drvdata(pdev, lvds_encoder); |
1822 |
+ |
1823 |
+ return 0; |
1824 |
+ } |
1825 |
+ |
1826 |
+ static int lvds_encoder_remove(struct platform_device *pdev) |
1827 |
+ { |
1828 |
+- struct drm_bridge *bridge = platform_get_drvdata(pdev); |
1829 |
++ struct lvds_encoder *lvds_encoder = platform_get_drvdata(pdev); |
1830 |
+ |
1831 |
+- drm_bridge_remove(bridge); |
1832 |
++ drm_bridge_remove(&lvds_encoder->bridge); |
1833 |
+ |
1834 |
+ return 0; |
1835 |
+ } |
1836 |
+diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c |
1837 |
+index 8571cfd877c5..8636e7eeb731 100644 |
1838 |
+--- a/drivers/gpu/drm/bridge/tc358767.c |
1839 |
++++ b/drivers/gpu/drm/bridge/tc358767.c |
1840 |
+@@ -97,7 +97,7 @@ |
1841 |
+ #define DP0_ACTIVEVAL 0x0650 |
1842 |
+ #define DP0_SYNCVAL 0x0654 |
1843 |
+ #define DP0_MISC 0x0658 |
1844 |
+-#define TU_SIZE_RECOMMENDED (0x3f << 16) /* LSCLK cycles per TU */ |
1845 |
++#define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */ |
1846 |
+ #define BPC_6 (0 << 5) |
1847 |
+ #define BPC_8 (1 << 5) |
1848 |
+ |
1849 |
+@@ -318,7 +318,7 @@ static ssize_t tc_aux_transfer(struct drm_dp_aux *aux, |
1850 |
+ tmp = (tmp << 8) | buf[i]; |
1851 |
+ i++; |
1852 |
+ if (((i % 4) == 0) || (i == size)) { |
1853 |
+- tc_write(DP0_AUXWDATA(i >> 2), tmp); |
1854 |
++ tc_write(DP0_AUXWDATA((i - 1) >> 2), tmp); |
1855 |
+ tmp = 0; |
1856 |
+ } |
1857 |
+ } |
1858 |
+@@ -603,8 +603,15 @@ static int tc_get_display_props(struct tc_data *tc) |
1859 |
+ ret = drm_dp_link_probe(&tc->aux, &tc->link.base); |
1860 |
+ if (ret < 0) |
1861 |
+ goto err_dpcd_read; |
1862 |
+- if ((tc->link.base.rate != 162000) && (tc->link.base.rate != 270000)) |
1863 |
+- goto err_dpcd_inval; |
1864 |
++ if (tc->link.base.rate != 162000 && tc->link.base.rate != 270000) { |
1865 |
++ dev_dbg(tc->dev, "Falling to 2.7 Gbps rate\n"); |
1866 |
++ tc->link.base.rate = 270000; |
1867 |
++ } |
1868 |
++ |
1869 |
++ if (tc->link.base.num_lanes > 2) { |
1870 |
++ dev_dbg(tc->dev, "Falling to 2 lanes\n"); |
1871 |
++ tc->link.base.num_lanes = 2; |
1872 |
++ } |
1873 |
+ |
1874 |
+ ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, tmp); |
1875 |
+ if (ret < 0) |
1876 |
+@@ -637,9 +644,6 @@ static int tc_get_display_props(struct tc_data *tc) |
1877 |
+ err_dpcd_read: |
1878 |
+ dev_err(tc->dev, "failed to read DPCD: %d\n", ret); |
1879 |
+ return ret; |
1880 |
+-err_dpcd_inval: |
1881 |
+- dev_err(tc->dev, "invalid DPCD\n"); |
1882 |
+- return -EINVAL; |
1883 |
+ } |
1884 |
+ |
1885 |
+ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode) |
1886 |
+@@ -655,6 +659,14 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode) |
1887 |
+ int lower_margin = mode->vsync_start - mode->vdisplay; |
1888 |
+ int vsync_len = mode->vsync_end - mode->vsync_start; |
1889 |
+ |
1890 |
++ /* |
1891 |
++ * Recommended maximum number of symbols transferred in a transfer unit: |
1892 |
++ * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size, |
1893 |
++ * (output active video bandwidth in bytes)) |
1894 |
++ * Must be less than tu_size. |
1895 |
++ */ |
1896 |
++ max_tu_symbol = TU_SIZE_RECOMMENDED - 1; |
1897 |
++ |
1898 |
+ dev_dbg(tc->dev, "set mode %dx%d\n", |
1899 |
+ mode->hdisplay, mode->vdisplay); |
1900 |
+ dev_dbg(tc->dev, "H margin %d,%d sync %d\n", |
1901 |
+@@ -664,13 +676,18 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode) |
1902 |
+ dev_dbg(tc->dev, "total: %dx%d\n", mode->htotal, mode->vtotal); |
1903 |
+ |
1904 |
+ |
1905 |
+- /* LCD Ctl Frame Size */ |
1906 |
+- tc_write(VPCTRL0, (0x40 << 20) /* VSDELAY */ | |
1907 |
++ /* |
1908 |
++ * LCD Ctl Frame Size |
1909 |
++ * datasheet is not clear of vsdelay in case of DPI |
1910 |
++ * assume we do not need any delay when DPI is a source of |
1911 |
++ * sync signals |
1912 |
++ */ |
1913 |
++ tc_write(VPCTRL0, (0 << 20) /* VSDELAY */ | |
1914 |
+ OPXLFMT_RGB888 | FRMSYNC_DISABLED | MSF_DISABLED); |
1915 |
+- tc_write(HTIM01, (left_margin << 16) | /* H back porch */ |
1916 |
+- (hsync_len << 0)); /* Hsync */ |
1917 |
+- tc_write(HTIM02, (right_margin << 16) | /* H front porch */ |
1918 |
+- (mode->hdisplay << 0)); /* width */ |
1919 |
++ tc_write(HTIM01, (ALIGN(left_margin, 2) << 16) | /* H back porch */ |
1920 |
++ (ALIGN(hsync_len, 2) << 0)); /* Hsync */ |
1921 |
++ tc_write(HTIM02, (ALIGN(right_margin, 2) << 16) | /* H front porch */ |
1922 |
++ (ALIGN(mode->hdisplay, 2) << 0)); /* width */ |
1923 |
+ tc_write(VTIM01, (upper_margin << 16) | /* V back porch */ |
1924 |
+ (vsync_len << 0)); /* Vsync */ |
1925 |
+ tc_write(VTIM02, (lower_margin << 16) | /* V front porch */ |
1926 |
+@@ -689,7 +706,7 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode) |
1927 |
+ /* DP Main Stream Attributes */ |
1928 |
+ vid_sync_dly = hsync_len + left_margin + mode->hdisplay; |
1929 |
+ tc_write(DP0_VIDSYNCDELAY, |
1930 |
+- (0x003e << 16) | /* thresh_dly */ |
1931 |
++ (max_tu_symbol << 16) | /* thresh_dly */ |
1932 |
+ (vid_sync_dly << 0)); |
1933 |
+ |
1934 |
+ tc_write(DP0_TOTALVAL, (mode->vtotal << 16) | (mode->htotal)); |
1935 |
+@@ -705,14 +722,8 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode) |
1936 |
+ tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW | |
1937 |
+ DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888); |
1938 |
+ |
1939 |
+- /* |
1940 |
+- * Recommended maximum number of symbols transferred in a transfer unit: |
1941 |
+- * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size, |
1942 |
+- * (output active video bandwidth in bytes)) |
1943 |
+- * Must be less than tu_size. |
1944 |
+- */ |
1945 |
+- max_tu_symbol = TU_SIZE_RECOMMENDED - 1; |
1946 |
+- tc_write(DP0_MISC, (max_tu_symbol << 23) | TU_SIZE_RECOMMENDED | BPC_8); |
1947 |
++ tc_write(DP0_MISC, (max_tu_symbol << 23) | (TU_SIZE_RECOMMENDED << 16) | |
1948 |
++ BPC_8); |
1949 |
+ |
1950 |
+ return 0; |
1951 |
+ err: |
1952 |
+@@ -808,8 +819,6 @@ static int tc_main_link_setup(struct tc_data *tc) |
1953 |
+ unsigned int rate; |
1954 |
+ u32 dp_phy_ctrl; |
1955 |
+ int timeout; |
1956 |
+- bool aligned; |
1957 |
+- bool ready; |
1958 |
+ u32 value; |
1959 |
+ int ret; |
1960 |
+ u8 tmp[8]; |
1961 |
+@@ -954,16 +963,15 @@ static int tc_main_link_setup(struct tc_data *tc) |
1962 |
+ ret = drm_dp_dpcd_read_link_status(aux, tmp + 2); |
1963 |
+ if (ret < 0) |
1964 |
+ goto err_dpcd_read; |
1965 |
+- ready = (tmp[2] == ((DP_CHANNEL_EQ_BITS << 4) | /* Lane1 */ |
1966 |
+- DP_CHANNEL_EQ_BITS)); /* Lane0 */ |
1967 |
+- aligned = tmp[4] & DP_INTERLANE_ALIGN_DONE; |
1968 |
+- } while ((--timeout) && !(ready && aligned)); |
1969 |
++ } while ((--timeout) && |
1970 |
++ !(drm_dp_channel_eq_ok(tmp + 2, tc->link.base.num_lanes))); |
1971 |
+ |
1972 |
+ if (timeout == 0) { |
1973 |
+ /* Read DPCD 0x200-0x201 */ |
1974 |
+ ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT, tmp, 2); |
1975 |
+ if (ret < 0) |
1976 |
+ goto err_dpcd_read; |
1977 |
++ dev_err(dev, "channel(s) EQ not ok\n"); |
1978 |
+ dev_info(dev, "0x0200 SINK_COUNT: 0x%02x\n", tmp[0]); |
1979 |
+ dev_info(dev, "0x0201 DEVICE_SERVICE_IRQ_VECTOR: 0x%02x\n", |
1980 |
+ tmp[1]); |
1981 |
+@@ -974,10 +982,6 @@ static int tc_main_link_setup(struct tc_data *tc) |
1982 |
+ dev_info(dev, "0x0206 ADJUST_REQUEST_LANE0_1: 0x%02x\n", |
1983 |
+ tmp[6]); |
1984 |
+ |
1985 |
+- if (!ready) |
1986 |
+- dev_err(dev, "Lane0/1 not ready\n"); |
1987 |
+- if (!aligned) |
1988 |
+- dev_err(dev, "Lane0/1 not aligned\n"); |
1989 |
+ return -EAGAIN; |
1990 |
+ } |
1991 |
+ |
1992 |
+@@ -1099,7 +1103,10 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge, |
1993 |
+ static int tc_connector_mode_valid(struct drm_connector *connector, |
1994 |
+ struct drm_display_mode *mode) |
1995 |
+ { |
1996 |
+- /* Accept any mode */ |
1997 |
++ /* DPI interface clock limitation: upto 154 MHz */ |
1998 |
++ if (mode->clock > 154000) |
1999 |
++ return MODE_CLOCK_HIGH; |
2000 |
++ |
2001 |
+ return MODE_OK; |
2002 |
+ } |
2003 |
+ |
2004 |
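The tc358767 hunks above fix the DP0_MISC programming: the old TU_SIZE_RECOMMENDED macro already carried a << 16 shift, so the max_tu_symbol value derived from it overflowed its bit field, and the driver now simply pins max_tu_symbol at TU_SIZE_RECOMMENDED - 1. The formula quoted in the driver comment can still be sanity-checked standalone; the mode and link parameters below are examples, and the unit assumptions (link rate in kHz, one data byte per symbol per lane after 8b/10b) are mine:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define TU_SIZE			64	/* symbols per transfer unit */

int main(void)
{
	/* 1024x768@60 (65 MHz pixel clock), RGB888, 2 lanes at 2.7 Gbps */
	unsigned long pixelclock_khz = 65000;
	unsigned long bytes_per_pixel = 3;
	unsigned long lanes = 2;
	unsigned long link_rate_khz = 270000;

	unsigned long in_bw = pixelclock_khz * bytes_per_pixel;	/* kB/s */
	unsigned long out_bw = link_rate_khz * lanes;			/* kB/s */
	unsigned long max_tu_symbol = DIV_ROUND_UP(in_bw * TU_SIZE, out_bw);

	printf("input %lu kB/s, output %lu kB/s -> max_tu_symbol %lu (must be < %d)\n",
	       in_bw, out_bw, max_tu_symbol, TU_SIZE);
	return 0;
}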
+diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig |
2005 |
+index c226da145fb3..a349cb61961e 100644 |
2006 |
+--- a/drivers/gpu/drm/omapdrm/displays/Kconfig |
2007 |
++++ b/drivers/gpu/drm/omapdrm/displays/Kconfig |
2008 |
+@@ -35,6 +35,7 @@ config DRM_OMAP_CONNECTOR_ANALOG_TV |
2009 |
+ |
2010 |
+ config DRM_OMAP_PANEL_DPI |
2011 |
+ tristate "Generic DPI panel" |
2012 |
++ depends on BACKLIGHT_CLASS_DEVICE |
2013 |
+ help |
2014 |
+ Driver for generic DPI panels. |
2015 |
+ |
2016 |
+diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c |
2017 |
+index 1dd3dafc59af..c60a85e82c6d 100644 |
2018 |
+--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c |
2019 |
++++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c |
2020 |
+@@ -638,7 +638,8 @@ static int omap_dmm_probe(struct platform_device *dev) |
2021 |
+ match = of_match_node(dmm_of_match, dev->dev.of_node); |
2022 |
+ if (!match) { |
2023 |
+ dev_err(&dev->dev, "failed to find matching device node\n"); |
2024 |
+- return -ENODEV; |
2025 |
++ ret = -ENODEV; |
2026 |
++ goto fail; |
2027 |
+ } |
2028 |
+ |
2029 |
+ omap_dmm->plat_data = match->data; |
2030 |
+diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c |
2031 |
+index 9a20b9dc27c8..f7fc652b0027 100644 |
2032 |
+--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c |
2033 |
++++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c |
2034 |
+@@ -1275,8 +1275,6 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master, |
2035 |
+ goto err_pllref; |
2036 |
+ } |
2037 |
+ |
2038 |
+- pm_runtime_enable(dev); |
2039 |
+- |
2040 |
+ dsi->dsi_host.ops = &dw_mipi_dsi_host_ops; |
2041 |
+ dsi->dsi_host.dev = dev; |
2042 |
+ ret = mipi_dsi_host_register(&dsi->dsi_host); |
2043 |
+@@ -1291,6 +1289,7 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master, |
2044 |
+ } |
2045 |
+ |
2046 |
+ dev_set_drvdata(dev, dsi); |
2047 |
++ pm_runtime_enable(dev); |
2048 |
+ return 0; |
2049 |
+ |
2050 |
+ err_mipi_dsi_host: |
2051 |
+diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c |
2052 |
+index 7d7af3a93d94..521addec831e 100644 |
2053 |
+--- a/drivers/gpu/drm/vc4/vc4_irq.c |
2054 |
++++ b/drivers/gpu/drm/vc4/vc4_irq.c |
2055 |
+@@ -225,6 +225,9 @@ vc4_irq_uninstall(struct drm_device *dev) |
2056 |
+ /* Clear any pending interrupts we might have left. */ |
2057 |
+ V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS); |
2058 |
+ |
2059 |
++ /* Finish any interrupt handler still in flight. */ |
2060 |
++ disable_irq(dev->irq); |
2061 |
++ |
2062 |
+ cancel_work_sync(&vc4->overflow_mem_work); |
2063 |
+ } |
2064 |
+ |
2065 |
+diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c |
2066 |
+index 622cd43840b8..493f392b3a0a 100644 |
2067 |
+--- a/drivers/gpu/drm/vc4/vc4_v3d.c |
2068 |
++++ b/drivers/gpu/drm/vc4/vc4_v3d.c |
2069 |
+@@ -327,6 +327,9 @@ static int vc4_v3d_runtime_resume(struct device *dev) |
2070 |
+ return ret; |
2071 |
+ |
2072 |
+ vc4_v3d_init_hw(vc4->dev); |
2073 |
++ |
2074 |
++ /* We disabled the IRQ as part of vc4_irq_uninstall in suspend. */ |
2075 |
++ enable_irq(vc4->dev->irq); |
2076 |
+ vc4_irq_postinstall(vc4->dev); |
2077 |
+ |
2078 |
+ return 0; |
2079 |
+diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c |
2080 |
+index 906e654fb0ba..65f1cfbbe7fe 100644 |
2081 |
+--- a/drivers/hid/wacom_sys.c |
2082 |
++++ b/drivers/hid/wacom_sys.c |
2083 |
+@@ -2340,23 +2340,23 @@ static void wacom_remote_destroy_one(struct wacom *wacom, unsigned int index) |
2084 |
+ int i; |
2085 |
+ unsigned long flags; |
2086 |
+ |
2087 |
+- spin_lock_irqsave(&remote->remote_lock, flags); |
2088 |
+- remote->remotes[index].registered = false; |
2089 |
+- spin_unlock_irqrestore(&remote->remote_lock, flags); |
2090 |
++ for (i = 0; i < WACOM_MAX_REMOTES; i++) { |
2091 |
++ if (remote->remotes[i].serial == serial) { |
2092 |
+ |
2093 |
+- if (remote->remotes[index].battery.battery) |
2094 |
+- devres_release_group(&wacom->hdev->dev, |
2095 |
+- &remote->remotes[index].battery.bat_desc); |
2096 |
++ spin_lock_irqsave(&remote->remote_lock, flags); |
2097 |
++ remote->remotes[i].registered = false; |
2098 |
++ spin_unlock_irqrestore(&remote->remote_lock, flags); |
2099 |
+ |
2100 |
+- if (remote->remotes[index].group.name) |
2101 |
+- devres_release_group(&wacom->hdev->dev, |
2102 |
+- &remote->remotes[index]); |
2103 |
++ if (remote->remotes[i].battery.battery) |
2104 |
++ devres_release_group(&wacom->hdev->dev, |
2105 |
++ &remote->remotes[i].battery.bat_desc); |
2106 |
++ |
2107 |
++ if (remote->remotes[i].group.name) |
2108 |
++ devres_release_group(&wacom->hdev->dev, |
2109 |
++ &remote->remotes[i]); |
2110 |
+ |
2111 |
+- for (i = 0; i < WACOM_MAX_REMOTES; i++) { |
2112 |
+- if (remote->remotes[i].serial == serial) { |
2113 |
+ remote->remotes[i].serial = 0; |
2114 |
+ remote->remotes[i].group.name = NULL; |
2115 |
+- remote->remotes[i].registered = false; |
2116 |
+ remote->remotes[i].battery.battery = NULL; |
2117 |
+ wacom->led.groups[i].select = WACOM_STATUS_UNKNOWN; |
2118 |
+ } |
2119 |
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c |
2120 |
+index aa692e28b2cd..70cbe1e5a3d2 100644 |
2121 |
+--- a/drivers/hid/wacom_wac.c |
2122 |
++++ b/drivers/hid/wacom_wac.c |
2123 |
+@@ -1924,7 +1924,6 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field |
2124 |
+ struct wacom_features *features = &wacom_wac->features; |
2125 |
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid); |
2126 |
+ int i; |
2127 |
+- bool is_touch_on = value; |
2128 |
+ bool do_report = false; |
2129 |
+ |
2130 |
+ /* |
2131 |
+@@ -1969,16 +1968,17 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field |
2132 |
+ break; |
2133 |
+ |
2134 |
+ case WACOM_HID_WD_MUTE_DEVICE: |
2135 |
+- if (wacom_wac->shared->touch_input && value) { |
2136 |
+- wacom_wac->shared->is_touch_on = !wacom_wac->shared->is_touch_on; |
2137 |
+- is_touch_on = wacom_wac->shared->is_touch_on; |
2138 |
+- } |
2139 |
+- |
2140 |
+- /* fall through*/ |
2141 |
+ case WACOM_HID_WD_TOUCHONOFF: |
2142 |
+ if (wacom_wac->shared->touch_input) { |
2143 |
++ bool *is_touch_on = &wacom_wac->shared->is_touch_on; |
2144 |
++ |
2145 |
++ if (equivalent_usage == WACOM_HID_WD_MUTE_DEVICE && value) |
2146 |
++ *is_touch_on = !(*is_touch_on); |
2147 |
++ else if (equivalent_usage == WACOM_HID_WD_TOUCHONOFF) |
2148 |
++ *is_touch_on = value; |
2149 |
++ |
2150 |
+ input_report_switch(wacom_wac->shared->touch_input, |
2151 |
+- SW_MUTE_DEVICE, !is_touch_on); |
2152 |
++ SW_MUTE_DEVICE, !(*is_touch_on)); |
2153 |
+ input_sync(wacom_wac->shared->touch_input); |
2154 |
+ } |
2155 |
+ break; |
2156 |
+diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c |
2157 |
+index 52a58b8b6e1b..a139940cd991 100644 |
2158 |
+--- a/drivers/hwmon/pmbus/pmbus_core.c |
2159 |
++++ b/drivers/hwmon/pmbus/pmbus_core.c |
2160 |
+@@ -21,6 +21,7 @@ |
2161 |
+ |
2162 |
+ #include <linux/debugfs.h> |
2163 |
+ #include <linux/kernel.h> |
2164 |
++#include <linux/math64.h> |
2165 |
+ #include <linux/module.h> |
2166 |
+ #include <linux/init.h> |
2167 |
+ #include <linux/err.h> |
2168 |
+@@ -499,8 +500,8 @@ static long pmbus_reg2data_linear(struct pmbus_data *data, |
2169 |
+ static long pmbus_reg2data_direct(struct pmbus_data *data, |
2170 |
+ struct pmbus_sensor *sensor) |
2171 |
+ { |
2172 |
+- long val = (s16) sensor->data; |
2173 |
+- long m, b, R; |
2174 |
++ s64 b, val = (s16)sensor->data; |
2175 |
++ s32 m, R; |
2176 |
+ |
2177 |
+ m = data->info->m[sensor->class]; |
2178 |
+ b = data->info->b[sensor->class]; |
2179 |
+@@ -528,11 +529,12 @@ static long pmbus_reg2data_direct(struct pmbus_data *data, |
2180 |
+ R--; |
2181 |
+ } |
2182 |
+ while (R < 0) { |
2183 |
+- val = DIV_ROUND_CLOSEST(val, 10); |
2184 |
++ val = div_s64(val + 5LL, 10L); /* round closest */ |
2185 |
+ R++; |
2186 |
+ } |
2187 |
+ |
2188 |
+- return (val - b) / m; |
2189 |
++ val = div_s64(val - b, m); |
2190 |
++ return clamp_val(val, LONG_MIN, LONG_MAX); |
2191 |
+ } |
2192 |
+ |
2193 |
+ /* |
2194 |
+@@ -656,7 +658,8 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data, |
2195 |
+ static u16 pmbus_data2reg_direct(struct pmbus_data *data, |
2196 |
+ struct pmbus_sensor *sensor, long val) |
2197 |
+ { |
2198 |
+- long m, b, R; |
2199 |
++ s64 b, val64 = val; |
2200 |
++ s32 m, R; |
2201 |
+ |
2202 |
+ m = data->info->m[sensor->class]; |
2203 |
+ b = data->info->b[sensor->class]; |
2204 |
+@@ -673,18 +676,18 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data, |
2205 |
+ R -= 3; /* Adjust R and b for data in milli-units */ |
2206 |
+ b *= 1000; |
2207 |
+ } |
2208 |
+- val = val * m + b; |
2209 |
++ val64 = val64 * m + b; |
2210 |
+ |
2211 |
+ while (R > 0) { |
2212 |
+- val *= 10; |
2213 |
++ val64 *= 10; |
2214 |
+ R--; |
2215 |
+ } |
2216 |
+ while (R < 0) { |
2217 |
+- val = DIV_ROUND_CLOSEST(val, 10); |
2218 |
++ val64 = div_s64(val64 + 5LL, 10L); /* round closest */ |
2219 |
+ R++; |
2220 |
+ } |
2221 |
+ |
2222 |
+- return val; |
2223 |
++ return (u16)clamp_val(val64, S16_MIN, S16_MAX); |
2224 |
+ } |
2225 |
+ |
2226 |
+ static u16 pmbus_data2reg_vid(struct pmbus_data *data, |
2227 |
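The pmbus_core.c hunks above move the PMBus "direct" format conversions to 64-bit math, since Y = (m * X + b) * 10^R can overflow a 32-bit long for large coefficients, and clamp the results to the register and sysfs ranges. A standalone sketch of the same round trip, with plain int64_t standing in for the kernel's div_s64()/clamp_val() helpers and made-up coefficients:

#include <stdint.h>
#include <stdio.h>

/* Signed division by a positive divisor, rounding to the nearest integer. */
static int64_t div_round_closest(int64_t val, int64_t div)
{
	return (val >= 0 ? val + div / 2 : val - div / 2) / div;
}

/* register -> value: X = (Y * 10^-R - b) / m */
static int64_t direct_reg2data(int16_t reg, int32_t m, int64_t b, int32_t R)
{
	int64_t val = reg;

	for (R = -R; R > 0; R--)
		val *= 10;
	for (; R < 0; R++)
		val = div_round_closest(val, 10);
	return div_round_closest(val - b, m);
}

/* value -> register: Y = (m * X + b) * 10^R, clamped to the s16 register */
static int16_t direct_data2reg(int64_t data, int32_t m, int64_t b, int32_t R)
{
	int64_t val = data * m + b;

	for (; R > 0; R--)
		val *= 10;
	for (; R < 0; R++)
		val = div_round_closest(val, 10);
	if (val > INT16_MAX)
		val = INT16_MAX;
	if (val < INT16_MIN)
		val = INT16_MIN;
	return (int16_t)val;
}

int main(void)
{
	/* hypothetical coefficients: m = 16000, b = 0, R = -2 */
	int32_t m = 16000, R = -2;
	int64_t b = 0;
	int16_t reg = 20000;
	int64_t val = direct_reg2data(reg, m, b, R);

	printf("reg %d -> value %lld -> reg %d\n",
	       reg, (long long)val, direct_data2reg(val, m, b, R));
	return 0;
}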
+diff --git a/drivers/i2c/i2c-boardinfo.c b/drivers/i2c/i2c-boardinfo.c |
2228 |
+index 31186ead5a40..509a6007cdf6 100644 |
2229 |
+--- a/drivers/i2c/i2c-boardinfo.c |
2230 |
++++ b/drivers/i2c/i2c-boardinfo.c |
2231 |
+@@ -86,6 +86,7 @@ int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsig |
2232 |
+ property_entries_dup(info->properties); |
2233 |
+ if (IS_ERR(devinfo->board_info.properties)) { |
2234 |
+ status = PTR_ERR(devinfo->board_info.properties); |
2235 |
++ kfree(devinfo); |
2236 |
+ break; |
2237 |
+ } |
2238 |
+ } |
2239 |
+@@ -98,6 +99,7 @@ int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsig |
2240 |
+ GFP_KERNEL); |
2241 |
+ if (!devinfo->board_info.resources) { |
2242 |
+ status = -ENOMEM; |
2243 |
++ kfree(devinfo); |
2244 |
+ break; |
2245 |
+ } |
2246 |
+ } |
2247 |
+diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c |
2248 |
+index 4df32cf1650e..172753b14a4f 100644 |
2249 |
+--- a/drivers/iio/adc/stm32-adc.c |
2250 |
++++ b/drivers/iio/adc/stm32-adc.c |
2251 |
+@@ -1314,6 +1314,7 @@ static int stm32_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val) |
2252 |
+ { |
2253 |
+ struct stm32_adc *adc = iio_priv(indio_dev); |
2254 |
+ unsigned int watermark = STM32_DMA_BUFFER_SIZE / 2; |
2255 |
++ unsigned int rx_buf_sz = STM32_DMA_BUFFER_SIZE; |
2256 |
+ |
2257 |
+ /* |
2258 |
+ * dma cyclic transfers are used, buffer is split into two periods. |
2259 |
+@@ -1322,7 +1323,7 @@ static int stm32_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val) |
2260 |
+ * - one buffer (period) driver can push with iio_trigger_poll(). |
2261 |
+ */ |
2262 |
+ watermark = min(watermark, val * (unsigned)(sizeof(u16))); |
2263 |
+- adc->rx_buf_sz = watermark * 2; |
2264 |
++ adc->rx_buf_sz = min(rx_buf_sz, watermark * 2 * adc->num_conv); |
2265 |
+ |
2266 |
+ return 0; |
2267 |
+ } |
2268 |
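The stm32-adc change above sizes the cyclic DMA buffer from the watermark and the number of scanned channels instead of assuming a single conversion, capped at STM32_DMA_BUFFER_SIZE. A quick standalone calculation of the new sizing; the 4 KiB buffer size and the sample counts below are assumptions for illustration:

#include <stdint.h>
#include <stdio.h>

#define MIN(a, b)		((a) < (b) ? (a) : (b))
#define DMA_BUFFER_SIZE		4096U	/* stand-in for STM32_DMA_BUFFER_SIZE */

/* Mirror of the new logic: half-buffer watermark, scaled by channel count. */
static unsigned int rx_buf_size(unsigned int requested_samples, unsigned int num_conv)
{
	unsigned int watermark = DMA_BUFFER_SIZE / 2;

	watermark = MIN(watermark, requested_samples * (unsigned int)sizeof(uint16_t));
	return MIN(DMA_BUFFER_SIZE, watermark * 2 * num_conv);
}

int main(void)
{
	printf("32-sample watermark, 1 channel:  %u bytes\n", rx_buf_size(32, 1));
	printf("32-sample watermark, 4 channels: %u bytes\n", rx_buf_size(32, 4));
	printf("large watermark, 4 channels:     %u bytes\n", rx_buf_size(4096, 4));
	return 0;
}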
+diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c |
2269 |
+index 840a6cbd5f0f..8cfac6d1cec4 100644 |
2270 |
+--- a/drivers/iio/chemical/ccs811.c |
2271 |
++++ b/drivers/iio/chemical/ccs811.c |
2272 |
+@@ -91,7 +91,6 @@ static const struct iio_chan_spec ccs811_channels[] = { |
2273 |
+ .channel2 = IIO_MOD_CO2, |
2274 |
+ .modified = 1, |
2275 |
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | |
2276 |
+- BIT(IIO_CHAN_INFO_OFFSET) | |
2277 |
+ BIT(IIO_CHAN_INFO_SCALE), |
2278 |
+ .scan_index = 0, |
2279 |
+ .scan_type = { |
2280 |
+@@ -245,24 +244,18 @@ static int ccs811_read_raw(struct iio_dev *indio_dev, |
2281 |
+ switch (chan->channel2) { |
2282 |
+ case IIO_MOD_CO2: |
2283 |
+ *val = 0; |
2284 |
+- *val2 = 12834; |
2285 |
++ *val2 = 100; |
2286 |
+ return IIO_VAL_INT_PLUS_MICRO; |
2287 |
+ case IIO_MOD_VOC: |
2288 |
+ *val = 0; |
2289 |
+- *val2 = 84246; |
2290 |
+- return IIO_VAL_INT_PLUS_MICRO; |
2291 |
++ *val2 = 100; |
2292 |
++ return IIO_VAL_INT_PLUS_NANO; |
2293 |
+ default: |
2294 |
+ return -EINVAL; |
2295 |
+ } |
2296 |
+ default: |
2297 |
+ return -EINVAL; |
2298 |
+ } |
2299 |
+- case IIO_CHAN_INFO_OFFSET: |
2300 |
+- if (!(chan->type == IIO_CONCENTRATION && |
2301 |
+- chan->channel2 == IIO_MOD_CO2)) |
2302 |
+- return -EINVAL; |
2303 |
+- *val = -400; |
2304 |
+- return IIO_VAL_INT; |
2305 |
+ default: |
2306 |
+ return -EINVAL; |
2307 |
+ } |
2308 |
+diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c |
2309 |
+index 141ea228aac6..f5954981e9ee 100644 |
2310 |
+--- a/drivers/input/rmi4/rmi_driver.c |
2311 |
++++ b/drivers/input/rmi4/rmi_driver.c |
2312 |
+@@ -41,6 +41,13 @@ void rmi_free_function_list(struct rmi_device *rmi_dev) |
2313 |
+ |
2314 |
+ rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Freeing function list\n"); |
2315 |
+ |
2316 |
++ /* Doing it in the reverse order so F01 will be removed last */ |
2317 |
++ list_for_each_entry_safe_reverse(fn, tmp, |
2318 |
++ &data->function_list, node) { |
2319 |
++ list_del(&fn->node); |
2320 |
++ rmi_unregister_function(fn); |
2321 |
++ } |
2322 |
++ |
2323 |
+ devm_kfree(&rmi_dev->dev, data->irq_memory); |
2324 |
+ data->irq_memory = NULL; |
2325 |
+ data->irq_status = NULL; |
2326 |
+@@ -50,13 +57,6 @@ void rmi_free_function_list(struct rmi_device *rmi_dev) |
2327 |
+ |
2328 |
+ data->f01_container = NULL; |
2329 |
+ data->f34_container = NULL; |
2330 |
+- |
2331 |
+- /* Doing it in the reverse order so F01 will be removed last */ |
2332 |
+- list_for_each_entry_safe_reverse(fn, tmp, |
2333 |
+- &data->function_list, node) { |
2334 |
+- list_del(&fn->node); |
2335 |
+- rmi_unregister_function(fn); |
2336 |
+- } |
2337 |
+ } |
2338 |
+ |
2339 |
+ static int reset_one_function(struct rmi_function *fn) |
2340 |
+diff --git a/drivers/input/rmi4/rmi_f03.c b/drivers/input/rmi4/rmi_f03.c |
2341 |
+index ad71a5e768dc..7ccbb370a9a8 100644 |
2342 |
+--- a/drivers/input/rmi4/rmi_f03.c |
2343 |
++++ b/drivers/input/rmi4/rmi_f03.c |
2344 |
+@@ -32,6 +32,7 @@ struct f03_data { |
2345 |
+ struct rmi_function *fn; |
2346 |
+ |
2347 |
+ struct serio *serio; |
2348 |
++ bool serio_registered; |
2349 |
+ |
2350 |
+ unsigned int overwrite_buttons; |
2351 |
+ |
2352 |
+@@ -138,6 +139,37 @@ static int rmi_f03_initialize(struct f03_data *f03) |
2353 |
+ return 0; |
2354 |
+ } |
2355 |
+ |
2356 |
++static int rmi_f03_pt_open(struct serio *serio) |
2357 |
++{ |
2358 |
++ struct f03_data *f03 = serio->port_data; |
2359 |
++ struct rmi_function *fn = f03->fn; |
2360 |
++ const u8 ob_len = f03->rx_queue_length * RMI_F03_OB_SIZE; |
2361 |
++ const u16 data_addr = fn->fd.data_base_addr + RMI_F03_OB_OFFSET; |
2362 |
++ u8 obs[RMI_F03_QUEUE_LENGTH * RMI_F03_OB_SIZE]; |
2363 |
++ int error; |
2364 |
++ |
2365 |
++ /* |
2366 |
++ * Consume any pending data. Some devices like to spam with |
2367 |
++ * 0xaa 0x00 announcements which may confuse us as we try to |
2368 |
++ * probe the device. |
2369 |
++ */ |
2370 |
++ error = rmi_read_block(fn->rmi_dev, data_addr, &obs, ob_len); |
2371 |
++ if (!error) |
2372 |
++ rmi_dbg(RMI_DEBUG_FN, &fn->dev, |
2373 |
++ "%s: Consumed %*ph (%d) from PS2 guest\n", |
2374 |
++ __func__, ob_len, obs, ob_len); |
2375 |
++ |
2376 |
++ return fn->rmi_dev->driver->set_irq_bits(fn->rmi_dev, fn->irq_mask); |
2377 |
++} |
2378 |
++ |
2379 |
++static void rmi_f03_pt_close(struct serio *serio) |
2380 |
++{ |
2381 |
++ struct f03_data *f03 = serio->port_data; |
2382 |
++ struct rmi_function *fn = f03->fn; |
2383 |
++ |
2384 |
++ fn->rmi_dev->driver->clear_irq_bits(fn->rmi_dev, fn->irq_mask); |
2385 |
++} |
2386 |
++ |
2387 |
+ static int rmi_f03_register_pt(struct f03_data *f03) |
2388 |
+ { |
2389 |
+ struct serio *serio; |
2390 |
+@@ -148,6 +180,8 @@ static int rmi_f03_register_pt(struct f03_data *f03) |
2391 |
+ |
2392 |
+ serio->id.type = SERIO_PS_PSTHRU; |
2393 |
+ serio->write = rmi_f03_pt_write; |
2394 |
++ serio->open = rmi_f03_pt_open; |
2395 |
++ serio->close = rmi_f03_pt_close; |
2396 |
+ serio->port_data = f03; |
2397 |
+ |
2398 |
+ strlcpy(serio->name, "Synaptics RMI4 PS/2 pass-through", |
2399 |
+@@ -184,17 +218,27 @@ static int rmi_f03_probe(struct rmi_function *fn) |
2400 |
+ f03->device_count); |
2401 |
+ |
2402 |
+ dev_set_drvdata(dev, f03); |
2403 |
+- |
2404 |
+- error = rmi_f03_register_pt(f03); |
2405 |
+- if (error) |
2406 |
+- return error; |
2407 |
+- |
2408 |
+ return 0; |
2409 |
+ } |
2410 |
+ |
2411 |
+ static int rmi_f03_config(struct rmi_function *fn) |
2412 |
+ { |
2413 |
+- fn->rmi_dev->driver->set_irq_bits(fn->rmi_dev, fn->irq_mask); |
2414 |
++ struct f03_data *f03 = dev_get_drvdata(&fn->dev); |
2415 |
++ int error; |
2416 |
++ |
2417 |
++ if (!f03->serio_registered) { |
2418 |
++ error = rmi_f03_register_pt(f03); |
2419 |
++ if (error) |
2420 |
++ return error; |
2421 |
++ |
2422 |
++ f03->serio_registered = true; |
2423 |
++ } else { |
2424 |
++ /* |
2425 |
++ * We must be re-configuring the sensor, just enable |
2426 |
++ * interrupts for this function. |
2427 |
++ */ |
2428 |
++ fn->rmi_dev->driver->set_irq_bits(fn->rmi_dev, fn->irq_mask); |
2429 |
++ } |
2430 |
+ |
2431 |
+ return 0; |
2432 |
+ } |
2433 |
+@@ -204,7 +248,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits) |
2434 |
+ struct rmi_device *rmi_dev = fn->rmi_dev; |
2435 |
+ struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev); |
2436 |
+ struct f03_data *f03 = dev_get_drvdata(&fn->dev); |
2437 |
+- u16 data_addr = fn->fd.data_base_addr; |
2438 |
++ const u16 data_addr = fn->fd.data_base_addr + RMI_F03_OB_OFFSET; |
2439 |
+ const u8 ob_len = f03->rx_queue_length * RMI_F03_OB_SIZE; |
2440 |
+ u8 obs[RMI_F03_QUEUE_LENGTH * RMI_F03_OB_SIZE]; |
2441 |
+ u8 ob_status; |
2442 |
+@@ -226,8 +270,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits) |
2443 |
+ drvdata->attn_data.size -= ob_len; |
2444 |
+ } else { |
2445 |
+ /* Grab all of the data registers, and check them for data */ |
2446 |
+- error = rmi_read_block(fn->rmi_dev, data_addr + RMI_F03_OB_OFFSET, |
2447 |
+- &obs, ob_len); |
2448 |
++ error = rmi_read_block(fn->rmi_dev, data_addr, &obs, ob_len); |
2449 |
+ if (error) { |
2450 |
+ dev_err(&fn->dev, |
2451 |
+ "%s: Failed to read F03 output buffers: %d\n", |
2452 |
+@@ -266,7 +309,8 @@ static void rmi_f03_remove(struct rmi_function *fn) |
2453 |
+ { |
2454 |
+ struct f03_data *f03 = dev_get_drvdata(&fn->dev); |
2455 |
+ |
2456 |
+- serio_unregister_port(f03->serio); |
2457 |
++ if (f03->serio_registered) |
2458 |
++ serio_unregister_port(f03->serio); |
2459 |
+ } |
2460 |
+ |
2461 |
+ struct rmi_function_handler rmi_f03_handler = { |
2462 |
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c |
2463 |
+index 658c54b3b07a..1598d1e04989 100644 |
2464 |
+--- a/drivers/md/bcache/btree.c |
2465 |
++++ b/drivers/md/bcache/btree.c |
2466 |
+@@ -807,7 +807,10 @@ int bch_btree_cache_alloc(struct cache_set *c) |
2467 |
+ c->shrink.scan_objects = bch_mca_scan; |
2468 |
+ c->shrink.seeks = 4; |
2469 |
+ c->shrink.batch = c->btree_pages * 2; |
2470 |
+- register_shrinker(&c->shrink); |
2471 |
++ |
2472 |
++ if (register_shrinker(&c->shrink)) |
2473 |
++ pr_warn("bcache: %s: could not register shrinker", |
2474 |
++ __func__); |
2475 |
+ |
2476 |
+ return 0; |
2477 |
+ } |
2478 |
+diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c |
2479 |
+index f06f09a0876e..71fb5734995b 100644 |
2480 |
+--- a/drivers/media/usb/usbtv/usbtv-core.c |
2481 |
++++ b/drivers/media/usb/usbtv/usbtv-core.c |
2482 |
+@@ -144,6 +144,7 @@ static void usbtv_disconnect(struct usb_interface *intf) |
2483 |
+ |
2484 |
+ static const struct usb_device_id usbtv_id_table[] = { |
2485 |
+ { USB_DEVICE(0x1b71, 0x3002) }, |
2486 |
++ { USB_DEVICE(0x1f71, 0x3301) }, |
2487 |
+ {} |
2488 |
+ }; |
2489 |
+ MODULE_DEVICE_TABLE(usb, usbtv_id_table); |
2490 |
+diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c |
2491 |
+index 78b3172c8e6e..d46cb1f0868f 100644 |
2492 |
+--- a/drivers/misc/mei/pci-me.c |
2493 |
++++ b/drivers/misc/mei/pci-me.c |
2494 |
+@@ -238,8 +238,11 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
2495 |
+ */ |
2496 |
+ mei_me_set_pm_domain(dev); |
2497 |
+ |
2498 |
+- if (mei_pg_is_enabled(dev)) |
2499 |
++ if (mei_pg_is_enabled(dev)) { |
2500 |
+ pm_runtime_put_noidle(&pdev->dev); |
2501 |
++ if (hw->d0i3_supported) |
2502 |
++ pm_runtime_allow(&pdev->dev); |
2503 |
++ } |
2504 |
+ |
2505 |
+ dev_dbg(&pdev->dev, "initialization successful.\n"); |
2506 |
+ |
2507 |
+diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c |
2508 |
+index 81370c79aa48..7ad0db65a6fa 100644 |
2509 |
+--- a/drivers/mtd/nand/denali_pci.c |
2510 |
++++ b/drivers/mtd/nand/denali_pci.c |
2511 |
+@@ -124,3 +124,7 @@ static struct pci_driver denali_pci_driver = { |
2512 |
+ }; |
2513 |
+ |
2514 |
+ module_pci_driver(denali_pci_driver); |
2515 |
++ |
2516 |
++MODULE_DESCRIPTION("PCI driver for Denali NAND controller"); |
2517 |
++MODULE_AUTHOR("Intel Corporation and its suppliers"); |
2518 |
++MODULE_LICENSE("GPL v2"); |
2519 |
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c |
2520 |
+index 3cbe771b3352..a22336fef66b 100644 |
2521 |
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c |
2522 |
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c |
2523 |
+@@ -2133,8 +2133,8 @@ static int bnxt_get_module_eeprom(struct net_device *dev, |
2524 |
+ /* Read A2 portion of the EEPROM */ |
2525 |
+ if (length) { |
2526 |
+ start -= ETH_MODULE_SFF_8436_LEN; |
2527 |
+- bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1, start, |
2528 |
+- length, data); |
2529 |
++ rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1, |
2530 |
++ start, length, data); |
2531 |
+ } |
2532 |
+ return rc; |
2533 |
+ } |
2534 |
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c |
2535 |
+index 667dbc7d4a4e..d1a44a84c97e 100644 |
2536 |
+--- a/drivers/net/ethernet/intel/igb/igb_main.c |
2537 |
++++ b/drivers/net/ethernet/intel/igb/igb_main.c |
2538 |
+@@ -3331,7 +3331,7 @@ static int __igb_close(struct net_device *netdev, bool suspending) |
2539 |
+ |
2540 |
+ int igb_close(struct net_device *netdev) |
2541 |
+ { |
2542 |
+- if (netif_device_present(netdev)) |
2543 |
++ if (netif_device_present(netdev) || netdev->dismantle) |
2544 |
+ return __igb_close(netdev, false); |
2545 |
+ return 0; |
2546 |
+ } |
2547 |
+diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c |
2548 |
+index d147dc7d0f77..1dd3a1264a53 100644 |
2549 |
+--- a/drivers/net/ethernet/marvell/mvpp2.c |
2550 |
++++ b/drivers/net/ethernet/marvell/mvpp2.c |
2551 |
+@@ -5597,7 +5597,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port, |
2552 |
+ sizeof(*txq_pcpu->buffs), |
2553 |
+ GFP_KERNEL); |
2554 |
+ if (!txq_pcpu->buffs) |
2555 |
+- goto cleanup; |
2556 |
++ return -ENOMEM; |
2557 |
+ |
2558 |
+ txq_pcpu->count = 0; |
2559 |
+ txq_pcpu->reserved_num = 0; |
2560 |
+@@ -5610,26 +5610,10 @@ static int mvpp2_txq_init(struct mvpp2_port *port, |
2561 |
+ &txq_pcpu->tso_headers_dma, |
2562 |
+ GFP_KERNEL); |
2563 |
+ if (!txq_pcpu->tso_headers) |
2564 |
+- goto cleanup; |
2565 |
++ return -ENOMEM; |
2566 |
+ } |
2567 |
+ |
2568 |
+ return 0; |
2569 |
+-cleanup: |
2570 |
+- for_each_present_cpu(cpu) { |
2571 |
+- txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); |
2572 |
+- kfree(txq_pcpu->buffs); |
2573 |
+- |
2574 |
+- dma_free_coherent(port->dev->dev.parent, |
2575 |
+- txq_pcpu->size * TSO_HEADER_SIZE, |
2576 |
+- txq_pcpu->tso_headers, |
2577 |
+- txq_pcpu->tso_headers_dma); |
2578 |
+- } |
2579 |
+- |
2580 |
+- dma_free_coherent(port->dev->dev.parent, |
2581 |
+- txq->size * MVPP2_DESC_ALIGNED_SIZE, |
2582 |
+- txq->descs, txq->descs_dma); |
2583 |
+- |
2584 |
+- return -ENOMEM; |
2585 |
+ } |
2586 |
+ |
2587 |
+ /* Free allocated TXQ resources */ |
2588 |
+diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig |
2589 |
+index 6d68c8a8f4f2..da4ec575ccf9 100644 |
2590 |
+--- a/drivers/net/ethernet/xilinx/Kconfig |
2591 |
++++ b/drivers/net/ethernet/xilinx/Kconfig |
2592 |
+@@ -34,6 +34,7 @@ config XILINX_AXI_EMAC |
2593 |
+ config XILINX_LL_TEMAC |
2594 |
+ tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver" |
2595 |
+ depends on (PPC || MICROBLAZE) |
2596 |
++ depends on !64BIT || BROKEN |
2597 |
+ select PHYLIB |
2598 |
+ ---help--- |
2599 |
+ This driver supports the Xilinx 10/100/1000 LocalLink TEMAC |
2600 |
+diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c |
2601 |
+index aebc08beceba..21b3f36e023a 100644 |
2602 |
+--- a/drivers/net/phy/marvell10g.c |
2603 |
++++ b/drivers/net/phy/marvell10g.c |
2604 |
+@@ -16,6 +16,7 @@ |
2605 |
+ * link takes priority and the other port is completely locked out. |
2606 |
+ */ |
2607 |
+ #include <linux/phy.h> |
2608 |
++#include <linux/marvell_phy.h> |
2609 |
+ |
2610 |
+ enum { |
2611 |
+ MV_PCS_BASE_T = 0x0000, |
2612 |
+@@ -338,7 +339,7 @@ static int mv3310_read_status(struct phy_device *phydev) |
2613 |
+ static struct phy_driver mv3310_drivers[] = { |
2614 |
+ { |
2615 |
+ .phy_id = 0x002b09aa, |
2616 |
+- .phy_id_mask = 0xffffffff, |
2617 |
++ .phy_id_mask = MARVELL_PHY_ID_MASK, |
2618 |
+ .name = "mv88x3310", |
2619 |
+ .features = SUPPORTED_10baseT_Full | |
2620 |
+ SUPPORTED_100baseT_Full | |
2621 |
+@@ -360,7 +361,7 @@ static struct phy_driver mv3310_drivers[] = { |
2622 |
+ module_phy_driver(mv3310_drivers); |
2623 |
+ |
2624 |
+ static struct mdio_device_id __maybe_unused mv3310_tbl[] = { |
2625 |
+- { 0x002b09aa, 0xffffffff }, |
2626 |
++ { 0x002b09aa, MARVELL_PHY_ID_MASK }, |
2627 |
+ { }, |
2628 |
+ }; |
2629 |
+ MODULE_DEVICE_TABLE(mdio, mv3310_tbl); |
2630 |
+diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c |
2631 |
+index f0439f2d566b..173891b11b2d 100644 |
2632 |
+--- a/drivers/net/wireless/ath/ath9k/channel.c |
2633 |
++++ b/drivers/net/wireless/ath/ath9k/channel.c |
2634 |
+@@ -1112,7 +1112,7 @@ ath_chanctx_send_vif_ps_frame(struct ath_softc *sc, struct ath_vif *avp, |
2635 |
+ if (!avp->assoc) |
2636 |
+ return false; |
2637 |
+ |
2638 |
+- skb = ieee80211_nullfunc_get(sc->hw, vif); |
2639 |
++ skb = ieee80211_nullfunc_get(sc->hw, vif, false); |
2640 |
+ if (!skb) |
2641 |
+ return false; |
2642 |
+ |
2643 |
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h |
2644 |
+index 9c889a32fe24..223fb77a3aa9 100644 |
2645 |
+--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h |
2646 |
++++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h |
2647 |
+@@ -209,8 +209,6 @@ static inline void iwl_fw_dbg_stop_recording(struct iwl_fw_runtime *fwrt) |
2648 |
+ |
2649 |
+ static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt) |
2650 |
+ { |
2651 |
+- iwl_fw_dbg_stop_recording(fwrt); |
2652 |
+- |
2653 |
+ fwrt->dump.conf = FW_DBG_INVALID; |
2654 |
+ } |
2655 |
+ |
2656 |
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c |
2657 |
+index 53e269d54050..0ae7624eac9d 100644 |
2658 |
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c |
2659 |
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c |
2660 |
+@@ -1181,6 +1181,8 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, |
2661 |
+ return le32_to_cpu(txq_timer->p2p_go); |
2662 |
+ case NL80211_IFTYPE_P2P_DEVICE: |
2663 |
+ return le32_to_cpu(txq_timer->p2p_device); |
2664 |
++ case NL80211_IFTYPE_MONITOR: |
2665 |
++ return default_timeout; |
2666 |
+ default: |
2667 |
+ WARN_ON(1); |
2668 |
+ return mvm->cfg->base_params->wd_timeout; |
2669 |
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c |
2670 |
+index c59f4581e972..ac05fd1e74c4 100644 |
2671 |
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c |
2672 |
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c |
2673 |
+@@ -49,6 +49,7 @@ |
2674 |
+ * |
2675 |
+ *****************************************************************************/ |
2676 |
+ #include "iwl-trans.h" |
2677 |
++#include "iwl-prph.h" |
2678 |
+ #include "iwl-context-info.h" |
2679 |
+ #include "internal.h" |
2680 |
+ |
2681 |
+@@ -156,6 +157,11 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power) |
2682 |
+ |
2683 |
+ trans_pcie->is_down = true; |
2684 |
+ |
2685 |
++ /* Stop dbgc before stopping device */ |
2686 |
++ iwl_write_prph(trans, DBGC_IN_SAMPLE, 0); |
2687 |
++ udelay(100); |
2688 |
++ iwl_write_prph(trans, DBGC_OUT_CTRL, 0); |
2689 |
++ |
2690 |
+ /* tell the device to stop sending interrupts */ |
2691 |
+ iwl_disable_interrupts(trans); |
2692 |
+ |
2693 |
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c |
2694 |
+index 2e3e013ec95a..12a9b86d71ea 100644 |
2695 |
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c |
2696 |
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c |
2697 |
+@@ -1138,6 +1138,15 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) |
2698 |
+ |
2699 |
+ trans_pcie->is_down = true; |
2700 |
+ |
2701 |
++ /* Stop dbgc before stopping device */ |
2702 |
++ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) { |
2703 |
++ iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100); |
2704 |
++ } else { |
2705 |
++ iwl_write_prph(trans, DBGC_IN_SAMPLE, 0); |
2706 |
++ udelay(100); |
2707 |
++ iwl_write_prph(trans, DBGC_OUT_CTRL, 0); |
2708 |
++ } |
2709 |
++ |
2710 |
+ /* tell the device to stop sending interrupts */ |
2711 |
+ iwl_disable_interrupts(trans); |
2712 |
+ |
2713 |
+diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c |
2714 |
+index a52224836a2b..666b88cb2cfe 100644 |
2715 |
+--- a/drivers/net/wireless/st/cw1200/sta.c |
2716 |
++++ b/drivers/net/wireless/st/cw1200/sta.c |
2717 |
+@@ -198,7 +198,7 @@ void __cw1200_cqm_bssloss_sm(struct cw1200_common *priv, |
2718 |
+ |
2719 |
+ priv->bss_loss_state++; |
2720 |
+ |
2721 |
+- skb = ieee80211_nullfunc_get(priv->hw, priv->vif); |
2722 |
++ skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false); |
2723 |
+ WARN_ON(!skb); |
2724 |
+ if (skb) |
2725 |
+ cw1200_tx(priv->hw, NULL, skb); |
2726 |
+@@ -2266,7 +2266,7 @@ static int cw1200_upload_null(struct cw1200_common *priv) |
2727 |
+ .rate = 0xFF, |
2728 |
+ }; |
2729 |
+ |
2730 |
+- frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif); |
2731 |
++ frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false); |
2732 |
+ if (!frame.skb) |
2733 |
+ return -ENOMEM; |
2734 |
+ |
2735 |
+diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c |
2736 |
+index 9915d83a4a30..6d02c660b4ab 100644 |
2737 |
+--- a/drivers/net/wireless/ti/wl1251/main.c |
2738 |
++++ b/drivers/net/wireless/ti/wl1251/main.c |
2739 |
+@@ -566,7 +566,7 @@ static int wl1251_build_null_data(struct wl1251 *wl) |
2740 |
+ size = sizeof(struct wl12xx_null_data_template); |
2741 |
+ ptr = NULL; |
2742 |
+ } else { |
2743 |
+- skb = ieee80211_nullfunc_get(wl->hw, wl->vif); |
2744 |
++ skb = ieee80211_nullfunc_get(wl->hw, wl->vif, false); |
2745 |
+ if (!skb) |
2746 |
+ goto out; |
2747 |
+ size = skb->len; |
2748 |
+diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c |
2749 |
+index 2bfc12fdc929..761cf8573a80 100644 |
2750 |
+--- a/drivers/net/wireless/ti/wlcore/cmd.c |
2751 |
++++ b/drivers/net/wireless/ti/wlcore/cmd.c |
2752 |
+@@ -1069,7 +1069,8 @@ int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif) |
2753 |
+ ptr = NULL; |
2754 |
+ } else { |
2755 |
+ skb = ieee80211_nullfunc_get(wl->hw, |
2756 |
+- wl12xx_wlvif_to_vif(wlvif)); |
2757 |
++ wl12xx_wlvif_to_vif(wlvif), |
2758 |
++ false); |
2759 |
+ if (!skb) |
2760 |
+ goto out; |
2761 |
+ size = skb->len; |
2762 |
+@@ -1096,7 +1097,7 @@ int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl, |
2763 |
+ struct sk_buff *skb = NULL; |
2764 |
+ int ret = -ENOMEM; |
2765 |
+ |
2766 |
+- skb = ieee80211_nullfunc_get(wl->hw, vif); |
2767 |
++ skb = ieee80211_nullfunc_get(wl->hw, vif, false); |
2768 |
+ if (!skb) |
2769 |
+ goto out; |
2770 |
+ |
2771 |
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c |
2772 |
+index 8b8689c6d887..391432e2725d 100644 |
2773 |
+--- a/drivers/net/xen-netfront.c |
2774 |
++++ b/drivers/net/xen-netfront.c |
2775 |
+@@ -87,6 +87,8 @@ struct netfront_cb { |
2776 |
+ /* IRQ name is queue name with "-tx" or "-rx" appended */ |
2777 |
+ #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) |
2778 |
+ |
2779 |
++static DECLARE_WAIT_QUEUE_HEAD(module_unload_q); |
2780 |
++ |
2781 |
+ struct netfront_stats { |
2782 |
+ u64 packets; |
2783 |
+ u64 bytes; |
2784 |
+@@ -2021,10 +2023,12 @@ static void netback_changed(struct xenbus_device *dev, |
2785 |
+ break; |
2786 |
+ |
2787 |
+ case XenbusStateClosed: |
2788 |
++ wake_up_all(&module_unload_q); |
2789 |
+ if (dev->state == XenbusStateClosed) |
2790 |
+ break; |
2791 |
+ /* Missed the backend's CLOSING state -- fallthrough */ |
2792 |
+ case XenbusStateClosing: |
2793 |
++ wake_up_all(&module_unload_q); |
2794 |
+ xenbus_frontend_closed(dev); |
2795 |
+ break; |
2796 |
+ } |
2797 |
+@@ -2130,6 +2134,20 @@ static int xennet_remove(struct xenbus_device *dev) |
2798 |
+ |
2799 |
+ dev_dbg(&dev->dev, "%s\n", dev->nodename); |
2800 |
+ |
2801 |
++ if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) { |
2802 |
++ xenbus_switch_state(dev, XenbusStateClosing); |
2803 |
++ wait_event(module_unload_q, |
2804 |
++ xenbus_read_driver_state(dev->otherend) == |
2805 |
++ XenbusStateClosing); |
2806 |
++ |
2807 |
++ xenbus_switch_state(dev, XenbusStateClosed); |
2808 |
++ wait_event(module_unload_q, |
2809 |
++ xenbus_read_driver_state(dev->otherend) == |
2810 |
++ XenbusStateClosed || |
2811 |
++ xenbus_read_driver_state(dev->otherend) == |
2812 |
++ XenbusStateUnknown); |
2813 |
++ } |
2814 |
++ |
2815 |
+ xennet_disconnect_backend(info); |
2816 |
+ |
2817 |
+ unregister_netdev(info->netdev); |
2818 |
+diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h |
2819 |
+index bf33663218cd..9ff8529a64a9 100644 |
2820 |
+--- a/drivers/nvme/host/fabrics.h |
2821 |
++++ b/drivers/nvme/host/fabrics.h |
2822 |
+@@ -142,4 +142,34 @@ void nvmf_free_options(struct nvmf_ctrl_options *opts); |
2823 |
+ int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size); |
2824 |
+ bool nvmf_should_reconnect(struct nvme_ctrl *ctrl); |
2825 |
+ |
2826 |
++static inline blk_status_t nvmf_check_init_req(struct nvme_ctrl *ctrl, |
2827 |
++ struct request *rq) |
2828 |
++{ |
2829 |
++ struct nvme_command *cmd = nvme_req(rq)->cmd; |
2830 |
++ |
2831 |
++ /* |
2832 |
++ * We cannot accept any other command until the connect command has |
2833 |
++ * completed, so only allow connect to pass. |
2834 |
++ */ |
2835 |
++ if (!blk_rq_is_passthrough(rq) || |
2836 |
++ cmd->common.opcode != nvme_fabrics_command || |
2837 |
++ cmd->fabrics.fctype != nvme_fabrics_type_connect) { |
2838 |
++ /* |
2839 |
++ * Reconnecting state means transport disruption, which can take |
2840 |
++ * a long time and even might fail permanently, fail fast to |
2841 |
++ * give upper layers a chance to failover. |
2842 |
++ * Deleting state means that the ctrl will never accept commands |
2843 |
++ * again, fail it permanently. |
2844 |
++ */ |
2845 |
++ if (ctrl->state == NVME_CTRL_RECONNECTING || |
2846 |
++ ctrl->state == NVME_CTRL_DELETING) { |
2847 |
++ nvme_req(rq)->status = NVME_SC_ABORT_REQ; |
2848 |
++ return BLK_STS_IOERR; |
2849 |
++ } |
2850 |
++ return BLK_STS_RESOURCE; /* try again later */ |
2851 |
++ } |
2852 |
++ |
2853 |
++ return BLK_STS_OK; |
2854 |
++} |
2855 |
++ |
2856 |
+ #endif /* _NVME_FABRICS_H */ |
2857 |
+diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c |
2858 |
+index be49d0f79381..3148d760d825 100644 |
2859 |
+--- a/drivers/nvme/host/fc.c |
2860 |
++++ b/drivers/nvme/host/fc.c |
2861 |
+@@ -41,6 +41,7 @@ |
2862 |
+ |
2863 |
+ enum nvme_fc_queue_flags { |
2864 |
+ NVME_FC_Q_CONNECTED = (1 << 0), |
2865 |
++ NVME_FC_Q_LIVE = (1 << 1), |
2866 |
+ }; |
2867 |
+ |
2868 |
+ #define NVMEFC_QUEUE_DELAY 3 /* ms units */ |
2869 |
+@@ -1654,6 +1655,7 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue) |
2870 |
+ if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags)) |
2871 |
+ return; |
2872 |
+ |
2873 |
++ clear_bit(NVME_FC_Q_LIVE, &queue->flags); |
2874 |
+ /* |
2875 |
+ * Current implementation never disconnects a single queue. |
2876 |
+ * It always terminates a whole association. So there is never |
2877 |
+@@ -1661,7 +1663,6 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue) |
2878 |
+ */ |
2879 |
+ |
2880 |
+ queue->connection_id = 0; |
2881 |
+- clear_bit(NVME_FC_Q_CONNECTED, &queue->flags); |
2882 |
+ } |
2883 |
+ |
2884 |
+ static void |
2885 |
+@@ -1740,6 +1741,8 @@ nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize) |
2886 |
+ ret = nvmf_connect_io_queue(&ctrl->ctrl, i); |
2887 |
+ if (ret) |
2888 |
+ break; |
2889 |
++ |
2890 |
++ set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags); |
2891 |
+ } |
2892 |
+ |
2893 |
+ return ret; |
2894 |
+@@ -2048,6 +2051,14 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, |
2895 |
+ return BLK_STS_RESOURCE; |
2896 |
+ } |
2897 |
+ |
2898 |
++static inline blk_status_t nvme_fc_is_ready(struct nvme_fc_queue *queue, |
2899 |
++ struct request *rq) |
2900 |
++{ |
2901 |
++ if (unlikely(!test_bit(NVME_FC_Q_LIVE, &queue->flags))) |
2902 |
++ return nvmf_check_init_req(&queue->ctrl->ctrl, rq); |
2903 |
++ return BLK_STS_OK; |
2904 |
++} |
2905 |
++ |
2906 |
+ static blk_status_t |
2907 |
+ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx, |
2908 |
+ const struct blk_mq_queue_data *bd) |
2909 |
+@@ -2063,6 +2074,10 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx, |
2910 |
+ u32 data_len; |
2911 |
+ blk_status_t ret; |
2912 |
+ |
2913 |
++ ret = nvme_fc_is_ready(queue, rq); |
2914 |
++ if (unlikely(ret)) |
2915 |
++ return ret; |
2916 |
++ |
2917 |
+ ret = nvme_setup_cmd(ns, rq, sqe); |
2918 |
+ if (ret) |
2919 |
+ return ret; |
2920 |
+@@ -2398,6 +2413,8 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) |
2921 |
+ if (ret) |
2922 |
+ goto out_disconnect_admin_queue; |
2923 |
+ |
2924 |
++ set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags); |
2925 |
++ |
2926 |
+ /* |
2927 |
+ * Check controller capabilities |
2928 |
+ * |
2929 |
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c |
2930 |
+index 75539f7c58b9..cdd2fd509ddc 100644 |
2931 |
+--- a/drivers/nvme/host/pci.c |
2932 |
++++ b/drivers/nvme/host/pci.c |
2933 |
+@@ -1617,6 +1617,7 @@ static void nvme_free_host_mem(struct nvme_dev *dev) |
2934 |
+ dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs), |
2935 |
+ dev->host_mem_descs, dev->host_mem_descs_dma); |
2936 |
+ dev->host_mem_descs = NULL; |
2937 |
++ dev->nr_host_mem_descs = 0; |
2938 |
+ } |
2939 |
+ |
2940 |
+ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred, |
2941 |
+@@ -1645,7 +1646,7 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred, |
2942 |
+ if (!bufs) |
2943 |
+ goto out_free_descs; |
2944 |
+ |
2945 |
+- for (size = 0; size < preferred; size += len) { |
2946 |
++ for (size = 0; size < preferred && i < max_entries; size += len) { |
2947 |
+ dma_addr_t dma_addr; |
2948 |
+ |
2949 |
+ len = min_t(u64, chunk_size, preferred - size); |
2950 |
+@@ -2282,7 +2283,7 @@ static int nvme_dev_map(struct nvme_dev *dev) |
2951 |
+ return -ENODEV; |
2952 |
+ } |
2953 |
+ |
2954 |
+-static unsigned long check_dell_samsung_bug(struct pci_dev *pdev) |
2955 |
++static unsigned long check_vendor_combination_bug(struct pci_dev *pdev) |
2956 |
+ { |
2957 |
+ if (pdev->vendor == 0x144d && pdev->device == 0xa802) { |
2958 |
+ /* |
2959 |
+@@ -2297,6 +2298,14 @@ static unsigned long check_dell_samsung_bug(struct pci_dev *pdev) |
2960 |
+ (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") || |
2961 |
+ dmi_match(DMI_PRODUCT_NAME, "Precision 5510"))) |
2962 |
+ return NVME_QUIRK_NO_DEEPEST_PS; |
2963 |
++ } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) { |
2964 |
++ /* |
2965 |
++ * Samsung SSD 960 EVO drops off the PCIe bus after system |
2966 |
++ * suspend on a Ryzen board, ASUS PRIME B350M-A. |
2967 |
++ */ |
2968 |
++ if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") && |
2969 |
++ dmi_match(DMI_BOARD_NAME, "PRIME B350M-A")) |
2970 |
++ return NVME_QUIRK_NO_APST; |
2971 |
+ } |
2972 |
+ |
2973 |
+ return 0; |
2974 |
+@@ -2336,7 +2345,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) |
2975 |
+ if (result) |
2976 |
+ goto unmap; |
2977 |
+ |
2978 |
+- quirks |= check_dell_samsung_bug(pdev); |
2979 |
++ quirks |= check_vendor_combination_bug(pdev); |
2980 |
+ |
2981 |
+ result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops, |
2982 |
+ quirks); |
2983 |
+diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c |
2984 |
+index 0ebb539f3bd3..33d4431c2b4b 100644 |
2985 |
+--- a/drivers/nvme/host/rdma.c |
2986 |
++++ b/drivers/nvme/host/rdma.c |
2987 |
+@@ -67,6 +67,9 @@ struct nvme_rdma_request { |
2988 |
+ struct nvme_request req; |
2989 |
+ struct ib_mr *mr; |
2990 |
+ struct nvme_rdma_qe sqe; |
2991 |
++ union nvme_result result; |
2992 |
++ __le16 status; |
2993 |
++ refcount_t ref; |
2994 |
+ struct ib_sge sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS]; |
2995 |
+ u32 num_sge; |
2996 |
+ int nents; |
2997 |
+@@ -1177,6 +1180,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue, |
2998 |
+ req->num_sge = 1; |
2999 |
+ req->inline_data = false; |
3000 |
+ req->mr->need_inval = false; |
3001 |
++ refcount_set(&req->ref, 2); /* send and recv completions */ |
3002 |
+ |
3003 |
+ c->common.flags |= NVME_CMD_SGL_METABUF; |
3004 |
+ |
3005 |
+@@ -1213,8 +1217,19 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue, |
3006 |
+ |
3007 |
+ static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc) |
3008 |
+ { |
3009 |
+- if (unlikely(wc->status != IB_WC_SUCCESS)) |
3010 |
++ struct nvme_rdma_qe *qe = |
3011 |
++ container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe); |
3012 |
++ struct nvme_rdma_request *req = |
3013 |
++ container_of(qe, struct nvme_rdma_request, sqe); |
3014 |
++ struct request *rq = blk_mq_rq_from_pdu(req); |
3015 |
++ |
3016 |
++ if (unlikely(wc->status != IB_WC_SUCCESS)) { |
3017 |
+ nvme_rdma_wr_error(cq, wc, "SEND"); |
3018 |
++ return; |
3019 |
++ } |
3020 |
++ |
3021 |
++ if (refcount_dec_and_test(&req->ref)) |
3022 |
++ nvme_end_request(rq, req->status, req->result); |
3023 |
+ } |
3024 |
+ |
3025 |
+ /* |
3026 |
+@@ -1359,14 +1374,19 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue, |
3027 |
+ } |
3028 |
+ req = blk_mq_rq_to_pdu(rq); |
3029 |
+ |
3030 |
+- if (rq->tag == tag) |
3031 |
+- ret = 1; |
3032 |
++ req->status = cqe->status; |
3033 |
++ req->result = cqe->result; |
3034 |
+ |
3035 |
+ if ((wc->wc_flags & IB_WC_WITH_INVALIDATE) && |
3036 |
+ wc->ex.invalidate_rkey == req->mr->rkey) |
3037 |
+ req->mr->need_inval = false; |
3038 |
+ |
3039 |
+- nvme_end_request(rq, cqe->status, cqe->result); |
3040 |
++ if (refcount_dec_and_test(&req->ref)) { |
3041 |
++ if (rq->tag == tag) |
3042 |
++ ret = 1; |
3043 |
++ nvme_end_request(rq, req->status, req->result); |
3044 |
++ } |
3045 |
++ |
3046 |
+ return ret; |
3047 |
+ } |
3048 |
+ |
3049 |
+@@ -1603,31 +1623,11 @@ nvme_rdma_timeout(struct request *rq, bool reserved) |
3050 |
+ * We cannot accept any other command until the Connect command has completed. |
3051 |
+ */ |
3052 |
+ static inline blk_status_t |
3053 |
+-nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, struct request *rq) |
3054 |
+-{ |
3055 |
+- if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) { |
3056 |
+- struct nvme_command *cmd = nvme_req(rq)->cmd; |
3057 |
+- |
3058 |
+- if (!blk_rq_is_passthrough(rq) || |
3059 |
+- cmd->common.opcode != nvme_fabrics_command || |
3060 |
+- cmd->fabrics.fctype != nvme_fabrics_type_connect) { |
3061 |
+- /* |
3062 |
+- * reconnecting state means transport disruption, which |
3063 |
+- * can take a long time and even might fail permanently, |
3064 |
+- * fail fast to give upper layers a chance to failover. |
3065 |
+- * deleting state means that the ctrl will never accept |
3066 |
+- * commands again, fail it permanently. |
3067 |
+- */ |
3068 |
+- if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING || |
3069 |
+- queue->ctrl->ctrl.state == NVME_CTRL_DELETING) { |
3070 |
+- nvme_req(rq)->status = NVME_SC_ABORT_REQ; |
3071 |
+- return BLK_STS_IOERR; |
3072 |
+- } |
3073 |
+- return BLK_STS_RESOURCE; /* try again later */ |
3074 |
+- } |
3075 |
+- } |
3076 |
+- |
3077 |
+- return 0; |
3078 |
++nvme_rdma_is_ready(struct nvme_rdma_queue *queue, struct request *rq) |
3079 |
++{ |
3080 |
++ if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) |
3081 |
++ return nvmf_check_init_req(&queue->ctrl->ctrl, rq); |
3082 |
++ return BLK_STS_OK; |
3083 |
+ } |
3084 |
+ |
3085 |
+ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, |
3086 |
+@@ -1646,7 +1646,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, |
3087 |
+ |
3088 |
+ WARN_ON_ONCE(rq->tag < 0); |
3089 |
+ |
3090 |
+- ret = nvme_rdma_queue_is_ready(queue, rq); |
3091 |
++ ret = nvme_rdma_is_ready(queue, rq); |
3092 |
+ if (unlikely(ret)) |
3093 |
+ return ret; |
3094 |
+ |
3095 |
+diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c |
3096 |
+index 58e010bdda3e..8e21211b904b 100644 |
3097 |
+--- a/drivers/nvme/target/fc.c |
3098 |
++++ b/drivers/nvme/target/fc.c |
3099 |
+@@ -532,15 +532,15 @@ nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue, |
3100 |
+ |
3101 |
+ tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq); |
3102 |
+ |
3103 |
++ /* release the queue lookup reference on the completed IO */ |
3104 |
++ nvmet_fc_tgt_q_put(queue); |
3105 |
++ |
3106 |
+ spin_lock_irqsave(&queue->qlock, flags); |
3107 |
+ deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, |
3108 |
+ struct nvmet_fc_defer_fcp_req, req_list); |
3109 |
+ if (!deferfcp) { |
3110 |
+ list_add_tail(&fod->fcp_list, &fod->queue->fod_list); |
3111 |
+ spin_unlock_irqrestore(&queue->qlock, flags); |
3112 |
+- |
3113 |
+- /* Release reference taken at queue lookup and fod allocation */ |
3114 |
+- nvmet_fc_tgt_q_put(queue); |
3115 |
+ return; |
3116 |
+ } |
3117 |
+ |
3118 |
+@@ -759,6 +759,9 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue) |
3119 |
+ tgtport->ops->fcp_req_release(&tgtport->fc_target_port, |
3120 |
+ deferfcp->fcp_req); |
3121 |
+ |
3122 |
++ /* release the queue lookup reference */ |
3123 |
++ nvmet_fc_tgt_q_put(queue); |
3124 |
++ |
3125 |
+ kfree(deferfcp); |
3126 |
+ |
3127 |
+ spin_lock_irqsave(&queue->qlock, flags); |
3128 |
+diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c |
3129 |
+index 92628c432926..02aff5cc48bf 100644 |
3130 |
+--- a/drivers/nvme/target/loop.c |
3131 |
++++ b/drivers/nvme/target/loop.c |
3132 |
+@@ -61,10 +61,15 @@ static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl) |
3133 |
+ return container_of(ctrl, struct nvme_loop_ctrl, ctrl); |
3134 |
+ } |
3135 |
+ |
3136 |
++enum nvme_loop_queue_flags { |
3137 |
++ NVME_LOOP_Q_LIVE = 0, |
3138 |
++}; |
3139 |
++ |
3140 |
+ struct nvme_loop_queue { |
3141 |
+ struct nvmet_cq nvme_cq; |
3142 |
+ struct nvmet_sq nvme_sq; |
3143 |
+ struct nvme_loop_ctrl *ctrl; |
3144 |
++ unsigned long flags; |
3145 |
+ }; |
3146 |
+ |
3147 |
+ static struct nvmet_port *nvmet_loop_port; |
3148 |
+@@ -153,6 +158,14 @@ nvme_loop_timeout(struct request *rq, bool reserved) |
3149 |
+ return BLK_EH_HANDLED; |
3150 |
+ } |
3151 |
+ |
3152 |
++static inline blk_status_t nvme_loop_is_ready(struct nvme_loop_queue *queue, |
3153 |
++ struct request *rq) |
3154 |
++{ |
3155 |
++ if (unlikely(!test_bit(NVME_LOOP_Q_LIVE, &queue->flags))) |
3156 |
++ return nvmf_check_init_req(&queue->ctrl->ctrl, rq); |
3157 |
++ return BLK_STS_OK; |
3158 |
++} |
3159 |
++ |
3160 |
+ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx, |
3161 |
+ const struct blk_mq_queue_data *bd) |
3162 |
+ { |
3163 |
+@@ -162,6 +175,10 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx, |
3164 |
+ struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req); |
3165 |
+ blk_status_t ret; |
3166 |
+ |
3167 |
++ ret = nvme_loop_is_ready(queue, req); |
3168 |
++ if (unlikely(ret)) |
3169 |
++ return ret; |
3170 |
++ |
3171 |
+ ret = nvme_setup_cmd(ns, req, &iod->cmd); |
3172 |
+ if (ret) |
3173 |
+ return ret; |
3174 |
+@@ -275,6 +292,7 @@ static const struct blk_mq_ops nvme_loop_admin_mq_ops = { |
3175 |
+ |
3176 |
+ static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl) |
3177 |
+ { |
3178 |
++ clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags); |
3179 |
+ nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); |
3180 |
+ blk_cleanup_queue(ctrl->ctrl.admin_q); |
3181 |
+ blk_mq_free_tag_set(&ctrl->admin_tag_set); |
3182 |
+@@ -305,8 +323,10 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl) |
3183 |
+ { |
3184 |
+ int i; |
3185 |
+ |
3186 |
+- for (i = 1; i < ctrl->ctrl.queue_count; i++) |
3187 |
++ for (i = 1; i < ctrl->ctrl.queue_count; i++) { |
3188 |
++ clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags); |
3189 |
+ nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); |
3190 |
++ } |
3191 |
+ } |
3192 |
+ |
3193 |
+ static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl) |
3194 |
+@@ -346,6 +366,7 @@ static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl) |
3195 |
+ ret = nvmf_connect_io_queue(&ctrl->ctrl, i); |
3196 |
+ if (ret) |
3197 |
+ return ret; |
3198 |
++ set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags); |
3199 |
+ } |
3200 |
+ |
3201 |
+ return 0; |
3202 |
+@@ -387,6 +408,8 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) |
3203 |
+ if (error) |
3204 |
+ goto out_cleanup_queue; |
3205 |
+ |
3206 |
++ set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags); |
3207 |
++ |
3208 |
+ error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap); |
3209 |
+ if (error) { |
3210 |
+ dev_err(ctrl->ctrl.device, |
3211 |
+diff --git a/drivers/power/reset/zx-reboot.c b/drivers/power/reset/zx-reboot.c |
3212 |
+index 7549c7f74a3c..c03e96e6a041 100644 |
3213 |
+--- a/drivers/power/reset/zx-reboot.c |
3214 |
++++ b/drivers/power/reset/zx-reboot.c |
3215 |
+@@ -82,3 +82,7 @@ static struct platform_driver zx_reboot_driver = { |
3216 |
+ }, |
3217 |
+ }; |
3218 |
+ module_platform_driver(zx_reboot_driver); |
3219 |
++ |
3220 |
++MODULE_DESCRIPTION("ZTE SoCs reset driver"); |
3221 |
++MODULE_AUTHOR("Jun Nie <jun.nie@××××××.org>"); |
3222 |
++MODULE_LICENSE("GPL v2"); |
3223 |
+diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c |
3224 |
+index b5f4006198b9..a9a56aa9c26b 100644 |
3225 |
+--- a/drivers/s390/crypto/zcrypt_api.c |
3226 |
++++ b/drivers/s390/crypto/zcrypt_api.c |
3227 |
+@@ -218,8 +218,8 @@ static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq, |
3228 |
+ weight += atomic_read(&zq->load); |
3229 |
+ pref_weight += atomic_read(&pref_zq->load); |
3230 |
+ if (weight == pref_weight) |
3231 |
+- return &zq->queue->total_request_count > |
3232 |
+- &pref_zq->queue->total_request_count; |
3233 |
++ return zq->queue->total_request_count > |
3234 |
++ pref_zq->queue->total_request_count; |
3235 |
+ return weight > pref_weight; |
3236 |
+ } |
3237 |
+ |
3238 |
+diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c |
3239 |
+index af3e4d3f9735..7173ae53c526 100644 |
3240 |
+--- a/drivers/scsi/aacraid/aachba.c |
3241 |
++++ b/drivers/scsi/aacraid/aachba.c |
3242 |
+@@ -913,8 +913,15 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex) |
3243 |
+ memset(str, ' ', sizeof(*str)); |
3244 |
+ |
3245 |
+ if (sup_adap_info->adapter_type_text[0]) { |
3246 |
+- char *cp = sup_adap_info->adapter_type_text; |
3247 |
+ int c; |
3248 |
++ char *cp; |
3249 |
++ char *cname = kmemdup(sup_adap_info->adapter_type_text, |
3250 |
++ sizeof(sup_adap_info->adapter_type_text), |
3251 |
++ GFP_ATOMIC); |
3252 |
++ if (!cname) |
3253 |
++ return; |
3254 |
++ |
3255 |
++ cp = cname; |
3256 |
+ if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C')) |
3257 |
+ inqstrcpy("SMC", str->vid); |
3258 |
+ else { |
3259 |
+@@ -923,7 +930,7 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex) |
3260 |
+ ++cp; |
3261 |
+ c = *cp; |
3262 |
+ *cp = '\0'; |
3263 |
+- inqstrcpy(sup_adap_info->adapter_type_text, str->vid); |
3264 |
++ inqstrcpy(cname, str->vid); |
3265 |
+ *cp = c; |
3266 |
+ while (*cp && *cp != ' ') |
3267 |
+ ++cp; |
3268 |
+@@ -937,8 +944,8 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex) |
3269 |
+ cp[sizeof(str->pid)] = '\0'; |
3270 |
+ } |
3271 |
+ inqstrcpy (cp, str->pid); |
3272 |
+- if (c) |
3273 |
+- cp[sizeof(str->pid)] = c; |
3274 |
++ |
3275 |
++ kfree(cname); |
3276 |
+ } else { |
3277 |
+ struct aac_driver_ident *mp = aac_get_driver_ident(dev->cardtype); |
3278 |
+ |
3279 |
+diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c |
3280 |
+index 525a652dab48..c0a4fcb7fd0a 100644 |
3281 |
+--- a/drivers/scsi/aacraid/commsup.c |
3282 |
++++ b/drivers/scsi/aacraid/commsup.c |
3283 |
+@@ -1583,6 +1583,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) |
3284 |
+ * will ensure that i/o is queisced and the card is flushed in that |
3285 |
+ * case. |
3286 |
+ */ |
3287 |
++ aac_free_irq(aac); |
3288 |
+ aac_fib_map_free(aac); |
3289 |
+ dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr, |
3290 |
+ aac->comm_phys); |
3291 |
+@@ -1590,7 +1591,6 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) |
3292 |
+ aac->comm_phys = 0; |
3293 |
+ kfree(aac->queues); |
3294 |
+ aac->queues = NULL; |
3295 |
+- aac_free_irq(aac); |
3296 |
+ kfree(aac->fsa_dev); |
3297 |
+ aac->fsa_dev = NULL; |
3298 |
+ |
3299 |
+@@ -1672,14 +1672,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) |
3300 |
+ out: |
3301 |
+ aac->in_reset = 0; |
3302 |
+ scsi_unblock_requests(host); |
3303 |
+- /* |
3304 |
+- * Issue bus rescan to catch any configuration that might have |
3305 |
+- * occurred |
3306 |
+- */ |
3307 |
+- if (!retval) { |
3308 |
+- dev_info(&aac->pdev->dev, "Issuing bus rescan\n"); |
3309 |
+- scsi_scan_host(host); |
3310 |
+- } |
3311 |
++ |
3312 |
+ if (jafo) { |
3313 |
+ spin_lock_irq(host->host_lock); |
3314 |
+ } |
3315 |
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c |
3316 |
+index 794a4600e952..d344fef01f1d 100644 |
3317 |
+--- a/drivers/scsi/ufs/ufshcd.c |
3318 |
++++ b/drivers/scsi/ufs/ufshcd.c |
3319 |
+@@ -6555,12 +6555,15 @@ static int ufshcd_config_vreg(struct device *dev, |
3320 |
+ struct ufs_vreg *vreg, bool on) |
3321 |
+ { |
3322 |
+ int ret = 0; |
3323 |
+- struct regulator *reg = vreg->reg; |
3324 |
+- const char *name = vreg->name; |
3325 |
++ struct regulator *reg; |
3326 |
++ const char *name; |
3327 |
+ int min_uV, uA_load; |
3328 |
+ |
3329 |
+ BUG_ON(!vreg); |
3330 |
+ |
3331 |
++ reg = vreg->reg; |
3332 |
++ name = vreg->name; |
3333 |
++ |
3334 |
+ if (regulator_count_voltages(reg) > 0) { |
3335 |
+ min_uV = on ? vreg->min_uV : 0; |
3336 |
+ ret = regulator_set_voltage(reg, min_uV, vreg->max_uV); |
3337 |
+diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c |
3338 |
+index babb15f07995..d51ca243a028 100644 |
3339 |
+--- a/drivers/spi/spi-imx.c |
3340 |
++++ b/drivers/spi/spi-imx.c |
3341 |
+@@ -1496,12 +1496,23 @@ static int spi_imx_remove(struct platform_device *pdev) |
3342 |
+ { |
3343 |
+ struct spi_master *master = platform_get_drvdata(pdev); |
3344 |
+ struct spi_imx_data *spi_imx = spi_master_get_devdata(master); |
3345 |
++ int ret; |
3346 |
+ |
3347 |
+ spi_bitbang_stop(&spi_imx->bitbang); |
3348 |
+ |
3349 |
++ ret = clk_enable(spi_imx->clk_per); |
3350 |
++ if (ret) |
3351 |
++ return ret; |
3352 |
++ |
3353 |
++ ret = clk_enable(spi_imx->clk_ipg); |
3354 |
++ if (ret) { |
3355 |
++ clk_disable(spi_imx->clk_per); |
3356 |
++ return ret; |
3357 |
++ } |
3358 |
++ |
3359 |
+ writel(0, spi_imx->base + MXC_CSPICTRL); |
3360 |
+- clk_unprepare(spi_imx->clk_ipg); |
3361 |
+- clk_unprepare(spi_imx->clk_per); |
3362 |
++ clk_disable_unprepare(spi_imx->clk_ipg); |
3363 |
++ clk_disable_unprepare(spi_imx->clk_per); |
3364 |
+ spi_imx_sdma_exit(spi_imx); |
3365 |
+ spi_master_put(master); |
3366 |
+ |
3367 |
+diff --git a/drivers/staging/ccree/ssi_cipher.c b/drivers/staging/ccree/ssi_cipher.c |
3368 |
+index 8d31a93fd8b7..087a622f20b2 100644 |
3369 |
+--- a/drivers/staging/ccree/ssi_cipher.c |
3370 |
++++ b/drivers/staging/ccree/ssi_cipher.c |
3371 |
+@@ -904,6 +904,7 @@ static int ssi_ablkcipher_decrypt(struct ablkcipher_request *req) |
3372 |
+ scatterwalk_map_and_copy(req_ctx->backup_info, req->src, |
3373 |
+ (req->nbytes - ivsize), ivsize, 0); |
3374 |
+ req_ctx->is_giv = false; |
3375 |
++ req_ctx->backup_info = NULL; |
3376 |
+ |
3377 |
+ return ssi_blkcipher_process(tfm, req_ctx, req->dst, req->src, req->nbytes, req->info, ivsize, (void *)req, DRV_CRYPTO_DIRECTION_DECRYPT); |
3378 |
+ } |
3379 |
+diff --git a/drivers/staging/ccree/ssi_driver.c b/drivers/staging/ccree/ssi_driver.c |
3380 |
+index 9c6f1200c130..eeb995307951 100644 |
3381 |
+--- a/drivers/staging/ccree/ssi_driver.c |
3382 |
++++ b/drivers/staging/ccree/ssi_driver.c |
3383 |
+@@ -141,7 +141,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id) |
3384 |
+ irr &= ~SSI_COMP_IRQ_MASK; |
3385 |
+ complete_request(drvdata); |
3386 |
+ } |
3387 |
+-#ifdef CC_SUPPORT_FIPS |
3388 |
++#ifdef CONFIG_CRYPTO_FIPS |
3389 |
+ /* TEE FIPS interrupt */ |
3390 |
+ if (likely((irr & SSI_GPR0_IRQ_MASK) != 0)) { |
3391 |
+ /* Mask interrupt - will be unmasked in Deferred service handler */ |
3392 |
+diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c |
3393 |
+index 64763aacda57..284cdd44a2ee 100644 |
3394 |
+--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c |
3395 |
++++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c |
3396 |
+@@ -825,14 +825,15 @@ struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, struct rdma_cm_id *cm |
3397 |
+ return conn; |
3398 |
+ |
3399 |
+ failed_2: |
3400 |
+- kiblnd_destroy_conn(conn, true); |
3401 |
++ kiblnd_destroy_conn(conn); |
3402 |
++ LIBCFS_FREE(conn, sizeof(*conn)); |
3403 |
+ failed_1: |
3404 |
+ LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr)); |
3405 |
+ failed_0: |
3406 |
+ return NULL; |
3407 |
+ } |
3408 |
+ |
3409 |
+-void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn) |
3410 |
++void kiblnd_destroy_conn(struct kib_conn *conn) |
3411 |
+ { |
3412 |
+ struct rdma_cm_id *cmid = conn->ibc_cmid; |
3413 |
+ struct kib_peer *peer = conn->ibc_peer; |
3414 |
+@@ -895,8 +896,6 @@ void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn) |
3415 |
+ rdma_destroy_id(cmid); |
3416 |
+ atomic_dec(&net->ibn_nconns); |
3417 |
+ } |
3418 |
+- |
3419 |
+- LIBCFS_FREE(conn, sizeof(*conn)); |
3420 |
+ } |
3421 |
+ |
3422 |
+ int kiblnd_close_peer_conns_locked(struct kib_peer *peer, int why) |
3423 |
+diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h |
3424 |
+index a1e994a1cc84..98a5e2c21a83 100644 |
3425 |
+--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h |
3426 |
++++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h |
3427 |
+@@ -1015,7 +1015,7 @@ int kiblnd_close_peer_conns_locked(struct kib_peer *peer, int why); |
3428 |
+ struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, |
3429 |
+ struct rdma_cm_id *cmid, |
3430 |
+ int state, int version); |
3431 |
+-void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn); |
3432 |
++void kiblnd_destroy_conn(struct kib_conn *conn); |
3433 |
+ void kiblnd_close_conn(struct kib_conn *conn, int error); |
3434 |
+ void kiblnd_close_conn_locked(struct kib_conn *conn, int error); |
3435 |
+ |
3436 |
+diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c |
3437 |
+index 8fc191d99927..29e10021b906 100644 |
3438 |
+--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c |
3439 |
++++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c |
3440 |
+@@ -3313,11 +3313,13 @@ kiblnd_connd(void *arg) |
3441 |
+ spin_unlock_irqrestore(lock, flags); |
3442 |
+ dropped_lock = 1; |
3443 |
+ |
3444 |
+- kiblnd_destroy_conn(conn, !peer); |
3445 |
++ kiblnd_destroy_conn(conn); |
3446 |
+ |
3447 |
+ spin_lock_irqsave(lock, flags); |
3448 |
+- if (!peer) |
3449 |
++ if (!peer) { |
3450 |
++ kfree(conn); |
3451 |
+ continue; |
3452 |
++ } |
3453 |
+ |
3454 |
+ conn->ibc_peer = peer; |
3455 |
+ if (peer->ibp_reconnected < KIB_RECONN_HIGH_RACE) |
3456 |
+diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c |
3457 |
+index c0664dc80bf2..446310775e90 100644 |
3458 |
+--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c |
3459 |
++++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c |
3460 |
+@@ -1395,19 +1395,13 @@ static int rtw_wx_get_essid(struct net_device *dev, |
3461 |
+ if ((check_fwstate(pmlmepriv, _FW_LINKED)) || |
3462 |
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE))) { |
3463 |
+ len = pcur_bss->Ssid.SsidLength; |
3464 |
+- |
3465 |
+- wrqu->essid.length = len; |
3466 |
+- |
3467 |
+ memcpy(extra, pcur_bss->Ssid.Ssid, len); |
3468 |
+- |
3469 |
+- wrqu->essid.flags = 1; |
3470 |
+ } else { |
3471 |
+- ret = -1; |
3472 |
+- goto exit; |
3473 |
++ len = 0; |
3474 |
++ *extra = 0; |
3475 |
+ } |
3476 |
+- |
3477 |
+-exit: |
3478 |
+- |
3479 |
++ wrqu->essid.length = len; |
3480 |
++ wrqu->essid.flags = 1; |
3481 |
+ |
3482 |
+ return ret; |
3483 |
+ } |
3484 |
+diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c |
3485 |
+index 1222c005fb98..951680640ad5 100644 |
3486 |
+--- a/drivers/tty/serial/8250/8250_of.c |
3487 |
++++ b/drivers/tty/serial/8250/8250_of.c |
3488 |
+@@ -141,8 +141,11 @@ static int of_platform_serial_setup(struct platform_device *ofdev, |
3489 |
+ } |
3490 |
+ |
3491 |
+ info->rst = devm_reset_control_get_optional_shared(&ofdev->dev, NULL); |
3492 |
+- if (IS_ERR(info->rst)) |
3493 |
++ if (IS_ERR(info->rst)) { |
3494 |
++ ret = PTR_ERR(info->rst); |
3495 |
+ goto err_dispose; |
3496 |
++ } |
3497 |
++ |
3498 |
+ ret = reset_control_deassert(info->rst); |
3499 |
+ if (ret) |
3500 |
+ goto err_dispose; |
3501 |
+diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c |
3502 |
+index 8a10b10e27aa..c206f173f912 100644 |
3503 |
+--- a/drivers/tty/serial/8250/8250_uniphier.c |
3504 |
++++ b/drivers/tty/serial/8250/8250_uniphier.c |
3505 |
+@@ -259,12 +259,13 @@ static int uniphier_uart_probe(struct platform_device *pdev) |
3506 |
+ up.dl_read = uniphier_serial_dl_read; |
3507 |
+ up.dl_write = uniphier_serial_dl_write; |
3508 |
+ |
3509 |
+- priv->line = serial8250_register_8250_port(&up); |
3510 |
+- if (priv->line < 0) { |
3511 |
++ ret = serial8250_register_8250_port(&up); |
3512 |
++ if (ret < 0) { |
3513 |
+ dev_err(dev, "failed to register 8250 port\n"); |
3514 |
+ clk_disable_unprepare(priv->clk); |
3515 |
+ return ret; |
3516 |
+ } |
3517 |
++ priv->line = ret; |
3518 |
+ |
3519 |
+ platform_set_drvdata(pdev, priv); |
3520 |
+ |
3521 |
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c |
3522 |
+index 3657d745e90f..521500c575c8 100644 |
3523 |
+--- a/drivers/tty/serial/imx.c |
3524 |
++++ b/drivers/tty/serial/imx.c |
3525 |
+@@ -2275,12 +2275,14 @@ static void serial_imx_enable_wakeup(struct imx_port *sport, bool on) |
3526 |
+ val &= ~UCR3_AWAKEN; |
3527 |
+ writel(val, sport->port.membase + UCR3); |
3528 |
+ |
3529 |
+- val = readl(sport->port.membase + UCR1); |
3530 |
+- if (on) |
3531 |
+- val |= UCR1_RTSDEN; |
3532 |
+- else |
3533 |
+- val &= ~UCR1_RTSDEN; |
3534 |
+- writel(val, sport->port.membase + UCR1); |
3535 |
++ if (sport->have_rtscts) { |
3536 |
++ val = readl(sport->port.membase + UCR1); |
3537 |
++ if (on) |
3538 |
++ val |= UCR1_RTSDEN; |
3539 |
++ else |
3540 |
++ val &= ~UCR1_RTSDEN; |
3541 |
++ writel(val, sport->port.membase + UCR1); |
3542 |
++ } |
3543 |
+ } |
3544 |
+ |
3545 |
+ static int imx_serial_port_suspend_noirq(struct device *dev) |
3546 |
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c |
3547 |
+index 94cccb6efa32..7892d0be8af9 100644 |
3548 |
+--- a/drivers/tty/tty_io.c |
3549 |
++++ b/drivers/tty/tty_io.c |
3550 |
+@@ -1322,6 +1322,9 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx) |
3551 |
+ "%s: %s driver does not set tty->port. This will crash the kernel later. Fix the driver!\n", |
3552 |
+ __func__, tty->driver->name); |
3553 |
+ |
3554 |
++ retval = tty_ldisc_lock(tty, 5 * HZ); |
3555 |
++ if (retval) |
3556 |
++ goto err_release_lock; |
3557 |
+ tty->port->itty = tty; |
3558 |
+ |
3559 |
+ /* |
3560 |
+@@ -1332,6 +1335,7 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx) |
3561 |
+ retval = tty_ldisc_setup(tty, tty->link); |
3562 |
+ if (retval) |
3563 |
+ goto err_release_tty; |
3564 |
++ tty_ldisc_unlock(tty); |
3565 |
+ /* Return the tty locked so that it cannot vanish under the caller */ |
3566 |
+ return tty; |
3567 |
+ |
3568 |
+@@ -1344,9 +1348,11 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx) |
3569 |
+ |
3570 |
+ /* call the tty release_tty routine to clean out this slot */ |
3571 |
+ err_release_tty: |
3572 |
+- tty_unlock(tty); |
3573 |
++ tty_ldisc_unlock(tty); |
3574 |
+ tty_info_ratelimited(tty, "ldisc open failed (%d), clearing slot %d\n", |
3575 |
+ retval, idx); |
3576 |
++err_release_lock: |
3577 |
++ tty_unlock(tty); |
3578 |
+ release_tty(tty, idx); |
3579 |
+ return ERR_PTR(retval); |
3580 |
+ } |
3581 |
+diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c |
3582 |
+index 84a8ac2a779f..7c895684c3ef 100644 |
3583 |
+--- a/drivers/tty/tty_ldisc.c |
3584 |
++++ b/drivers/tty/tty_ldisc.c |
3585 |
+@@ -336,7 +336,7 @@ static inline void __tty_ldisc_unlock(struct tty_struct *tty) |
3586 |
+ ldsem_up_write(&tty->ldisc_sem); |
3587 |
+ } |
3588 |
+ |
3589 |
+-static int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout) |
3590 |
++int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout) |
3591 |
+ { |
3592 |
+ int ret; |
3593 |
+ |
3594 |
+@@ -347,7 +347,7 @@ static int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout) |
3595 |
+ return 0; |
3596 |
+ } |
3597 |
+ |
3598 |
+-static void tty_ldisc_unlock(struct tty_struct *tty) |
3599 |
++void tty_ldisc_unlock(struct tty_struct *tty) |
3600 |
+ { |
3601 |
+ clear_bit(TTY_LDISC_HALTED, &tty->flags); |
3602 |
+ __tty_ldisc_unlock(tty); |
3603 |
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c |
3604 |
+index 18c923a4c16e..4149a965516e 100644 |
3605 |
+--- a/drivers/usb/class/cdc-acm.c |
3606 |
++++ b/drivers/usb/class/cdc-acm.c |
3607 |
+@@ -438,7 +438,7 @@ static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags) |
3608 |
+ |
3609 |
+ res = usb_submit_urb(acm->read_urbs[index], mem_flags); |
3610 |
+ if (res) { |
3611 |
+- if (res != -EPERM) { |
3612 |
++ if (res != -EPERM && res != -ENODEV) { |
3613 |
+ dev_err(&acm->data->dev, |
3614 |
+ "urb %d failed submission with %d\n", |
3615 |
+ index, res); |
3616 |
+@@ -1765,6 +1765,9 @@ static const struct usb_device_id acm_ids[] = { |
3617 |
+ { USB_DEVICE(0x0ace, 0x1611), /* ZyDAS 56K USB MODEM - new version */ |
3618 |
+ .driver_info = SINGLE_RX_URB, /* firmware bug */ |
3619 |
+ }, |
3620 |
++ { USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */ |
3621 |
++ .driver_info = SINGLE_RX_URB, |
3622 |
++ }, |
3623 |
+ { USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */ |
3624 |
+ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ |
3625 |
+ }, |
3626 |
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c |
3627 |
+index 5d061b3d8224..ed9346f0b000 100644 |
3628 |
+--- a/drivers/usb/gadget/composite.c |
3629 |
++++ b/drivers/usb/gadget/composite.c |
3630 |
+@@ -150,7 +150,6 @@ int config_ep_by_speed(struct usb_gadget *g, |
3631 |
+ struct usb_function *f, |
3632 |
+ struct usb_ep *_ep) |
3633 |
+ { |
3634 |
+- struct usb_composite_dev *cdev = get_gadget_data(g); |
3635 |
+ struct usb_endpoint_descriptor *chosen_desc = NULL; |
3636 |
+ struct usb_descriptor_header **speed_desc = NULL; |
3637 |
+ |
3638 |
+@@ -229,8 +228,12 @@ int config_ep_by_speed(struct usb_gadget *g, |
3639 |
+ _ep->maxburst = comp_desc->bMaxBurst + 1; |
3640 |
+ break; |
3641 |
+ default: |
3642 |
+- if (comp_desc->bMaxBurst != 0) |
3643 |
++ if (comp_desc->bMaxBurst != 0) { |
3644 |
++ struct usb_composite_dev *cdev; |
3645 |
++ |
3646 |
++ cdev = get_gadget_data(g); |
3647 |
+ ERROR(cdev, "ep0 bMaxBurst must be 0\n"); |
3648 |
++ } |
3649 |
+ _ep->maxburst = 1; |
3650 |
+ break; |
3651 |
+ } |
3652 |
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c |
3653 |
+index 876cdbec1307..c0491dd73f53 100644 |
3654 |
+--- a/drivers/usb/gadget/function/f_fs.c |
3655 |
++++ b/drivers/usb/gadget/function/f_fs.c |
3656 |
+@@ -3704,7 +3704,8 @@ static void ffs_closed(struct ffs_data *ffs) |
3657 |
+ ci = opts->func_inst.group.cg_item.ci_parent->ci_parent; |
3658 |
+ ffs_dev_unlock(); |
3659 |
+ |
3660 |
+- unregister_gadget_item(ci); |
3661 |
++ if (test_bit(FFS_FL_BOUND, &ffs->flags)) |
3662 |
++ unregister_gadget_item(ci); |
3663 |
+ return; |
3664 |
+ done: |
3665 |
+ ffs_dev_unlock(); |
3666 |
+diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c |
3667 |
+index 284bd1a7b570..794bb4958383 100644 |
3668 |
+--- a/drivers/usb/gadget/udc/core.c |
3669 |
++++ b/drivers/usb/gadget/udc/core.c |
3670 |
+@@ -923,7 +923,7 @@ int usb_gadget_ep_match_desc(struct usb_gadget *gadget, |
3671 |
+ return 0; |
3672 |
+ |
3673 |
+ /* "high bandwidth" works only at high speed */ |
3674 |
+- if (!gadget_is_dualspeed(gadget) && usb_endpoint_maxp(desc) & (3<<11)) |
3675 |
++ if (!gadget_is_dualspeed(gadget) && usb_endpoint_maxp_mult(desc) > 1) |
3676 |
+ return 0; |
3677 |
+ |
3678 |
+ switch (type) { |
3679 |
+diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig |
3680 |
+index a8d5f2e4878d..c66b93664d54 100644 |
3681 |
+--- a/drivers/usb/serial/Kconfig |
3682 |
++++ b/drivers/usb/serial/Kconfig |
3683 |
+@@ -63,6 +63,7 @@ config USB_SERIAL_SIMPLE |
3684 |
+ - Google USB serial devices |
3685 |
+ - HP4x calculators |
3686 |
+ - a number of Motorola phones |
3687 |
++ - Motorola Tetra devices |
3688 |
+ - Novatel Wireless GPS receivers |
3689 |
+ - Siemens USB/MPI adapter. |
3690 |
+ - ViVOtech ViVOpay USB device. |
3691 |
+diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c |
3692 |
+index bdf8bd814a9a..01f3ac7769f3 100644 |
3693 |
+--- a/drivers/usb/serial/io_edgeport.c |
3694 |
++++ b/drivers/usb/serial/io_edgeport.c |
3695 |
+@@ -2286,7 +2286,6 @@ static int write_cmd_usb(struct edgeport_port *edge_port, |
3696 |
+ /* something went wrong */ |
3697 |
+ dev_err(dev, "%s - usb_submit_urb(write command) failed, status = %d\n", |
3698 |
+ __func__, status); |
3699 |
+- usb_kill_urb(urb); |
3700 |
+ usb_free_urb(urb); |
3701 |
+ atomic_dec(&CmdUrbs); |
3702 |
+ return status; |
3703 |
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c |
3704 |
+index a9400458ccea..dcf78a498927 100644 |
3705 |
+--- a/drivers/usb/serial/option.c |
3706 |
++++ b/drivers/usb/serial/option.c |
3707 |
+@@ -383,6 +383,9 @@ static void option_instat_callback(struct urb *urb); |
3708 |
+ #define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603 |
3709 |
+ #define FOUR_G_SYSTEMS_PRODUCT_W100 0x9b01 |
3710 |
+ |
3711 |
++/* Fujisoft products */ |
3712 |
++#define FUJISOFT_PRODUCT_FS040U 0x9b02 |
3713 |
++ |
3714 |
+ /* iBall 3.5G connect wireless modem */ |
3715 |
+ #define IBALL_3_5G_CONNECT 0x9605 |
3716 |
+ |
3717 |
+@@ -1897,6 +1900,8 @@ static const struct usb_device_id option_ids[] = { |
3718 |
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100), |
3719 |
+ .driver_info = (kernel_ulong_t)&four_g_w100_blacklist |
3720 |
+ }, |
3721 |
++ {USB_DEVICE(LONGCHEER_VENDOR_ID, FUJISOFT_PRODUCT_FS040U), |
3722 |
++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist}, |
3723 |
+ { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) }, |
3724 |
+ { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff), |
3725 |
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, |
3726 |
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c |
3727 |
+index a585b477415d..34c5a75f98a7 100644 |
3728 |
+--- a/drivers/usb/serial/pl2303.c |
3729 |
++++ b/drivers/usb/serial/pl2303.c |
3730 |
+@@ -41,6 +41,7 @@ static const struct usb_device_id id_table[] = { |
3731 |
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ2) }, |
3732 |
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_DCU11) }, |
3733 |
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ3) }, |
3734 |
++ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_CHILITAG) }, |
3735 |
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_PHAROS) }, |
3736 |
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ALDIGA) }, |
3737 |
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MMX) }, |
3738 |
+diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h |
3739 |
+index 3b5a15d1dc0d..123289085ee2 100644 |
3740 |
+--- a/drivers/usb/serial/pl2303.h |
3741 |
++++ b/drivers/usb/serial/pl2303.h |
3742 |
+@@ -17,6 +17,7 @@ |
3743 |
+ #define PL2303_PRODUCT_ID_DCU11 0x1234 |
3744 |
+ #define PL2303_PRODUCT_ID_PHAROS 0xaaa0 |
3745 |
+ #define PL2303_PRODUCT_ID_RSAQ3 0xaaa2 |
3746 |
++#define PL2303_PRODUCT_ID_CHILITAG 0xaaa8 |
3747 |
+ #define PL2303_PRODUCT_ID_ALDIGA 0x0611 |
3748 |
+ #define PL2303_PRODUCT_ID_MMX 0x0612 |
3749 |
+ #define PL2303_PRODUCT_ID_GPRS 0x0609 |
3750 |
+diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c |
3751 |
+index e98b6e57b703..6aa7ff2c1cf7 100644 |
3752 |
+--- a/drivers/usb/serial/usb-serial-simple.c |
3753 |
++++ b/drivers/usb/serial/usb-serial-simple.c |
3754 |
+@@ -80,6 +80,11 @@ DEVICE(vivopay, VIVOPAY_IDS); |
3755 |
+ { USB_DEVICE(0x22b8, 0x2c64) } /* Motorola V950 phone */ |
3756 |
+ DEVICE(moto_modem, MOTO_IDS); |
3757 |
+ |
3758 |
++/* Motorola Tetra driver */ |
3759 |
++#define MOTOROLA_TETRA_IDS() \ |
3760 |
++ { USB_DEVICE(0x0cad, 0x9011) } /* Motorola Solutions TETRA PEI */ |
3761 |
++DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS); |
3762 |
++ |
3763 |
+ /* Novatel Wireless GPS driver */ |
3764 |
+ #define NOVATEL_IDS() \ |
3765 |
+ { USB_DEVICE(0x09d7, 0x0100) } /* NovAtel FlexPack GPS */ |
3766 |
+@@ -110,6 +115,7 @@ static struct usb_serial_driver * const serial_drivers[] = { |
3767 |
+ &google_device, |
3768 |
+ &vivopay_device, |
3769 |
+ &moto_modem_device, |
3770 |
++ &motorola_tetra_device, |
3771 |
+ &novatel_gps_device, |
3772 |
+ &hp4x_device, |
3773 |
+ &suunto_device, |
3774 |
+@@ -125,6 +131,7 @@ static const struct usb_device_id id_table[] = { |
3775 |
+ GOOGLE_IDS(), |
3776 |
+ VIVOPAY_IDS(), |
3777 |
+ MOTO_IDS(), |
3778 |
++ MOTOROLA_TETRA_IDS(), |
3779 |
+ NOVATEL_IDS(), |
3780 |
+ HP4X_IDS(), |
3781 |
+ SUUNTO_IDS(), |
3782 |
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c |
3783 |
+index 63cf981ed81c..0bc8543e96b1 100644 |
3784 |
+--- a/drivers/usb/storage/uas.c |
3785 |
++++ b/drivers/usb/storage/uas.c |
3786 |
+@@ -1076,20 +1076,19 @@ static int uas_post_reset(struct usb_interface *intf) |
3787 |
+ return 0; |
3788 |
+ |
3789 |
+ err = uas_configure_endpoints(devinfo); |
3790 |
+- if (err) { |
3791 |
++ if (err && err != ENODEV) |
3792 |
+ shost_printk(KERN_ERR, shost, |
3793 |
+ "%s: alloc streams error %d after reset", |
3794 |
+ __func__, err); |
3795 |
+- return 1; |
3796 |
+- } |
3797 |
+ |
3798 |
++ /* we must unblock the host in every case lest we deadlock */ |
3799 |
+ spin_lock_irqsave(shost->host_lock, flags); |
3800 |
+ scsi_report_bus_reset(shost, 0); |
3801 |
+ spin_unlock_irqrestore(shost->host_lock, flags); |
3802 |
+ |
3803 |
+ scsi_unblock_requests(shost); |
3804 |
+ |
3805 |
+- return 0; |
3806 |
++ return err ? 1 : 0; |
3807 |
+ } |
3808 |
+ |
3809 |
+ static int uas_suspend(struct usb_interface *intf, pm_message_t message) |
3810 |
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c |
3811 |
+index aafcc785f840..d564a7049d7f 100644 |
3812 |
+--- a/fs/btrfs/file.c |
3813 |
++++ b/fs/btrfs/file.c |
3814 |
+@@ -2056,6 +2056,8 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) |
3815 |
+ len = (u64)end - (u64)start + 1; |
3816 |
+ trace_btrfs_sync_file(file, datasync); |
3817 |
+ |
3818 |
++ btrfs_init_log_ctx(&ctx, inode); |
3819 |
++ |
3820 |
+ /* |
3821 |
+ * We write the dirty pages in the range and wait until they complete |
3822 |
+ * out of the ->i_mutex. If so, we can flush the dirty pages by |
3823 |
+@@ -2202,8 +2204,6 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) |
3824 |
+ } |
3825 |
+ trans->sync = true; |
3826 |
+ |
3827 |
+- btrfs_init_log_ctx(&ctx, inode); |
3828 |
+- |
3829 |
+ ret = btrfs_log_dentry_safe(trans, root, dentry, start, end, &ctx); |
3830 |
+ if (ret < 0) { |
3831 |
+ /* Fallthrough and commit/free transaction. */ |
3832 |
+@@ -2261,6 +2261,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) |
3833 |
+ ret = btrfs_end_transaction(trans); |
3834 |
+ } |
3835 |
+ out: |
3836 |
++ ASSERT(list_empty(&ctx.list)); |
3837 |
+ err = file_check_and_advance_wb_err(file); |
3838 |
+ if (!ret) |
3839 |
+ ret = err; |
3840 |
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c |
3841 |
+index cdc9f4015ec3..4426d1c73e50 100644 |
3842 |
+--- a/fs/btrfs/free-space-cache.c |
3843 |
++++ b/fs/btrfs/free-space-cache.c |
3844 |
+@@ -1264,7 +1264,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, |
3845 |
+ /* Lock all pages first so we can lock the extent safely. */ |
3846 |
+ ret = io_ctl_prepare_pages(io_ctl, inode, 0); |
3847 |
+ if (ret) |
3848 |
+- goto out; |
3849 |
++ goto out_unlock; |
3850 |
+ |
3851 |
+ lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, |
3852 |
+ &cached_state); |
3853 |
+@@ -1358,6 +1358,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, |
3854 |
+ out_nospc: |
3855 |
+ cleanup_write_cache_enospc(inode, io_ctl, &cached_state); |
3856 |
+ |
3857 |
++out_unlock: |
3858 |
+ if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) |
3859 |
+ up_write(&block_group->data_rwsem); |
3860 |
+ |
3861 |
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c |
3862 |
+index d94e3f68b9b1..c71afd424900 100644 |
3863 |
+--- a/fs/btrfs/inode.c |
3864 |
++++ b/fs/btrfs/inode.c |
3865 |
+@@ -5500,6 +5500,14 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry, |
3866 |
+ goto out_err; |
3867 |
+ |
3868 |
+ btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); |
3869 |
++ if (location->type != BTRFS_INODE_ITEM_KEY && |
3870 |
++ location->type != BTRFS_ROOT_ITEM_KEY) { |
3871 |
++ btrfs_warn(root->fs_info, |
3872 |
++"%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))", |
3873 |
++ __func__, name, btrfs_ino(BTRFS_I(dir)), |
3874 |
++ location->objectid, location->type, location->offset); |
3875 |
++ goto out_err; |
3876 |
++ } |
3877 |
+ out: |
3878 |
+ btrfs_free_path(path); |
3879 |
+ return ret; |
3880 |
+@@ -5816,8 +5824,6 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) |
3881 |
+ return inode; |
3882 |
+ } |
3883 |
+ |
3884 |
+- BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY); |
3885 |
+- |
3886 |
+ index = srcu_read_lock(&fs_info->subvol_srcu); |
3887 |
+ ret = fixup_tree_root_location(fs_info, dir, dentry, |
3888 |
+ &location, &sub_root); |
3889 |
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c |
3890 |
+index 8fd195cfe81b..2c35717a3470 100644 |
3891 |
+--- a/fs/btrfs/send.c |
3892 |
++++ b/fs/btrfs/send.c |
3893 |
+@@ -3527,7 +3527,40 @@ static int wait_for_dest_dir_move(struct send_ctx *sctx, |
3894 |
+ } |
3895 |
+ |
3896 |
+ /* |
3897 |
+- * Check if ino ino1 is an ancestor of inode ino2 in the given root. |
3898 |
++ * Check if inode ino2, or any of its ancestors, is inode ino1. |
3899 |
++ * Return 1 if true, 0 if false and < 0 on error. |
3900 |
++ */ |
3901 |
++static int check_ino_in_path(struct btrfs_root *root, |
3902 |
++ const u64 ino1, |
3903 |
++ const u64 ino1_gen, |
3904 |
++ const u64 ino2, |
3905 |
++ const u64 ino2_gen, |
3906 |
++ struct fs_path *fs_path) |
3907 |
++{ |
3908 |
++ u64 ino = ino2; |
3909 |
++ |
3910 |
++ if (ino1 == ino2) |
3911 |
++ return ino1_gen == ino2_gen; |
3912 |
++ |
3913 |
++ while (ino > BTRFS_FIRST_FREE_OBJECTID) { |
3914 |
++ u64 parent; |
3915 |
++ u64 parent_gen; |
3916 |
++ int ret; |
3917 |
++ |
3918 |
++ fs_path_reset(fs_path); |
3919 |
++ ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path); |
3920 |
++ if (ret < 0) |
3921 |
++ return ret; |
3922 |
++ if (parent == ino1) |
3923 |
++ return parent_gen == ino1_gen; |
3924 |
++ ino = parent; |
3925 |
++ } |
3926 |
++ return 0; |
3927 |
++} |
3928 |
++ |
3929 |
++/* |
3930 |
++ * Check if ino ino1 is an ancestor of inode ino2 in the given root for any |
3931 |
++ * possible path (in case ino2 is not a directory and has multiple hard links). |
3932 |
+ * Return 1 if true, 0 if false and < 0 on error. |
3933 |
+ */ |
3934 |
+ static int is_ancestor(struct btrfs_root *root, |
3935 |
+@@ -3536,36 +3569,91 @@ static int is_ancestor(struct btrfs_root *root, |
3936 |
+ const u64 ino2, |
3937 |
+ struct fs_path *fs_path) |
3938 |
+ { |
3939 |
+- u64 ino = ino2; |
3940 |
+- bool free_path = false; |
3941 |
++ bool free_fs_path = false; |
3942 |
+ int ret = 0; |
3943 |
++ struct btrfs_path *path = NULL; |
3944 |
++ struct btrfs_key key; |
3945 |
+ |
3946 |
+ if (!fs_path) { |
3947 |
+ fs_path = fs_path_alloc(); |
3948 |
+ if (!fs_path) |
3949 |
+ return -ENOMEM; |
3950 |
+- free_path = true; |
3951 |
++ free_fs_path = true; |
3952 |
+ } |
3953 |
+ |
3954 |
+- while (ino > BTRFS_FIRST_FREE_OBJECTID) { |
3955 |
+- u64 parent; |
3956 |
+- u64 parent_gen; |
3957 |
++ path = alloc_path_for_send(); |
3958 |
++ if (!path) { |
3959 |
++ ret = -ENOMEM; |
3960 |
++ goto out; |
3961 |
++ } |
3962 |
+ |
3963 |
+- fs_path_reset(fs_path); |
3964 |
+- ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path); |
3965 |
+- if (ret < 0) { |
3966 |
+- if (ret == -ENOENT && ino == ino2) |
3967 |
+- ret = 0; |
3968 |
+- goto out; |
3969 |
++ key.objectid = ino2; |
3970 |
++ key.type = BTRFS_INODE_REF_KEY; |
3971 |
++ key.offset = 0; |
3972 |
++ |
3973 |
++ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); |
3974 |
++ if (ret < 0) |
3975 |
++ goto out; |
3976 |
++ |
3977 |
++ while (true) { |
3978 |
++ struct extent_buffer *leaf = path->nodes[0]; |
3979 |
++ int slot = path->slots[0]; |
3980 |
++ u32 cur_offset = 0; |
3981 |
++ u32 item_size; |
3982 |
++ |
3983 |
++ if (slot >= btrfs_header_nritems(leaf)) { |
3984 |
++ ret = btrfs_next_leaf(root, path); |
3985 |
++ if (ret < 0) |
3986 |
++ goto out; |
3987 |
++ if (ret > 0) |
3988 |
++ break; |
3989 |
++ continue; |
3990 |
+ } |
3991 |
+- if (parent == ino1) { |
3992 |
+- ret = parent_gen == ino1_gen ? 1 : 0; |
3993 |
+- goto out; |
3994 |
++ |
3995 |
++ btrfs_item_key_to_cpu(leaf, &key, slot); |
3996 |
++ if (key.objectid != ino2) |
3997 |
++ break; |
3998 |
++ if (key.type != BTRFS_INODE_REF_KEY && |
3999 |
++ key.type != BTRFS_INODE_EXTREF_KEY) |
4000 |
++ break; |
4001 |
++ |
4002 |
++ item_size = btrfs_item_size_nr(leaf, slot); |
4003 |
++ while (cur_offset < item_size) { |
4004 |
++ u64 parent; |
4005 |
++ u64 parent_gen; |
4006 |
++ |
4007 |
++ if (key.type == BTRFS_INODE_EXTREF_KEY) { |
4008 |
++ unsigned long ptr; |
4009 |
++ struct btrfs_inode_extref *extref; |
4010 |
++ |
4011 |
++ ptr = btrfs_item_ptr_offset(leaf, slot); |
4012 |
++ extref = (struct btrfs_inode_extref *) |
4013 |
++ (ptr + cur_offset); |
4014 |
++ parent = btrfs_inode_extref_parent(leaf, |
4015 |
++ extref); |
4016 |
++ cur_offset += sizeof(*extref); |
4017 |
++ cur_offset += btrfs_inode_extref_name_len(leaf, |
4018 |
++ extref); |
4019 |
++ } else { |
4020 |
++ parent = key.offset; |
4021 |
++ cur_offset = item_size; |
4022 |
++ } |
4023 |
++ |
4024 |
++ ret = get_inode_info(root, parent, NULL, &parent_gen, |
4025 |
++ NULL, NULL, NULL, NULL); |
4026 |
++ if (ret < 0) |
4027 |
++ goto out; |
4028 |
++ ret = check_ino_in_path(root, ino1, ino1_gen, |
4029 |
++ parent, parent_gen, fs_path); |
4030 |
++ if (ret) |
4031 |
++ goto out; |
4032 |
+ } |
4033 |
+- ino = parent; |
4034 |
++ path->slots[0]++; |
4035 |
+ } |
4036 |
++ ret = 0; |
4037 |
+ out: |
4038 |
+- if (free_path) |
4039 |
++ btrfs_free_path(path); |
4040 |
++ if (free_fs_path) |
4041 |
+ fs_path_free(fs_path); |
4042 |
+ return ret; |
4043 |
+ } |
4044 |
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c |
4045 |
+index c800d067fcbf..d3002842d7f6 100644 |
4046 |
+--- a/fs/btrfs/tree-log.c |
4047 |
++++ b/fs/btrfs/tree-log.c |
4048 |
+@@ -4100,7 +4100,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans, |
4049 |
+ |
4050 |
+ if (ordered_io_err) { |
4051 |
+ ctx->io_err = -EIO; |
4052 |
+- return 0; |
4053 |
++ return ctx->io_err; |
4054 |
+ } |
4055 |
+ |
4056 |
+ btrfs_init_map_token(&token); |
4057 |
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c |
4058 |
+index 0c11121a8ace..4006b2a1233d 100644 |
4059 |
+--- a/fs/btrfs/volumes.c |
4060 |
++++ b/fs/btrfs/volumes.c |
4061 |
+@@ -1765,20 +1765,24 @@ static int btrfs_rm_dev_item(struct btrfs_fs_info *fs_info, |
4062 |
+ key.offset = device->devid; |
4063 |
+ |
4064 |
+ ret = btrfs_search_slot(trans, root, &key, path, -1, 1); |
4065 |
+- if (ret < 0) |
4066 |
+- goto out; |
4067 |
+- |
4068 |
+- if (ret > 0) { |
4069 |
+- ret = -ENOENT; |
4070 |
++ if (ret) { |
4071 |
++ if (ret > 0) |
4072 |
++ ret = -ENOENT; |
4073 |
++ btrfs_abort_transaction(trans, ret); |
4074 |
++ btrfs_end_transaction(trans); |
4075 |
+ goto out; |
4076 |
+ } |
4077 |
+ |
4078 |
+ ret = btrfs_del_item(trans, root, path); |
4079 |
+- if (ret) |
4080 |
+- goto out; |
4081 |
++ if (ret) { |
4082 |
++ btrfs_abort_transaction(trans, ret); |
4083 |
++ btrfs_end_transaction(trans); |
4084 |
++ } |
4085 |
++ |
4086 |
+ out: |
4087 |
+ btrfs_free_path(path); |
4088 |
+- btrfs_commit_transaction(trans); |
4089 |
++ if (!ret) |
4090 |
++ ret = btrfs_commit_transaction(trans); |
4091 |
+ return ret; |
4092 |
+ } |
4093 |
+ |
4094 |
+diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c |
4095 |
+index 45e96549ebd2..809cbccbad28 100644 |
4096 |
+--- a/fs/lockd/svc.c |
4097 |
++++ b/fs/lockd/svc.c |
4098 |
+@@ -57,6 +57,9 @@ static struct task_struct *nlmsvc_task; |
4099 |
+ static struct svc_rqst *nlmsvc_rqst; |
4100 |
+ unsigned long nlmsvc_timeout; |
4101 |
+ |
4102 |
++atomic_t nlm_ntf_refcnt = ATOMIC_INIT(0); |
4103 |
++DECLARE_WAIT_QUEUE_HEAD(nlm_ntf_wq); |
4104 |
++ |
4105 |
+ unsigned int lockd_net_id; |
4106 |
+ |
4107 |
+ /* |
4108 |
+@@ -292,7 +295,8 @@ static int lockd_inetaddr_event(struct notifier_block *this, |
4109 |
+ struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; |
4110 |
+ struct sockaddr_in sin; |
4111 |
+ |
4112 |
+- if (event != NETDEV_DOWN) |
4113 |
++ if ((event != NETDEV_DOWN) || |
4114 |
++ !atomic_inc_not_zero(&nlm_ntf_refcnt)) |
4115 |
+ goto out; |
4116 |
+ |
4117 |
+ if (nlmsvc_rqst) { |
4118 |
+@@ -303,6 +307,8 @@ static int lockd_inetaddr_event(struct notifier_block *this, |
4119 |
+ svc_age_temp_xprts_now(nlmsvc_rqst->rq_server, |
4120 |
+ (struct sockaddr *)&sin); |
4121 |
+ } |
4122 |
++ atomic_dec(&nlm_ntf_refcnt); |
4123 |
++ wake_up(&nlm_ntf_wq); |
4124 |
+ |
4125 |
+ out: |
4126 |
+ return NOTIFY_DONE; |
4127 |
+@@ -319,7 +325,8 @@ static int lockd_inet6addr_event(struct notifier_block *this, |
4128 |
+ struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr; |
4129 |
+ struct sockaddr_in6 sin6; |
4130 |
+ |
4131 |
+- if (event != NETDEV_DOWN) |
4132 |
++ if ((event != NETDEV_DOWN) || |
4133 |
++ !atomic_inc_not_zero(&nlm_ntf_refcnt)) |
4134 |
+ goto out; |
4135 |
+ |
4136 |
+ if (nlmsvc_rqst) { |
4137 |
+@@ -331,6 +338,8 @@ static int lockd_inet6addr_event(struct notifier_block *this, |
4138 |
+ svc_age_temp_xprts_now(nlmsvc_rqst->rq_server, |
4139 |
+ (struct sockaddr *)&sin6); |
4140 |
+ } |
4141 |
++ atomic_dec(&nlm_ntf_refcnt); |
4142 |
++ wake_up(&nlm_ntf_wq); |
4143 |
+ |
4144 |
+ out: |
4145 |
+ return NOTIFY_DONE; |
4146 |
+@@ -347,10 +356,12 @@ static void lockd_unregister_notifiers(void) |
4147 |
+ #if IS_ENABLED(CONFIG_IPV6) |
4148 |
+ unregister_inet6addr_notifier(&lockd_inet6addr_notifier); |
4149 |
+ #endif |
4150 |
++ wait_event(nlm_ntf_wq, atomic_read(&nlm_ntf_refcnt) == 0); |
4151 |
+ } |
4152 |
+ |
4153 |
+ static void lockd_svc_exit_thread(void) |
4154 |
+ { |
4155 |
++ atomic_dec(&nlm_ntf_refcnt); |
4156 |
+ lockd_unregister_notifiers(); |
4157 |
+ svc_exit_thread(nlmsvc_rqst); |
4158 |
+ } |
4159 |
+@@ -375,6 +386,7 @@ static int lockd_start_svc(struct svc_serv *serv) |
4160 |
+ goto out_rqst; |
4161 |
+ } |
4162 |
+ |
4163 |
++ atomic_inc(&nlm_ntf_refcnt); |
4164 |
+ svc_sock_update_bufs(serv); |
4165 |
+ serv->sv_maxconn = nlm_max_connections; |
4166 |
+ |
4167 |
+diff --git a/fs/namespace.c b/fs/namespace.c |
4168 |
+index d18deb4c410b..adae9ffce91d 100644 |
4169 |
+--- a/fs/namespace.c |
4170 |
++++ b/fs/namespace.c |
4171 |
+@@ -2826,6 +2826,7 @@ long do_mount(const char *dev_name, const char __user *dir_name, |
4172 |
+ SB_DIRSYNC | |
4173 |
+ SB_SILENT | |
4174 |
+ SB_POSIXACL | |
4175 |
++ SB_LAZYTIME | |
4176 |
+ SB_I_VERSION); |
4177 |
+ |
4178 |
+ if (flags & MS_REMOUNT) |
4179 |
+diff --git a/fs/nfs_common/grace.c b/fs/nfs_common/grace.c |
4180 |
+index 420d3a0ab258..3b13fb3b0553 100644 |
4181 |
+--- a/fs/nfs_common/grace.c |
4182 |
++++ b/fs/nfs_common/grace.c |
4183 |
+@@ -30,7 +30,11 @@ locks_start_grace(struct net *net, struct lock_manager *lm) |
4184 |
+ struct list_head *grace_list = net_generic(net, grace_net_id); |
4185 |
+ |
4186 |
+ spin_lock(&grace_lock); |
4187 |
+- list_add(&lm->list, grace_list); |
4188 |
++ if (list_empty(&lm->list)) |
4189 |
++ list_add(&lm->list, grace_list); |
4190 |
++ else |
4191 |
++ WARN(1, "double list_add attempt detected in net %x %s\n", |
4192 |
++ net->ns.inum, (net == &init_net) ? "(init_net)" : ""); |
4193 |
+ spin_unlock(&grace_lock); |
4194 |
+ } |
4195 |
+ EXPORT_SYMBOL_GPL(locks_start_grace); |
4196 |
+@@ -104,7 +108,9 @@ grace_exit_net(struct net *net) |
4197 |
+ { |
4198 |
+ struct list_head *grace_list = net_generic(net, grace_net_id); |
4199 |
+ |
4200 |
+- BUG_ON(!list_empty(grace_list)); |
4201 |
++ WARN_ONCE(!list_empty(grace_list), |
4202 |
++ "net %x %s: grace_list is not empty\n", |
4203 |
++ net->ns.inum, __func__); |
4204 |
+ } |
4205 |
+ |
4206 |
+ static struct pernet_operations grace_net_ops = { |
4207 |
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c |
4208 |
+index a439a70177a4..d89e6ccd33ba 100644 |
4209 |
+--- a/fs/nfsd/nfs4state.c |
4210 |
++++ b/fs/nfsd/nfs4state.c |
4211 |
+@@ -63,12 +63,16 @@ static const stateid_t zero_stateid = { |
4212 |
+ static const stateid_t currentstateid = { |
4213 |
+ .si_generation = 1, |
4214 |
+ }; |
4215 |
++static const stateid_t close_stateid = { |
4216 |
++ .si_generation = 0xffffffffU, |
4217 |
++}; |
4218 |
+ |
4219 |
+ static u64 current_sessionid = 1; |
4220 |
+ |
4221 |
+ #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t))) |
4222 |
+ #define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t))) |
4223 |
+ #define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t))) |
4224 |
++#define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t))) |
4225 |
+ |
4226 |
+ /* forward declarations */ |
4227 |
+ static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner); |
4228 |
+@@ -4866,7 +4870,8 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid) |
4229 |
+ struct nfs4_stid *s; |
4230 |
+ __be32 status = nfserr_bad_stateid; |
4231 |
+ |
4232 |
+- if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) |
4233 |
++ if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) || |
4234 |
++ CLOSE_STATEID(stateid)) |
4235 |
+ return status; |
4236 |
+ /* Client debugging aid. */ |
4237 |
+ if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) { |
4238 |
+@@ -4924,7 +4929,8 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate, |
4239 |
+ else if (typemask & NFS4_DELEG_STID) |
4240 |
+ typemask |= NFS4_REVOKED_DELEG_STID; |
4241 |
+ |
4242 |
+- if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) |
4243 |
++ if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) || |
4244 |
++ CLOSE_STATEID(stateid)) |
4245 |
+ return nfserr_bad_stateid; |
4246 |
+ status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn); |
4247 |
+ if (status == nfserr_stale_clientid) { |
4248 |
+@@ -5177,15 +5183,9 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_ |
4249 |
+ status = nfsd4_check_seqid(cstate, sop, seqid); |
4250 |
+ if (status) |
4251 |
+ return status; |
4252 |
+- if (stp->st_stid.sc_type == NFS4_CLOSED_STID |
4253 |
+- || stp->st_stid.sc_type == NFS4_REVOKED_DELEG_STID) |
4254 |
+- /* |
4255 |
+- * "Closed" stateid's exist *only* to return |
4256 |
+- * nfserr_replay_me from the previous step, and |
4257 |
+- * revoked delegations are kept only for free_stateid. |
4258 |
+- */ |
4259 |
+- return nfserr_bad_stateid; |
4260 |
+- mutex_lock(&stp->st_mutex); |
4261 |
++ status = nfsd4_lock_ol_stateid(stp); |
4262 |
++ if (status != nfs_ok) |
4263 |
++ return status; |
4264 |
+ status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate)); |
4265 |
+ if (status == nfs_ok) |
4266 |
+ status = nfs4_check_fh(current_fh, &stp->st_stid); |
4267 |
+@@ -5411,6 +5411,11 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, |
4268 |
+ nfsd4_close_open_stateid(stp); |
4269 |
+ mutex_unlock(&stp->st_mutex); |
4270 |
+ |
4271 |
++ /* See RFC5661 section 18.2.4 */ |
4272 |
++ if (stp->st_stid.sc_client->cl_minorversion) |
4273 |
++ memcpy(&close->cl_stateid, &close_stateid, |
4274 |
++ sizeof(close->cl_stateid)); |
4275 |
++ |
4276 |
+ /* put reference from nfs4_preprocess_seqid_op */ |
4277 |
+ nfs4_put_stid(&stp->st_stid); |
4278 |
+ out: |
4279 |
+@@ -7016,6 +7021,10 @@ static int nfs4_state_create_net(struct net *net) |
4280 |
+ INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]); |
4281 |
+ nn->conf_name_tree = RB_ROOT; |
4282 |
+ nn->unconf_name_tree = RB_ROOT; |
4283 |
++ nn->boot_time = get_seconds(); |
4284 |
++ nn->grace_ended = false; |
4285 |
++ nn->nfsd4_manager.block_opens = true; |
4286 |
++ INIT_LIST_HEAD(&nn->nfsd4_manager.list); |
4287 |
+ INIT_LIST_HEAD(&nn->client_lru); |
4288 |
+ INIT_LIST_HEAD(&nn->close_lru); |
4289 |
+ INIT_LIST_HEAD(&nn->del_recall_lru); |
4290 |
+@@ -7073,9 +7082,6 @@ nfs4_state_start_net(struct net *net) |
4291 |
+ ret = nfs4_state_create_net(net); |
4292 |
+ if (ret) |
4293 |
+ return ret; |
4294 |
+- nn->boot_time = get_seconds(); |
4295 |
+- nn->grace_ended = false; |
4296 |
+- nn->nfsd4_manager.block_opens = true; |
4297 |
+ locks_start_grace(net, &nn->nfsd4_manager); |
4298 |
+ nfsd4_client_tracking_init(net); |
4299 |
+ printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n", |
4300 |
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c |
4301 |
+index 9f78b5015f2e..4cd0c2336624 100644 |
4302 |
+--- a/fs/quota/dquot.c |
4303 |
++++ b/fs/quota/dquot.c |
4304 |
+@@ -934,12 +934,13 @@ static int dqinit_needed(struct inode *inode, int type) |
4305 |
+ } |
4306 |
+ |
4307 |
+ /* This routine is guarded by s_umount semaphore */ |
4308 |
+-static void add_dquot_ref(struct super_block *sb, int type) |
4309 |
++static int add_dquot_ref(struct super_block *sb, int type) |
4310 |
+ { |
4311 |
+ struct inode *inode, *old_inode = NULL; |
4312 |
+ #ifdef CONFIG_QUOTA_DEBUG |
4313 |
+ int reserved = 0; |
4314 |
+ #endif |
4315 |
++ int err = 0; |
4316 |
+ |
4317 |
+ spin_lock(&sb->s_inode_list_lock); |
4318 |
+ list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { |
4319 |
+@@ -959,7 +960,11 @@ static void add_dquot_ref(struct super_block *sb, int type) |
4320 |
+ reserved = 1; |
4321 |
+ #endif |
4322 |
+ iput(old_inode); |
4323 |
+- __dquot_initialize(inode, type); |
4324 |
++ err = __dquot_initialize(inode, type); |
4325 |
++ if (err) { |
4326 |
++ iput(inode); |
4327 |
++ goto out; |
4328 |
++ } |
4329 |
+ |
4330 |
+ /* |
4331 |
+ * We hold a reference to 'inode' so it couldn't have been |
4332 |
+@@ -974,7 +979,7 @@ static void add_dquot_ref(struct super_block *sb, int type) |
4333 |
+ } |
4334 |
+ spin_unlock(&sb->s_inode_list_lock); |
4335 |
+ iput(old_inode); |
4336 |
+- |
4337 |
++out: |
4338 |
+ #ifdef CONFIG_QUOTA_DEBUG |
4339 |
+ if (reserved) { |
4340 |
+ quota_error(sb, "Writes happened before quota was turned on " |
4341 |
+@@ -982,6 +987,7 @@ static void add_dquot_ref(struct super_block *sb, int type) |
4342 |
+ "Please run quotacheck(8)"); |
4343 |
+ } |
4344 |
+ #endif |
4345 |
++ return err; |
4346 |
+ } |
4347 |
+ |
4348 |
+ /* |
4349 |
+@@ -2372,10 +2378,11 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id, |
4350 |
+ dqopt->flags |= dquot_state_flag(flags, type); |
4351 |
+ spin_unlock(&dq_state_lock); |
4352 |
+ |
4353 |
+- add_dquot_ref(sb, type); |
4354 |
+- |
4355 |
+- return 0; |
4356 |
++ error = add_dquot_ref(sb, type); |
4357 |
++ if (error) |
4358 |
++ dquot_disable(sb, type, flags); |
4359 |
+ |
4360 |
++ return error; |
4361 |
+ out_file_init: |
4362 |
+ dqopt->files[type] = NULL; |
4363 |
+ iput(inode); |
4364 |
+@@ -2978,7 +2985,8 @@ static int __init dquot_init(void) |
4365 |
+ pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld," |
4366 |
+ " %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order)); |
4367 |
+ |
4368 |
+- register_shrinker(&dqcache_shrinker); |
4369 |
++ if (register_shrinker(&dqcache_shrinker)) |
4370 |
++ panic("Cannot register dquot shrinker"); |
4371 |
+ |
4372 |
+ return 0; |
4373 |
+ } |
4374 |
+diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c |
4375 |
+index 5464ec517702..4885c7b6e44f 100644 |
4376 |
+--- a/fs/reiserfs/super.c |
4377 |
++++ b/fs/reiserfs/super.c |
4378 |
+@@ -2591,7 +2591,6 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type, |
4379 |
+ return err; |
4380 |
+ if (inode->i_size < off + len - towrite) |
4381 |
+ i_size_write(inode, off + len - towrite); |
4382 |
+- inode->i_version++; |
4383 |
+ inode->i_mtime = inode->i_ctime = current_time(inode); |
4384 |
+ mark_inode_dirty(inode); |
4385 |
+ return len - towrite; |
4386 |
+diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c |
4387 |
+index a3eeaba156c5..b0cccf8a81a8 100644 |
4388 |
+--- a/fs/xfs/xfs_aops.c |
4389 |
++++ b/fs/xfs/xfs_aops.c |
4390 |
+@@ -399,7 +399,7 @@ xfs_map_blocks( |
4391 |
+ (ip->i_df.if_flags & XFS_IFEXTENTS)); |
4392 |
+ ASSERT(offset <= mp->m_super->s_maxbytes); |
4393 |
+ |
4394 |
+- if (offset + count > mp->m_super->s_maxbytes) |
4395 |
++ if ((xfs_ufsize_t)offset + count > mp->m_super->s_maxbytes) |
4396 |
+ count = mp->m_super->s_maxbytes - offset; |
4397 |
+ end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count); |
4398 |
+ offset_fsb = XFS_B_TO_FSBT(mp, offset); |
4399 |
+@@ -1265,7 +1265,7 @@ xfs_map_trim_size( |
4400 |
+ if (mapping_size > size) |
4401 |
+ mapping_size = size; |
4402 |
+ if (offset < i_size_read(inode) && |
4403 |
+- offset + mapping_size >= i_size_read(inode)) { |
4404 |
++ (xfs_ufsize_t)offset + mapping_size >= i_size_read(inode)) { |
4405 |
+ /* limit mapping to block that spans EOF */ |
4406 |
+ mapping_size = roundup_64(i_size_read(inode) - offset, |
4407 |
+ i_blocksize(inode)); |
4408 |
+@@ -1312,7 +1312,7 @@ xfs_get_blocks( |
4409 |
+ lockmode = xfs_ilock_data_map_shared(ip); |
4410 |
+ |
4411 |
+ ASSERT(offset <= mp->m_super->s_maxbytes); |
4412 |
+- if (offset + size > mp->m_super->s_maxbytes) |
4413 |
++ if ((xfs_ufsize_t)offset + size > mp->m_super->s_maxbytes) |
4414 |
+ size = mp->m_super->s_maxbytes - offset; |
4415 |
+ end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size); |
4416 |
+ offset_fsb = XFS_B_TO_FSBT(mp, offset); |
4417 |
+diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c |
4418 |
+index dd136f7275e4..e5fb008d75e8 100644 |
4419 |
+--- a/fs/xfs/xfs_bmap_item.c |
4420 |
++++ b/fs/xfs/xfs_bmap_item.c |
4421 |
+@@ -389,7 +389,8 @@ xfs_bud_init( |
4422 |
+ int |
4423 |
+ xfs_bui_recover( |
4424 |
+ struct xfs_mount *mp, |
4425 |
+- struct xfs_bui_log_item *buip) |
4426 |
++ struct xfs_bui_log_item *buip, |
4427 |
++ struct xfs_defer_ops *dfops) |
4428 |
+ { |
4429 |
+ int error = 0; |
4430 |
+ unsigned int bui_type; |
4431 |
+@@ -404,9 +405,7 @@ xfs_bui_recover( |
4432 |
+ xfs_exntst_t state; |
4433 |
+ struct xfs_trans *tp; |
4434 |
+ struct xfs_inode *ip = NULL; |
4435 |
+- struct xfs_defer_ops dfops; |
4436 |
+ struct xfs_bmbt_irec irec; |
4437 |
+- xfs_fsblock_t firstfsb; |
4438 |
+ |
4439 |
+ ASSERT(!test_bit(XFS_BUI_RECOVERED, &buip->bui_flags)); |
4440 |
+ |
4441 |
+@@ -464,7 +463,6 @@ xfs_bui_recover( |
4442 |
+ |
4443 |
+ if (VFS_I(ip)->i_nlink == 0) |
4444 |
+ xfs_iflags_set(ip, XFS_IRECOVERY); |
4445 |
+- xfs_defer_init(&dfops, &firstfsb); |
4446 |
+ |
4447 |
+ /* Process deferred bmap item. */ |
4448 |
+ state = (bmap->me_flags & XFS_BMAP_EXTENT_UNWRITTEN) ? |
4449 |
+@@ -479,16 +477,16 @@ xfs_bui_recover( |
4450 |
+ break; |
4451 |
+ default: |
4452 |
+ error = -EFSCORRUPTED; |
4453 |
+- goto err_dfops; |
4454 |
++ goto err_inode; |
4455 |
+ } |
4456 |
+ xfs_trans_ijoin(tp, ip, 0); |
4457 |
+ |
4458 |
+ count = bmap->me_len; |
4459 |
+- error = xfs_trans_log_finish_bmap_update(tp, budp, &dfops, type, |
4460 |
++ error = xfs_trans_log_finish_bmap_update(tp, budp, dfops, type, |
4461 |
+ ip, whichfork, bmap->me_startoff, |
4462 |
+ bmap->me_startblock, &count, state); |
4463 |
+ if (error) |
4464 |
+- goto err_dfops; |
4465 |
++ goto err_inode; |
4466 |
+ |
4467 |
+ if (count > 0) { |
4468 |
+ ASSERT(type == XFS_BMAP_UNMAP); |
4469 |
+@@ -496,16 +494,11 @@ xfs_bui_recover( |
4470 |
+ irec.br_blockcount = count; |
4471 |
+ irec.br_startoff = bmap->me_startoff; |
4472 |
+ irec.br_state = state; |
4473 |
+- error = xfs_bmap_unmap_extent(tp->t_mountp, &dfops, ip, &irec); |
4474 |
++ error = xfs_bmap_unmap_extent(tp->t_mountp, dfops, ip, &irec); |
4475 |
+ if (error) |
4476 |
+- goto err_dfops; |
4477 |
++ goto err_inode; |
4478 |
+ } |
4479 |
+ |
4480 |
+- /* Finish transaction, free inodes. */ |
4481 |
+- error = xfs_defer_finish(&tp, &dfops); |
4482 |
+- if (error) |
4483 |
+- goto err_dfops; |
4484 |
+- |
4485 |
+ set_bit(XFS_BUI_RECOVERED, &buip->bui_flags); |
4486 |
+ error = xfs_trans_commit(tp); |
4487 |
+ xfs_iunlock(ip, XFS_ILOCK_EXCL); |
4488 |
+@@ -513,8 +506,6 @@ xfs_bui_recover( |
4489 |
+ |
4490 |
+ return error; |
4491 |
+ |
4492 |
+-err_dfops: |
4493 |
+- xfs_defer_cancel(&dfops); |
4494 |
+ err_inode: |
4495 |
+ xfs_trans_cancel(tp); |
4496 |
+ if (ip) { |
4497 |
+diff --git a/fs/xfs/xfs_bmap_item.h b/fs/xfs/xfs_bmap_item.h |
4498 |
+index c867daae4a3c..24b354a2c836 100644 |
4499 |
+--- a/fs/xfs/xfs_bmap_item.h |
4500 |
++++ b/fs/xfs/xfs_bmap_item.h |
4501 |
+@@ -93,6 +93,7 @@ struct xfs_bud_log_item *xfs_bud_init(struct xfs_mount *, |
4502 |
+ struct xfs_bui_log_item *); |
4503 |
+ void xfs_bui_item_free(struct xfs_bui_log_item *); |
4504 |
+ void xfs_bui_release(struct xfs_bui_log_item *); |
4505 |
+-int xfs_bui_recover(struct xfs_mount *mp, struct xfs_bui_log_item *buip); |
4506 |
++int xfs_bui_recover(struct xfs_mount *mp, struct xfs_bui_log_item *buip, |
4507 |
++ struct xfs_defer_ops *dfops); |
4508 |
+ |
4509 |
+ #endif /* __XFS_BMAP_ITEM_H__ */ |
4510 |
+diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c |
4511 |
+index 2f97c12ca75e..16f93d7356b7 100644 |
4512 |
+--- a/fs/xfs/xfs_buf.c |
4513 |
++++ b/fs/xfs/xfs_buf.c |
4514 |
+@@ -1813,22 +1813,27 @@ xfs_alloc_buftarg( |
4515 |
+ btp->bt_daxdev = dax_dev; |
4516 |
+ |
4517 |
+ if (xfs_setsize_buftarg_early(btp, bdev)) |
4518 |
+- goto error; |
4519 |
++ goto error_free; |
4520 |
+ |
4521 |
+ if (list_lru_init(&btp->bt_lru)) |
4522 |
+- goto error; |
4523 |
++ goto error_free; |
4524 |
+ |
4525 |
+ if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL)) |
4526 |
+- goto error; |
4527 |
++ goto error_lru; |
4528 |
+ |
4529 |
+ btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count; |
4530 |
+ btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan; |
4531 |
+ btp->bt_shrinker.seeks = DEFAULT_SEEKS; |
4532 |
+ btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE; |
4533 |
+- register_shrinker(&btp->bt_shrinker); |
4534 |
++ if (register_shrinker(&btp->bt_shrinker)) |
4535 |
++ goto error_pcpu; |
4536 |
+ return btp; |
4537 |
+ |
4538 |
+-error: |
4539 |
++error_pcpu: |
4540 |
++ percpu_counter_destroy(&btp->bt_io_count); |
4541 |
++error_lru: |
4542 |
++ list_lru_destroy(&btp->bt_lru); |
4543 |
++error_free: |
4544 |
+ kmem_free(btp); |
4545 |
+ return NULL; |
4546 |
+ } |
4547 |
+diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c |
4548 |
+index cd82429d8df7..5a86495127fd 100644 |
4549 |
+--- a/fs/xfs/xfs_dquot.c |
4550 |
++++ b/fs/xfs/xfs_dquot.c |
4551 |
+@@ -987,14 +987,22 @@ xfs_qm_dqflush_done( |
4552 |
+ * holding the lock before removing the dquot from the AIL. |
4553 |
+ */ |
4554 |
+ if ((lip->li_flags & XFS_LI_IN_AIL) && |
4555 |
+- lip->li_lsn == qip->qli_flush_lsn) { |
4556 |
++ ((lip->li_lsn == qip->qli_flush_lsn) || |
4557 |
++ (lip->li_flags & XFS_LI_FAILED))) { |
4558 |
+ |
4559 |
+ /* xfs_trans_ail_delete() drops the AIL lock. */ |
4560 |
+ spin_lock(&ailp->xa_lock); |
4561 |
+- if (lip->li_lsn == qip->qli_flush_lsn) |
4562 |
++ if (lip->li_lsn == qip->qli_flush_lsn) { |
4563 |
+ xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE); |
4564 |
+- else |
4565 |
++ } else { |
4566 |
++ /* |
4567 |
++ * Clear the failed state since we are about to drop the |
4568 |
++ * flush lock |
4569 |
++ */ |
4570 |
++ if (lip->li_flags & XFS_LI_FAILED) |
4571 |
++ xfs_clear_li_failed(lip); |
4572 |
+ spin_unlock(&ailp->xa_lock); |
4573 |
++ } |
4574 |
+ } |
4575 |
+ |
4576 |
+ /* |
4577 |
+diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c |
4578 |
+index 2c7a1629e064..664dea105e76 100644 |
4579 |
+--- a/fs/xfs/xfs_dquot_item.c |
4580 |
++++ b/fs/xfs/xfs_dquot_item.c |
4581 |
+@@ -137,6 +137,26 @@ xfs_qm_dqunpin_wait( |
4582 |
+ wait_event(dqp->q_pinwait, (atomic_read(&dqp->q_pincount) == 0)); |
4583 |
+ } |
4584 |
+ |
4585 |
++/* |
4586 |
++ * Callback used to mark a buffer with XFS_LI_FAILED when items in the buffer |
4587 |
++ * have been failed during writeback |
4588 |
++ * |
4589 |
++ * this informs the AIL that the dquot is already flush locked on the next push, |
4590 |
++ * and acquires a hold on the buffer to ensure that it isn't reclaimed before |
4591 |
++ * dirty data makes it to disk. |
4592 |
++ */ |
4593 |
++STATIC void |
4594 |
++xfs_dquot_item_error( |
4595 |
++ struct xfs_log_item *lip, |
4596 |
++ struct xfs_buf *bp) |
4597 |
++{ |
4598 |
++ struct xfs_dquot *dqp; |
4599 |
++ |
4600 |
++ dqp = DQUOT_ITEM(lip)->qli_dquot; |
4601 |
++ ASSERT(!completion_done(&dqp->q_flush)); |
4602 |
++ xfs_set_li_failed(lip, bp); |
4603 |
++} |
4604 |
++ |
4605 |
+ STATIC uint |
4606 |
+ xfs_qm_dquot_logitem_push( |
4607 |
+ struct xfs_log_item *lip, |
4608 |
+@@ -144,13 +164,28 @@ xfs_qm_dquot_logitem_push( |
4609 |
+ __acquires(&lip->li_ailp->xa_lock) |
4610 |
+ { |
4611 |
+ struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot; |
4612 |
+- struct xfs_buf *bp = NULL; |
4613 |
++ struct xfs_buf *bp = lip->li_buf; |
4614 |
+ uint rval = XFS_ITEM_SUCCESS; |
4615 |
+ int error; |
4616 |
+ |
4617 |
+ if (atomic_read(&dqp->q_pincount) > 0) |
4618 |
+ return XFS_ITEM_PINNED; |
4619 |
+ |
4620 |
++ /* |
4621 |
++ * The buffer containing this item failed to be written back |
4622 |
++ * previously. Resubmit the buffer for IO |
4623 |
++ */ |
4624 |
++ if (lip->li_flags & XFS_LI_FAILED) { |
4625 |
++ if (!xfs_buf_trylock(bp)) |
4626 |
++ return XFS_ITEM_LOCKED; |
4627 |
++ |
4628 |
++ if (!xfs_buf_resubmit_failed_buffers(bp, lip, buffer_list)) |
4629 |
++ rval = XFS_ITEM_FLUSHING; |
4630 |
++ |
4631 |
++ xfs_buf_unlock(bp); |
4632 |
++ return rval; |
4633 |
++ } |
4634 |
++ |
4635 |
+ if (!xfs_dqlock_nowait(dqp)) |
4636 |
+ return XFS_ITEM_LOCKED; |
4637 |
+ |
4638 |
+@@ -242,7 +277,8 @@ static const struct xfs_item_ops xfs_dquot_item_ops = { |
4639 |
+ .iop_unlock = xfs_qm_dquot_logitem_unlock, |
4640 |
+ .iop_committed = xfs_qm_dquot_logitem_committed, |
4641 |
+ .iop_push = xfs_qm_dquot_logitem_push, |
4642 |
+- .iop_committing = xfs_qm_dquot_logitem_committing |
4643 |
++ .iop_committing = xfs_qm_dquot_logitem_committing, |
4644 |
++ .iop_error = xfs_dquot_item_error |
4645 |
+ }; |
4646 |
+ |
4647 |
+ /* |
4648 |
+diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c |
4649 |
+index 63350906961a..cb4833d06467 100644 |
4650 |
+--- a/fs/xfs/xfs_inode.c |
4651 |
++++ b/fs/xfs/xfs_inode.c |
4652 |
+@@ -2421,6 +2421,24 @@ xfs_ifree_cluster( |
4653 |
+ return 0; |
4654 |
+ } |
4655 |
+ |
4656 |
++/* |
4657 |
++ * Free any local-format buffers sitting around before we reset to |
4658 |
++ * extents format. |
4659 |
++ */ |
4660 |
++static inline void |
4661 |
++xfs_ifree_local_data( |
4662 |
++ struct xfs_inode *ip, |
4663 |
++ int whichfork) |
4664 |
++{ |
4665 |
++ struct xfs_ifork *ifp; |
4666 |
++ |
4667 |
++ if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL) |
4668 |
++ return; |
4669 |
++ |
4670 |
++ ifp = XFS_IFORK_PTR(ip, whichfork); |
4671 |
++ xfs_idata_realloc(ip, -ifp->if_bytes, whichfork); |
4672 |
++} |
4673 |
++ |
4674 |
+ /* |
4675 |
+ * This is called to return an inode to the inode free list. |
4676 |
+ * The inode should already be truncated to 0 length and have |
4677 |
+@@ -2458,6 +2476,9 @@ xfs_ifree( |
4678 |
+ if (error) |
4679 |
+ return error; |
4680 |
+ |
4681 |
++ xfs_ifree_local_data(ip, XFS_DATA_FORK); |
4682 |
++ xfs_ifree_local_data(ip, XFS_ATTR_FORK); |
4683 |
++ |
4684 |
+ VFS_I(ip)->i_mode = 0; /* mark incore inode as free */ |
4685 |
+ ip->i_d.di_flags = 0; |
4686 |
+ ip->i_d.di_dmevmask = 0; |
4687 |
+diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c |
4688 |
+index d6e049fdd977..eaf29646c28f 100644 |
4689 |
+--- a/fs/xfs/xfs_log_recover.c |
4690 |
++++ b/fs/xfs/xfs_log_recover.c |
4691 |
+@@ -24,6 +24,7 @@ |
4692 |
+ #include "xfs_bit.h" |
4693 |
+ #include "xfs_sb.h" |
4694 |
+ #include "xfs_mount.h" |
4695 |
++#include "xfs_defer.h" |
4696 |
+ #include "xfs_da_format.h" |
4697 |
+ #include "xfs_da_btree.h" |
4698 |
+ #include "xfs_inode.h" |
4699 |
+@@ -4714,7 +4715,8 @@ STATIC int |
4700 |
+ xlog_recover_process_cui( |
4701 |
+ struct xfs_mount *mp, |
4702 |
+ struct xfs_ail *ailp, |
4703 |
+- struct xfs_log_item *lip) |
4704 |
++ struct xfs_log_item *lip, |
4705 |
++ struct xfs_defer_ops *dfops) |
4706 |
+ { |
4707 |
+ struct xfs_cui_log_item *cuip; |
4708 |
+ int error; |
4709 |
+@@ -4727,7 +4729,7 @@ xlog_recover_process_cui( |
4710 |
+ return 0; |
4711 |
+ |
4712 |
+ spin_unlock(&ailp->xa_lock); |
4713 |
+- error = xfs_cui_recover(mp, cuip); |
4714 |
++ error = xfs_cui_recover(mp, cuip, dfops); |
4715 |
+ spin_lock(&ailp->xa_lock); |
4716 |
+ |
4717 |
+ return error; |
4718 |
+@@ -4754,7 +4756,8 @@ STATIC int |
4719 |
+ xlog_recover_process_bui( |
4720 |
+ struct xfs_mount *mp, |
4721 |
+ struct xfs_ail *ailp, |
4722 |
+- struct xfs_log_item *lip) |
4723 |
++ struct xfs_log_item *lip, |
4724 |
++ struct xfs_defer_ops *dfops) |
4725 |
+ { |
4726 |
+ struct xfs_bui_log_item *buip; |
4727 |
+ int error; |
4728 |
+@@ -4767,7 +4770,7 @@ xlog_recover_process_bui( |
4729 |
+ return 0; |
4730 |
+ |
4731 |
+ spin_unlock(&ailp->xa_lock); |
4732 |
+- error = xfs_bui_recover(mp, buip); |
4733 |
++ error = xfs_bui_recover(mp, buip, dfops); |
4734 |
+ spin_lock(&ailp->xa_lock); |
4735 |
+ |
4736 |
+ return error; |
4737 |
+@@ -4803,6 +4806,46 @@ static inline bool xlog_item_is_intent(struct xfs_log_item *lip) |
4738 |
+ } |
4739 |
+ } |
4740 |
+ |
4741 |
++/* Take all the collected deferred ops and finish them in order. */ |
4742 |
++static int |
4743 |
++xlog_finish_defer_ops( |
4744 |
++ struct xfs_mount *mp, |
4745 |
++ struct xfs_defer_ops *dfops) |
4746 |
++{ |
4747 |
++ struct xfs_trans *tp; |
4748 |
++ int64_t freeblks; |
4749 |
++ uint resblks; |
4750 |
++ int error; |
4751 |
++ |
4752 |
++ /* |
4753 |
++ * We're finishing the defer_ops that accumulated as a result of |
4754 |
++ * recovering unfinished intent items during log recovery. We |
4755 |
++ * reserve an itruncate transaction because it is the largest |
4756 |
++ * permanent transaction type. Since we're the only user of the fs |
4757 |
++ * right now, take 93% (15/16) of the available free blocks. Use |
4758 |
++ * weird math to avoid a 64-bit division. |
4759 |
++ */ |
4760 |
++ freeblks = percpu_counter_sum(&mp->m_fdblocks); |
4761 |
++ if (freeblks <= 0) |
4762 |
++ return -ENOSPC; |
4763 |
++ resblks = min_t(int64_t, UINT_MAX, freeblks); |
4764 |
++ resblks = (resblks * 15) >> 4; |
4765 |
++ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, resblks, |
4766 |
++ 0, XFS_TRANS_RESERVE, &tp); |
4767 |
++ if (error) |
4768 |
++ return error; |
4769 |
++ |
4770 |
++ error = xfs_defer_finish(&tp, dfops); |
4771 |
++ if (error) |
4772 |
++ goto out_cancel; |
4773 |
++ |
4774 |
++ return xfs_trans_commit(tp); |
4775 |
++ |
4776 |
++out_cancel: |
4777 |
++ xfs_trans_cancel(tp); |
4778 |
++ return error; |
4779 |
++} |
4780 |
++ |
4781 |
+ /* |
4782 |
+ * When this is called, all of the log intent items which did not have |
4783 |
+ * corresponding log done items should be in the AIL. What we do now |
4784 |
+@@ -4823,10 +4866,12 @@ STATIC int |
4785 |
+ xlog_recover_process_intents( |
4786 |
+ struct xlog *log) |
4787 |
+ { |
4788 |
+- struct xfs_log_item *lip; |
4789 |
+- int error = 0; |
4790 |
++ struct xfs_defer_ops dfops; |
4791 |
+ struct xfs_ail_cursor cur; |
4792 |
++ struct xfs_log_item *lip; |
4793 |
+ struct xfs_ail *ailp; |
4794 |
++ xfs_fsblock_t firstfsb; |
4795 |
++ int error = 0; |
4796 |
+ #if defined(DEBUG) || defined(XFS_WARN) |
4797 |
+ xfs_lsn_t last_lsn; |
4798 |
+ #endif |
4799 |
+@@ -4837,6 +4882,7 @@ xlog_recover_process_intents( |
4800 |
+ #if defined(DEBUG) || defined(XFS_WARN) |
4801 |
+ last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block); |
4802 |
+ #endif |
4803 |
++ xfs_defer_init(&dfops, &firstfsb); |
4804 |
+ while (lip != NULL) { |
4805 |
+ /* |
4806 |
+ * We're done when we see something other than an intent. |
4807 |
+@@ -4857,6 +4903,12 @@ xlog_recover_process_intents( |
4808 |
+ */ |
4809 |
+ ASSERT(XFS_LSN_CMP(last_lsn, lip->li_lsn) >= 0); |
4810 |
+ |
4811 |
++ /* |
4812 |
++ * NOTE: If your intent processing routine can create more |
4813 |
++ * deferred ops, you /must/ attach them to the dfops in this |
4814 |
++ * routine or else those subsequent intents will get |
4815 |
++ * replayed in the wrong order! |
4816 |
++ */ |
4817 |
+ switch (lip->li_type) { |
4818 |
+ case XFS_LI_EFI: |
4819 |
+ error = xlog_recover_process_efi(log->l_mp, ailp, lip); |
4820 |
+@@ -4865,10 +4917,12 @@ xlog_recover_process_intents( |
4821 |
+ error = xlog_recover_process_rui(log->l_mp, ailp, lip); |
4822 |
+ break; |
4823 |
+ case XFS_LI_CUI: |
4824 |
+- error = xlog_recover_process_cui(log->l_mp, ailp, lip); |
4825 |
++ error = xlog_recover_process_cui(log->l_mp, ailp, lip, |
4826 |
++ &dfops); |
4827 |
+ break; |
4828 |
+ case XFS_LI_BUI: |
4829 |
+- error = xlog_recover_process_bui(log->l_mp, ailp, lip); |
4830 |
++ error = xlog_recover_process_bui(log->l_mp, ailp, lip, |
4831 |
++ &dfops); |
4832 |
+ break; |
4833 |
+ } |
4834 |
+ if (error) |
4835 |
+@@ -4878,6 +4932,11 @@ xlog_recover_process_intents( |
4836 |
+ out: |
4837 |
+ xfs_trans_ail_cursor_done(&cur); |
4838 |
+ spin_unlock(&ailp->xa_lock); |
4839 |
++ if (error) |
4840 |
++ xfs_defer_cancel(&dfops); |
4841 |
++ else |
4842 |
++ error = xlog_finish_defer_ops(log->l_mp, &dfops); |
4843 |
++ |
4844 |
+ return error; |
4845 |
+ } |
4846 |
+ |
4847 |
+diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c |
4848 |
+index 8f2e2fac4255..3a55d6fc271b 100644 |
4849 |
+--- a/fs/xfs/xfs_refcount_item.c |
4850 |
++++ b/fs/xfs/xfs_refcount_item.c |
4851 |
+@@ -393,7 +393,8 @@ xfs_cud_init( |
4852 |
+ int |
4853 |
+ xfs_cui_recover( |
4854 |
+ struct xfs_mount *mp, |
4855 |
+- struct xfs_cui_log_item *cuip) |
4856 |
++ struct xfs_cui_log_item *cuip, |
4857 |
++ struct xfs_defer_ops *dfops) |
4858 |
+ { |
4859 |
+ int i; |
4860 |
+ int error = 0; |
4861 |
+@@ -405,11 +406,9 @@ xfs_cui_recover( |
4862 |
+ struct xfs_trans *tp; |
4863 |
+ struct xfs_btree_cur *rcur = NULL; |
4864 |
+ enum xfs_refcount_intent_type type; |
4865 |
+- xfs_fsblock_t firstfsb; |
4866 |
+ xfs_fsblock_t new_fsb; |
4867 |
+ xfs_extlen_t new_len; |
4868 |
+ struct xfs_bmbt_irec irec; |
4869 |
+- struct xfs_defer_ops dfops; |
4870 |
+ bool requeue_only = false; |
4871 |
+ |
4872 |
+ ASSERT(!test_bit(XFS_CUI_RECOVERED, &cuip->cui_flags)); |
4873 |
+@@ -465,7 +464,6 @@ xfs_cui_recover( |
4874 |
+ return error; |
4875 |
+ cudp = xfs_trans_get_cud(tp, cuip); |
4876 |
+ |
4877 |
+- xfs_defer_init(&dfops, &firstfsb); |
4878 |
+ for (i = 0; i < cuip->cui_format.cui_nextents; i++) { |
4879 |
+ refc = &cuip->cui_format.cui_extents[i]; |
4880 |
+ refc_type = refc->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK; |
4881 |
+@@ -485,7 +483,7 @@ xfs_cui_recover( |
4882 |
+ new_len = refc->pe_len; |
4883 |
+ } else |
4884 |
+ error = xfs_trans_log_finish_refcount_update(tp, cudp, |
4885 |
+- &dfops, type, refc->pe_startblock, refc->pe_len, |
4886 |
++ dfops, type, refc->pe_startblock, refc->pe_len, |
4887 |
+ &new_fsb, &new_len, &rcur); |
4888 |
+ if (error) |
4889 |
+ goto abort_error; |
4890 |
+@@ -497,21 +495,21 @@ xfs_cui_recover( |
4891 |
+ switch (type) { |
4892 |
+ case XFS_REFCOUNT_INCREASE: |
4893 |
+ error = xfs_refcount_increase_extent( |
4894 |
+- tp->t_mountp, &dfops, &irec); |
4895 |
++ tp->t_mountp, dfops, &irec); |
4896 |
+ break; |
4897 |
+ case XFS_REFCOUNT_DECREASE: |
4898 |
+ error = xfs_refcount_decrease_extent( |
4899 |
+- tp->t_mountp, &dfops, &irec); |
4900 |
++ tp->t_mountp, dfops, &irec); |
4901 |
+ break; |
4902 |
+ case XFS_REFCOUNT_ALLOC_COW: |
4903 |
+ error = xfs_refcount_alloc_cow_extent( |
4904 |
+- tp->t_mountp, &dfops, |
4905 |
++ tp->t_mountp, dfops, |
4906 |
+ irec.br_startblock, |
4907 |
+ irec.br_blockcount); |
4908 |
+ break; |
4909 |
+ case XFS_REFCOUNT_FREE_COW: |
4910 |
+ error = xfs_refcount_free_cow_extent( |
4911 |
+- tp->t_mountp, &dfops, |
4912 |
++ tp->t_mountp, dfops, |
4913 |
+ irec.br_startblock, |
4914 |
+ irec.br_blockcount); |
4915 |
+ break; |
4916 |
+@@ -525,17 +523,12 @@ xfs_cui_recover( |
4917 |
+ } |
4918 |
+ |
4919 |
+ xfs_refcount_finish_one_cleanup(tp, rcur, error); |
4920 |
+- error = xfs_defer_finish(&tp, &dfops); |
4921 |
+- if (error) |
4922 |
+- goto abort_defer; |
4923 |
+ set_bit(XFS_CUI_RECOVERED, &cuip->cui_flags); |
4924 |
+ error = xfs_trans_commit(tp); |
4925 |
+ return error; |
4926 |
+ |
4927 |
+ abort_error: |
4928 |
+ xfs_refcount_finish_one_cleanup(tp, rcur, error); |
4929 |
+-abort_defer: |
4930 |
+- xfs_defer_cancel(&dfops); |
4931 |
+ xfs_trans_cancel(tp); |
4932 |
+ return error; |
4933 |
+ } |
4934 |
+diff --git a/fs/xfs/xfs_refcount_item.h b/fs/xfs/xfs_refcount_item.h |
4935 |
+index 5b74dddfa64b..0e5327349a13 100644 |
4936 |
+--- a/fs/xfs/xfs_refcount_item.h |
4937 |
++++ b/fs/xfs/xfs_refcount_item.h |
4938 |
+@@ -96,6 +96,7 @@ struct xfs_cud_log_item *xfs_cud_init(struct xfs_mount *, |
4939 |
+ struct xfs_cui_log_item *); |
4940 |
+ void xfs_cui_item_free(struct xfs_cui_log_item *); |
4941 |
+ void xfs_cui_release(struct xfs_cui_log_item *); |
4942 |
+-int xfs_cui_recover(struct xfs_mount *mp, struct xfs_cui_log_item *cuip); |
4943 |
++int xfs_cui_recover(struct xfs_mount *mp, struct xfs_cui_log_item *cuip, |
4944 |
++ struct xfs_defer_ops *dfops); |
4945 |
+ |
4946 |
+ #endif /* __XFS_REFCOUNT_ITEM_H__ */ |
4947 |
+diff --git a/include/crypto/gcm.h b/include/crypto/gcm.h |
4948 |
+new file mode 100644 |
4949 |
+index 000000000000..c50e057ea17e |
4950 |
+--- /dev/null |
4951 |
++++ b/include/crypto/gcm.h |
4952 |
+@@ -0,0 +1,8 @@ |
4953 |
++#ifndef _CRYPTO_GCM_H |
4954 |
++#define _CRYPTO_GCM_H |
4955 |
++ |
4956 |
++#define GCM_AES_IV_SIZE 12 |
4957 |
++#define GCM_RFC4106_IV_SIZE 8 |
4958 |
++#define GCM_RFC4543_IV_SIZE 8 |
4959 |
++ |
4960 |
++#endif |
4961 |
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h |
4962 |
+index 6882538eda32..5a8019befafd 100644 |
4963 |
+--- a/include/linux/kvm_host.h |
4964 |
++++ b/include/linux/kvm_host.h |
4965 |
+@@ -714,6 +714,9 @@ int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, |
4966 |
+ unsigned long len); |
4967 |
+ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn); |
4968 |
+ |
4969 |
++void kvm_sigset_activate(struct kvm_vcpu *vcpu); |
4970 |
++void kvm_sigset_deactivate(struct kvm_vcpu *vcpu); |
4971 |
++ |
4972 |
+ void kvm_vcpu_block(struct kvm_vcpu *vcpu); |
4973 |
+ void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu); |
4974 |
+ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu); |
4975 |
+diff --git a/include/linux/tty.h b/include/linux/tty.h |
4976 |
+index 7ac8ba208b1f..0a6c71e0ad01 100644 |
4977 |
+--- a/include/linux/tty.h |
4978 |
++++ b/include/linux/tty.h |
4979 |
+@@ -405,6 +405,8 @@ extern const char *tty_name(const struct tty_struct *tty); |
4980 |
+ extern struct tty_struct *tty_kopen(dev_t device); |
4981 |
+ extern void tty_kclose(struct tty_struct *tty); |
4982 |
+ extern int tty_dev_name_to_number(const char *name, dev_t *number); |
4983 |
++extern int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout); |
4984 |
++extern void tty_ldisc_unlock(struct tty_struct *tty); |
4985 |
+ #else |
4986 |
+ static inline void tty_kref_put(struct tty_struct *tty) |
4987 |
+ { } |
4988 |
+diff --git a/include/net/mac80211.h b/include/net/mac80211.h |
4989 |
+index 885690fa39c8..4f1d2dec43ce 100644 |
4990 |
+--- a/include/net/mac80211.h |
4991 |
++++ b/include/net/mac80211.h |
4992 |
+@@ -4470,18 +4470,24 @@ struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw, |
4993 |
+ * ieee80211_nullfunc_get - retrieve a nullfunc template |
4994 |
+ * @hw: pointer obtained from ieee80211_alloc_hw(). |
4995 |
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback. |
4996 |
++ * @qos_ok: QoS NDP is acceptable to the caller, this should be set |
4997 |
++ * if at all possible |
4998 |
+ * |
4999 |
+ * Creates a Nullfunc template which can, for example, uploaded to |
5000 |
+ * hardware. The template must be updated after association so that correct |
5001 |
+ * BSSID and address is used. |
5002 |
+ * |
5003 |
++ * If @qos_ndp is set and the association is to an AP with QoS/WMM, the |
5004 |
++ * returned packet will be QoS NDP. |
5005 |
++ * |
5006 |
+ * Note: Caller (or hardware) is responsible for setting the |
5007 |
+ * &IEEE80211_FCTL_PM bit as well as Duration and Sequence Control fields. |
5008 |
+ * |
5009 |
+ * Return: The nullfunc template. %NULL on error. |
5010 |
+ */ |
5011 |
+ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw, |
5012 |
+- struct ieee80211_vif *vif); |
5013 |
++ struct ieee80211_vif *vif, |
5014 |
++ bool qos_ok); |
5015 |
+ |
5016 |
+ /** |
5017 |
+ * ieee80211_probereq_get - retrieve a Probe Request template |
5018 |
+diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h |
5019 |
+index ebe96796027a..a58caf5807ff 100644 |
5020 |
+--- a/include/trace/events/rxrpc.h |
5021 |
++++ b/include/trace/events/rxrpc.h |
5022 |
+@@ -49,6 +49,7 @@ enum rxrpc_conn_trace { |
5023 |
+ rxrpc_conn_put_client, |
5024 |
+ rxrpc_conn_put_service, |
5025 |
+ rxrpc_conn_queued, |
5026 |
++ rxrpc_conn_reap_service, |
5027 |
+ rxrpc_conn_seen, |
5028 |
+ }; |
5029 |
+ |
5030 |
+@@ -206,6 +207,7 @@ enum rxrpc_congest_change { |
5031 |
+ EM(rxrpc_conn_put_client, "PTc") \ |
5032 |
+ EM(rxrpc_conn_put_service, "PTs") \ |
5033 |
+ EM(rxrpc_conn_queued, "QUE") \ |
5034 |
++ EM(rxrpc_conn_reap_service, "RPs") \ |
5035 |
+ E_(rxrpc_conn_seen, "SEE") |
5036 |
+ |
5037 |
+ #define rxrpc_client_traces \ |
5038 |
+diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h |
5039 |
+index 26283fefdf5f..f7015aa12347 100644 |
5040 |
+--- a/include/uapi/linux/kfd_ioctl.h |
5041 |
++++ b/include/uapi/linux/kfd_ioctl.h |
5042 |
+@@ -233,29 +233,29 @@ struct kfd_ioctl_wait_events_args { |
5043 |
+ }; |
5044 |
+ |
5045 |
+ struct kfd_ioctl_set_scratch_backing_va_args { |
5046 |
+- uint64_t va_addr; /* to KFD */ |
5047 |
+- uint32_t gpu_id; /* to KFD */ |
5048 |
+- uint32_t pad; |
5049 |
++ __u64 va_addr; /* to KFD */ |
5050 |
++ __u32 gpu_id; /* to KFD */ |
5051 |
++ __u32 pad; |
5052 |
+ }; |
5053 |
+ |
5054 |
+ struct kfd_ioctl_get_tile_config_args { |
5055 |
+ /* to KFD: pointer to tile array */ |
5056 |
+- uint64_t tile_config_ptr; |
5057 |
++ __u64 tile_config_ptr; |
5058 |
+ /* to KFD: pointer to macro tile array */ |
5059 |
+- uint64_t macro_tile_config_ptr; |
5060 |
++ __u64 macro_tile_config_ptr; |
5061 |
+ /* to KFD: array size allocated by user mode |
5062 |
+ * from KFD: array size filled by kernel |
5063 |
+ */ |
5064 |
+- uint32_t num_tile_configs; |
5065 |
++ __u32 num_tile_configs; |
5066 |
+ /* to KFD: array size allocated by user mode |
5067 |
+ * from KFD: array size filled by kernel |
5068 |
+ */ |
5069 |
+- uint32_t num_macro_tile_configs; |
5070 |
++ __u32 num_macro_tile_configs; |
5071 |
+ |
5072 |
+- uint32_t gpu_id; /* to KFD */ |
5073 |
+- uint32_t gb_addr_config; /* from KFD */ |
5074 |
+- uint32_t num_banks; /* from KFD */ |
5075 |
+- uint32_t num_ranks; /* from KFD */ |
5076 |
++ __u32 gpu_id; /* to KFD */ |
5077 |
++ __u32 gb_addr_config; /* from KFD */ |
5078 |
++ __u32 num_banks; /* from KFD */ |
5079 |
++ __u32 num_ranks; /* from KFD */ |
5080 |
+ /* struct size can be extended later if needed |
5081 |
+ * without breaking ABI compatibility |
5082 |
+ */ |
5083 |
+diff --git a/kernel/events/core.c b/kernel/events/core.c |
5084 |
+index 24ebad5567b4..8c20af8738ac 100644 |
5085 |
+--- a/kernel/events/core.c |
5086 |
++++ b/kernel/events/core.c |
5087 |
+@@ -6756,6 +6756,7 @@ static void perf_fill_ns_link_info(struct perf_ns_link_info *ns_link_info, |
5088 |
+ ns_inode = ns_path.dentry->d_inode; |
5089 |
+ ns_link_info->dev = new_encode_dev(ns_inode->i_sb->s_dev); |
5090 |
+ ns_link_info->ino = ns_inode->i_ino; |
5091 |
++ path_put(&ns_path); |
5092 |
+ } |
5093 |
+ } |
5094 |
+ |
5095 |
+diff --git a/kernel/futex.c b/kernel/futex.c |
5096 |
+index 52b3f4703158..046cd780d057 100644 |
5097 |
+--- a/kernel/futex.c |
5098 |
++++ b/kernel/futex.c |
5099 |
+@@ -2311,9 +2311,6 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, |
5100 |
+ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); |
5101 |
+ |
5102 |
+ oldowner = pi_state->owner; |
5103 |
+- /* Owner died? */ |
5104 |
+- if (!pi_state->owner) |
5105 |
+- newtid |= FUTEX_OWNER_DIED; |
5106 |
+ |
5107 |
+ /* |
5108 |
+ * We are here because either: |
5109 |
+@@ -2374,6 +2371,9 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, |
5110 |
+ } |
5111 |
+ |
5112 |
+ newtid = task_pid_vnr(newowner) | FUTEX_WAITERS; |
5113 |
++ /* Owner died? */ |
5114 |
++ if (!pi_state->owner) |
5115 |
++ newtid |= FUTEX_OWNER_DIED; |
5116 |
+ |
5117 |
+ if (get_futex_value_locked(&uval, uaddr)) |
5118 |
+ goto handle_fault; |
5119 |
+diff --git a/lib/test_firmware.c b/lib/test_firmware.c |
5120 |
+index 64a4c76cba2b..e7008688769b 100644 |
5121 |
+--- a/lib/test_firmware.c |
5122 |
++++ b/lib/test_firmware.c |
5123 |
+@@ -371,6 +371,7 @@ static ssize_t config_num_requests_store(struct device *dev, |
5124 |
+ if (test_fw_config->reqs) { |
5125 |
+ pr_err("Must call release_all_firmware prior to changing config\n"); |
5126 |
+ rc = -EINVAL; |
5127 |
++ mutex_unlock(&test_fw_mutex); |
5128 |
+ goto out; |
5129 |
+ } |
5130 |
+ mutex_unlock(&test_fw_mutex); |
5131 |
+diff --git a/mm/kmemleak.c b/mm/kmemleak.c |
5132 |
+index 7780cd83a495..a1ba553816eb 100644 |
5133 |
+--- a/mm/kmemleak.c |
5134 |
++++ b/mm/kmemleak.c |
5135 |
+@@ -1532,6 +1532,8 @@ static void kmemleak_scan(void) |
5136 |
+ if (page_count(page) == 0) |
5137 |
+ continue; |
5138 |
+ scan_block(page, page + 1, NULL); |
5139 |
++ if (!(pfn % (MAX_SCAN_SIZE / sizeof(*page)))) |
5140 |
++ cond_resched(); |
5141 |
+ } |
5142 |
+ } |
5143 |
+ put_online_mems(); |
5144 |
+diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c |
5145 |
+index d8bbd0d2225a..d6d3f316de4c 100644 |
5146 |
+--- a/net/mac80211/mesh_hwmp.c |
5147 |
++++ b/net/mac80211/mesh_hwmp.c |
5148 |
+@@ -797,7 +797,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata, |
5149 |
+ struct mesh_path *mpath; |
5150 |
+ u8 ttl, flags, hopcount; |
5151 |
+ const u8 *orig_addr; |
5152 |
+- u32 orig_sn, metric, metric_txsta, interval; |
5153 |
++ u32 orig_sn, new_metric, orig_metric, last_hop_metric, interval; |
5154 |
+ bool root_is_gate; |
5155 |
+ |
5156 |
+ ttl = rann->rann_ttl; |
5157 |
+@@ -808,7 +808,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata, |
5158 |
+ interval = le32_to_cpu(rann->rann_interval); |
5159 |
+ hopcount = rann->rann_hopcount; |
5160 |
+ hopcount++; |
5161 |
+- metric = le32_to_cpu(rann->rann_metric); |
5162 |
++ orig_metric = le32_to_cpu(rann->rann_metric); |
5163 |
+ |
5164 |
+ /* Ignore our own RANNs */ |
5165 |
+ if (ether_addr_equal(orig_addr, sdata->vif.addr)) |
5166 |
+@@ -825,7 +825,10 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata, |
5167 |
+ return; |
5168 |
+ } |
5169 |
+ |
5170 |
+- metric_txsta = airtime_link_metric_get(local, sta); |
5171 |
++ last_hop_metric = airtime_link_metric_get(local, sta); |
5172 |
++ new_metric = orig_metric + last_hop_metric; |
5173 |
++ if (new_metric < orig_metric) |
5174 |
++ new_metric = MAX_METRIC; |
5175 |
+ |
5176 |
+ mpath = mesh_path_lookup(sdata, orig_addr); |
5177 |
+ if (!mpath) { |
5178 |
+@@ -838,7 +841,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata, |
5179 |
+ } |
5180 |
+ |
5181 |
+ if (!(SN_LT(mpath->sn, orig_sn)) && |
5182 |
+- !(mpath->sn == orig_sn && metric < mpath->rann_metric)) { |
5183 |
++ !(mpath->sn == orig_sn && new_metric < mpath->rann_metric)) { |
5184 |
+ rcu_read_unlock(); |
5185 |
+ return; |
5186 |
+ } |
5187 |
+@@ -856,7 +859,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata, |
5188 |
+ } |
5189 |
+ |
5190 |
+ mpath->sn = orig_sn; |
5191 |
+- mpath->rann_metric = metric + metric_txsta; |
5192 |
++ mpath->rann_metric = new_metric; |
5193 |
+ mpath->is_root = true; |
5194 |
+ /* Recording RANNs sender address to send individually |
5195 |
+ * addressed PREQs destined for root mesh STA */ |
5196 |
+@@ -876,7 +879,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata, |
5197 |
+ mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr, |
5198 |
+ orig_sn, 0, NULL, 0, broadcast_addr, |
5199 |
+ hopcount, ttl, interval, |
5200 |
+- metric + metric_txsta, 0, sdata); |
5201 |
++ new_metric, 0, sdata); |
5202 |
+ } |
5203 |
+ |
5204 |
+ rcu_read_unlock(); |
5205 |
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c |
5206 |
+index 3b8e2709d8de..9115cc52ce83 100644 |
5207 |
+--- a/net/mac80211/mlme.c |
5208 |
++++ b/net/mac80211/mlme.c |
5209 |
+@@ -908,7 +908,7 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local, |
5210 |
+ struct ieee80211_hdr_3addr *nullfunc; |
5211 |
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
5212 |
+ |
5213 |
+- skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif); |
5214 |
++ skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif, true); |
5215 |
+ if (!skb) |
5216 |
+ return; |
5217 |
+ |
5218 |
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c |
5219 |
+index 94826680cf2b..73429841f115 100644 |
5220 |
+--- a/net/mac80211/tx.c |
5221 |
++++ b/net/mac80211/tx.c |
5222 |
+@@ -4404,13 +4404,15 @@ struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw, |
5223 |
+ EXPORT_SYMBOL(ieee80211_pspoll_get); |
5224 |
+ |
5225 |
+ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw, |
5226 |
+- struct ieee80211_vif *vif) |
5227 |
++ struct ieee80211_vif *vif, |
5228 |
++ bool qos_ok) |
5229 |
+ { |
5230 |
+ struct ieee80211_hdr_3addr *nullfunc; |
5231 |
+ struct ieee80211_sub_if_data *sdata; |
5232 |
+ struct ieee80211_if_managed *ifmgd; |
5233 |
+ struct ieee80211_local *local; |
5234 |
+ struct sk_buff *skb; |
5235 |
++ bool qos = false; |
5236 |
+ |
5237 |
+ if (WARN_ON(vif->type != NL80211_IFTYPE_STATION)) |
5238 |
+ return NULL; |
5239 |
+@@ -4419,7 +4421,17 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw, |
5240 |
+ ifmgd = &sdata->u.mgd; |
5241 |
+ local = sdata->local; |
5242 |
+ |
5243 |
+- skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*nullfunc)); |
5244 |
++ if (qos_ok) { |
5245 |
++ struct sta_info *sta; |
5246 |
++ |
5247 |
++ rcu_read_lock(); |
5248 |
++ sta = sta_info_get(sdata, ifmgd->bssid); |
5249 |
++ qos = sta && sta->sta.wme; |
5250 |
++ rcu_read_unlock(); |
5251 |
++ } |
5252 |
++ |
5253 |
++ skb = dev_alloc_skb(local->hw.extra_tx_headroom + |
5254 |
++ sizeof(*nullfunc) + 2); |
5255 |
+ if (!skb) |
5256 |
+ return NULL; |
5257 |
+ |
5258 |
+@@ -4429,6 +4441,19 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw, |
5259 |
+ nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | |
5260 |
+ IEEE80211_STYPE_NULLFUNC | |
5261 |
+ IEEE80211_FCTL_TODS); |
5262 |
++ if (qos) { |
5263 |
++ __le16 qos = cpu_to_le16(7); |
5264 |
++ |
5265 |
++ BUILD_BUG_ON((IEEE80211_STYPE_QOS_NULLFUNC | |
5266 |
++ IEEE80211_STYPE_NULLFUNC) != |
5267 |
++ IEEE80211_STYPE_QOS_NULLFUNC); |
5268 |
++ nullfunc->frame_control |= |
5269 |
++ cpu_to_le16(IEEE80211_STYPE_QOS_NULLFUNC); |
5270 |
++ skb->priority = 7; |
5271 |
++ skb_set_queue_mapping(skb, IEEE80211_AC_VO); |
5272 |
++ skb_put_data(skb, &qos, sizeof(qos)); |
5273 |
++ } |
5274 |
++ |
5275 |
+ memcpy(nullfunc->addr1, ifmgd->bssid, ETH_ALEN); |
5276 |
+ memcpy(nullfunc->addr2, vif->addr, ETH_ALEN); |
5277 |
+ memcpy(nullfunc->addr3, ifmgd->bssid, ETH_ALEN); |
5278 |
+diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c |
5279 |
+index e8eb427ce6d1..0d9f6afa266c 100644 |
5280 |
+--- a/net/openvswitch/flow_netlink.c |
5281 |
++++ b/net/openvswitch/flow_netlink.c |
5282 |
+@@ -1903,14 +1903,11 @@ int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb) |
5283 |
+ |
5284 |
+ #define MAX_ACTIONS_BUFSIZE (32 * 1024) |
5285 |
+ |
5286 |
+-static struct sw_flow_actions *nla_alloc_flow_actions(int size, bool log) |
5287 |
++static struct sw_flow_actions *nla_alloc_flow_actions(int size) |
5288 |
+ { |
5289 |
+ struct sw_flow_actions *sfa; |
5290 |
+ |
5291 |
+- if (size > MAX_ACTIONS_BUFSIZE) { |
5292 |
+- OVS_NLERR(log, "Flow action size %u bytes exceeds max", size); |
5293 |
+- return ERR_PTR(-EINVAL); |
5294 |
+- } |
5295 |
++ WARN_ON_ONCE(size > MAX_ACTIONS_BUFSIZE); |
5296 |
+ |
5297 |
+ sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL); |
5298 |
+ if (!sfa) |
5299 |
+@@ -1983,12 +1980,15 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa, |
5300 |
+ new_acts_size = ksize(*sfa) * 2; |
5301 |
+ |
5302 |
+ if (new_acts_size > MAX_ACTIONS_BUFSIZE) { |
5303 |
+- if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) |
5304 |
++ if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) { |
5305 |
++ OVS_NLERR(log, "Flow action size exceeds max %u", |
5306 |
++ MAX_ACTIONS_BUFSIZE); |
5307 |
+ return ERR_PTR(-EMSGSIZE); |
5308 |
++ } |
5309 |
+ new_acts_size = MAX_ACTIONS_BUFSIZE; |
5310 |
+ } |
5311 |
+ |
5312 |
+- acts = nla_alloc_flow_actions(new_acts_size, log); |
5313 |
++ acts = nla_alloc_flow_actions(new_acts_size); |
5314 |
+ if (IS_ERR(acts)) |
5315 |
+ return (void *)acts; |
5316 |
+ |
5317 |
+@@ -2660,7 +2660,7 @@ int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr, |
5318 |
+ { |
5319 |
+ int err; |
5320 |
+ |
5321 |
+- *sfa = nla_alloc_flow_actions(nla_len(attr), log); |
5322 |
++ *sfa = nla_alloc_flow_actions(min(nla_len(attr), MAX_ACTIONS_BUFSIZE)); |
5323 |
+ if (IS_ERR(*sfa)) |
5324 |
+ return PTR_ERR(*sfa); |
5325 |
+ |
5326 |
+diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c |
5327 |
+index 4b0a8288c98a..7c1cb08874d5 100644 |
5328 |
+--- a/net/rxrpc/af_rxrpc.c |
5329 |
++++ b/net/rxrpc/af_rxrpc.c |
5330 |
+@@ -823,6 +823,19 @@ static int rxrpc_release_sock(struct sock *sk) |
5331 |
+ sock_orphan(sk); |
5332 |
+ sk->sk_shutdown = SHUTDOWN_MASK; |
5333 |
+ |
5334 |
++ /* We want to kill off all connections from a service socket |
5335 |
++ * as fast as possible because we can't share these; client |
5336 |
++ * sockets, on the other hand, can share an endpoint. |
5337 |
++ */ |
5338 |
++ switch (sk->sk_state) { |
5339 |
++ case RXRPC_SERVER_BOUND: |
5340 |
++ case RXRPC_SERVER_BOUND2: |
5341 |
++ case RXRPC_SERVER_LISTENING: |
5342 |
++ case RXRPC_SERVER_LISTEN_DISABLED: |
5343 |
++ rx->local->service_closed = true; |
5344 |
++ break; |
5345 |
++ } |
5346 |
++ |
5347 |
+ spin_lock_bh(&sk->sk_receive_queue.lock); |
5348 |
+ sk->sk_state = RXRPC_CLOSE; |
5349 |
+ spin_unlock_bh(&sk->sk_receive_queue.lock); |
5350 |
+diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h |
5351 |
+index ea5600b747cc..e6c2c4f56fb1 100644 |
5352 |
+--- a/net/rxrpc/ar-internal.h |
5353 |
++++ b/net/rxrpc/ar-internal.h |
5354 |
+@@ -84,6 +84,7 @@ struct rxrpc_net { |
5355 |
+ unsigned int nr_client_conns; |
5356 |
+ unsigned int nr_active_client_conns; |
5357 |
+ bool kill_all_client_conns; |
5358 |
++ bool live; |
5359 |
+ spinlock_t client_conn_cache_lock; /* Lock for ->*_client_conns */ |
5360 |
+ spinlock_t client_conn_discard_lock; /* Prevent multiple discarders */ |
5361 |
+ struct list_head waiting_client_conns; |
5362 |
+@@ -265,6 +266,7 @@ struct rxrpc_local { |
5363 |
+ rwlock_t services_lock; /* lock for services list */ |
5364 |
+ int debug_id; /* debug ID for printks */ |
5365 |
+ bool dead; |
5366 |
++ bool service_closed; /* Service socket closed */ |
5367 |
+ struct sockaddr_rxrpc srx; /* local address */ |
5368 |
+ }; |
5369 |
+ |
5370 |
+@@ -671,7 +673,7 @@ extern unsigned int rxrpc_max_call_lifetime; |
5371 |
+ extern struct kmem_cache *rxrpc_call_jar; |
5372 |
+ |
5373 |
+ struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long); |
5374 |
+-struct rxrpc_call *rxrpc_alloc_call(gfp_t); |
5375 |
++struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *, gfp_t); |
5376 |
+ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *, |
5377 |
+ struct rxrpc_conn_parameters *, |
5378 |
+ struct sockaddr_rxrpc *, |
5379 |
+@@ -824,6 +826,7 @@ void rxrpc_process_connection(struct work_struct *); |
5380 |
+ * conn_object.c |
5381 |
+ */ |
5382 |
+ extern unsigned int rxrpc_connection_expiry; |
5383 |
++extern unsigned int rxrpc_closed_conn_expiry; |
5384 |
+ |
5385 |
+ struct rxrpc_connection *rxrpc_alloc_connection(gfp_t); |
5386 |
+ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *, |
5387 |
+diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c |
5388 |
+index cbd1701e813a..3028298ca561 100644 |
5389 |
+--- a/net/rxrpc/call_accept.c |
5390 |
++++ b/net/rxrpc/call_accept.c |
5391 |
+@@ -94,7 +94,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx, |
5392 |
+ /* Now it gets complicated, because calls get registered with the |
5393 |
+ * socket here, particularly if a user ID is preassigned by the user. |
5394 |
+ */ |
5395 |
+- call = rxrpc_alloc_call(gfp); |
5396 |
++ call = rxrpc_alloc_call(rx, gfp); |
5397 |
+ if (!call) |
5398 |
+ return -ENOMEM; |
5399 |
+ call->flags |= (1 << RXRPC_CALL_IS_SERVICE); |
5400 |
+diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c |
5401 |
+index fcdd6555a820..8a5a42e8ec23 100644 |
5402 |
+--- a/net/rxrpc/call_object.c |
5403 |
++++ b/net/rxrpc/call_object.c |
5404 |
+@@ -55,6 +55,8 @@ static void rxrpc_call_timer_expired(unsigned long _call) |
5405 |
+ rxrpc_set_timer(call, rxrpc_timer_expired, ktime_get_real()); |
5406 |
+ } |
5407 |
+ |
5408 |
++static struct lock_class_key rxrpc_call_user_mutex_lock_class_key; |
5409 |
++ |
5410 |
+ /* |
5411 |
+ * find an extant server call |
5412 |
+ * - called in process context with IRQs enabled |
5413 |
+@@ -95,7 +97,7 @@ struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx, |
5414 |
+ /* |
5415 |
+ * allocate a new call |
5416 |
+ */ |
5417 |
+-struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp) |
5418 |
++struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp) |
5419 |
+ { |
5420 |
+ struct rxrpc_call *call; |
5421 |
+ |
5422 |
+@@ -114,6 +116,14 @@ struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp) |
5423 |
+ goto nomem_2; |
5424 |
+ |
5425 |
+ mutex_init(&call->user_mutex); |
5426 |
++ |
5427 |
++ /* Prevent lockdep reporting a deadlock false positive between the afs |
5428 |
++ * filesystem and sys_sendmsg() via the mmap sem. |
5429 |
++ */ |
5430 |
++ if (rx->sk.sk_kern_sock) |
5431 |
++ lockdep_set_class(&call->user_mutex, |
5432 |
++ &rxrpc_call_user_mutex_lock_class_key); |
5433 |
++ |
5434 |
+ setup_timer(&call->timer, rxrpc_call_timer_expired, |
5435 |
+ (unsigned long)call); |
5436 |
+ INIT_WORK(&call->processor, &rxrpc_process_call); |
5437 |
+@@ -150,7 +160,8 @@ struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp) |
5438 |
+ /* |
5439 |
+ * Allocate a new client call. |
5440 |
+ */ |
5441 |
+-static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx, |
5442 |
++static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx, |
5443 |
++ struct sockaddr_rxrpc *srx, |
5444 |
+ gfp_t gfp) |
5445 |
+ { |
5446 |
+ struct rxrpc_call *call; |
5447 |
+@@ -158,7 +169,7 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx, |
5448 |
+ |
5449 |
+ _enter(""); |
5450 |
+ |
5451 |
+- call = rxrpc_alloc_call(gfp); |
5452 |
++ call = rxrpc_alloc_call(rx, gfp); |
5453 |
+ if (!call) |
5454 |
+ return ERR_PTR(-ENOMEM); |
5455 |
+ call->state = RXRPC_CALL_CLIENT_AWAIT_CONN; |
5456 |
+@@ -209,7 +220,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, |
5457 |
+ |
5458 |
+ _enter("%p,%lx", rx, user_call_ID); |
5459 |
+ |
5460 |
+- call = rxrpc_alloc_client_call(srx, gfp); |
5461 |
++ call = rxrpc_alloc_client_call(rx, srx, gfp); |
5462 |
+ if (IS_ERR(call)) { |
5463 |
+ release_sock(&rx->sk); |
5464 |
+ _leave(" = %ld", PTR_ERR(call)); |
5465 |
+diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c |
5466 |
+index 5f9624bd311c..78a154173d90 100644 |
5467 |
+--- a/net/rxrpc/conn_client.c |
5468 |
++++ b/net/rxrpc/conn_client.c |
5469 |
+@@ -1061,6 +1061,8 @@ void rxrpc_discard_expired_client_conns(struct work_struct *work) |
5470 |
+ expiry = rxrpc_conn_idle_client_expiry; |
5471 |
+ if (nr_conns > rxrpc_reap_client_connections) |
5472 |
+ expiry = rxrpc_conn_idle_client_fast_expiry; |
5473 |
++ if (conn->params.local->service_closed) |
5474 |
++ expiry = rxrpc_closed_conn_expiry * HZ; |
5475 |
+ |
5476 |
+ conn_expires_at = conn->idle_timestamp + expiry; |
5477 |
+ |
5478 |
+diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c |
5479 |
+index fe575798592f..a48c817b792b 100644 |
5480 |
+--- a/net/rxrpc/conn_object.c |
5481 |
++++ b/net/rxrpc/conn_object.c |
5482 |
+@@ -20,7 +20,8 @@ |
5483 |
+ /* |
5484 |
+ * Time till a connection expires after last use (in seconds). |
5485 |
+ */ |
5486 |
+-unsigned int rxrpc_connection_expiry = 10 * 60; |
5487 |
++unsigned int __read_mostly rxrpc_connection_expiry = 10 * 60; |
5488 |
++unsigned int __read_mostly rxrpc_closed_conn_expiry = 10; |
5489 |
+ |
5490 |
+ static void rxrpc_destroy_connection(struct rcu_head *); |
5491 |
+ |
5492 |
+@@ -312,7 +313,7 @@ void rxrpc_put_service_conn(struct rxrpc_connection *conn) |
5493 |
+ n = atomic_dec_return(&conn->usage); |
5494 |
+ trace_rxrpc_conn(conn, rxrpc_conn_put_service, n, here); |
5495 |
+ ASSERTCMP(n, >=, 0); |
5496 |
+- if (n == 0) { |
5497 |
++ if (n == 1) { |
5498 |
+ rxnet = conn->params.local->rxnet; |
5499 |
+ rxrpc_queue_delayed_work(&rxnet->service_conn_reaper, 0); |
5500 |
+ } |
5501 |
+@@ -353,15 +354,14 @@ void rxrpc_service_connection_reaper(struct work_struct *work) |
5502 |
+ struct rxrpc_net *rxnet = |
5503 |
+ container_of(to_delayed_work(work), |
5504 |
+ struct rxrpc_net, service_conn_reaper); |
5505 |
+- unsigned long reap_older_than, earliest, idle_timestamp, now; |
5506 |
++ unsigned long expire_at, earliest, idle_timestamp, now; |
5507 |
+ |
5508 |
+ LIST_HEAD(graveyard); |
5509 |
+ |
5510 |
+ _enter(""); |
5511 |
+ |
5512 |
+ now = jiffies; |
5513 |
+- reap_older_than = now - rxrpc_connection_expiry * HZ; |
5514 |
+- earliest = ULONG_MAX; |
5515 |
++ earliest = now + MAX_JIFFY_OFFSET; |
5516 |
+ |
5517 |
+ write_lock(&rxnet->conn_lock); |
5518 |
+ list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) { |
5519 |
+@@ -371,15 +371,21 @@ void rxrpc_service_connection_reaper(struct work_struct *work) |
5520 |
+ if (conn->state == RXRPC_CONN_SERVICE_PREALLOC) |
5521 |
+ continue; |
5522 |
+ |
5523 |
+- idle_timestamp = READ_ONCE(conn->idle_timestamp); |
5524 |
+- _debug("reap CONN %d { u=%d,t=%ld }", |
5525 |
+- conn->debug_id, atomic_read(&conn->usage), |
5526 |
+- (long)reap_older_than - (long)idle_timestamp); |
5527 |
+- |
5528 |
+- if (time_after(idle_timestamp, reap_older_than)) { |
5529 |
+- if (time_before(idle_timestamp, earliest)) |
5530 |
+- earliest = idle_timestamp; |
5531 |
+- continue; |
5532 |
++ if (rxnet->live) { |
5533 |
++ idle_timestamp = READ_ONCE(conn->idle_timestamp); |
5534 |
++ expire_at = idle_timestamp + rxrpc_connection_expiry * HZ; |
5535 |
++ if (conn->params.local->service_closed) |
5536 |
++ expire_at = idle_timestamp + rxrpc_closed_conn_expiry * HZ; |
5537 |
++ |
5538 |
++ _debug("reap CONN %d { u=%d,t=%ld }", |
5539 |
++ conn->debug_id, atomic_read(&conn->usage), |
5540 |
++ (long)expire_at - (long)now); |
5541 |
++ |
5542 |
++ if (time_before(now, expire_at)) { |
5543 |
++ if (time_before(expire_at, earliest)) |
5544 |
++ earliest = expire_at; |
5545 |
++ continue; |
5546 |
++ } |
5547 |
+ } |
5548 |
+ |
5549 |
+ /* The usage count sits at 1 whilst the object is unused on the |
5550 |
+@@ -387,6 +393,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work) |
5551 |
+ */ |
5552 |
+ if (atomic_cmpxchg(&conn->usage, 1, 0) != 1) |
5553 |
+ continue; |
5554 |
++ trace_rxrpc_conn(conn, rxrpc_conn_reap_service, 0, 0); |
5555 |
+ |
5556 |
+ if (rxrpc_conn_is_client(conn)) |
5557 |
+ BUG(); |
5558 |
+@@ -397,10 +404,10 @@ void rxrpc_service_connection_reaper(struct work_struct *work) |
5559 |
+ } |
5560 |
+ write_unlock(&rxnet->conn_lock); |
5561 |
+ |
5562 |
+- if (earliest != ULONG_MAX) { |
5563 |
+- _debug("reschedule reaper %ld", (long) earliest - now); |
5564 |
++ if (earliest != now + MAX_JIFFY_OFFSET) { |
5565 |
++ _debug("reschedule reaper %ld", (long)earliest - (long)now); |
5566 |
+ ASSERT(time_after(earliest, now)); |
5567 |
+- rxrpc_queue_delayed_work(&rxnet->client_conn_reaper, |
5568 |
++ rxrpc_queue_delayed_work(&rxnet->service_conn_reaper, |
5569 |
+ earliest - now); |
5570 |
+ } |
5571 |
+ |
5572 |
+@@ -429,7 +436,6 @@ void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet) |
5573 |
+ |
5574 |
+ rxrpc_destroy_all_client_connections(rxnet); |
5575 |
+ |
5576 |
+- rxrpc_connection_expiry = 0; |
5577 |
+ cancel_delayed_work(&rxnet->client_conn_reaper); |
5578 |
+ rxrpc_queue_delayed_work(&rxnet->client_conn_reaper, 0); |
5579 |
+ flush_workqueue(rxrpc_workqueue); |
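The reworked service-connection reaper above computes each connection's expiry as its idle timestamp plus an expiry interval, compares it against "now" with the wrap-safe jiffies helpers, and tracks the earliest future expiry so it can reschedule itself. Below is a minimal userspace sketch of that comparison pattern, using the same signed-difference trick that time_before()/time_after() rely on; the rxrpc structures, HZ-based constants and MAX_JIFFY_OFFSET are replaced by plain stand-ins.

#include <limits.h>
#include <stdio.h>

/* Wrap-safe "a is before b" for free-running unsigned counters,
 * in the spirit of the kernel's time_before(). */
static int before(unsigned long a, unsigned long b)
{
    return (long)(a - b) < 0;
}

int main(void)
{
    unsigned long now = 1000;
    unsigned long idle_stamp[3] = { 100, 950, 990 };
    unsigned long expiry = 100;               /* stands in for expiry * HZ */
    unsigned long earliest = now + LONG_MAX;  /* stands in for now + MAX_JIFFY_OFFSET */

    for (int i = 0; i < 3; i++) {
        unsigned long expire_at = idle_stamp[i] + expiry;

        if (before(now, expire_at)) {         /* not yet expired */
            if (before(expire_at, earliest))
                earliest = expire_at;
            continue;
        }
        printf("entry %d expired\n", i);
    }

    if (earliest != now + LONG_MAX)
        printf("reschedule reaper in %ld ticks\n", (long)(earliest - now));
    return 0;
}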
5580 |
+diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c |
5581 |
+index 7edceb8522f5..684c51d600c7 100644 |
5582 |
+--- a/net/rxrpc/net_ns.c |
5583 |
++++ b/net/rxrpc/net_ns.c |
5584 |
+@@ -22,6 +22,7 @@ static __net_init int rxrpc_init_net(struct net *net) |
5585 |
+ struct rxrpc_net *rxnet = rxrpc_net(net); |
5586 |
+ int ret; |
5587 |
+ |
5588 |
++ rxnet->live = true; |
5589 |
+ get_random_bytes(&rxnet->epoch, sizeof(rxnet->epoch)); |
5590 |
+ rxnet->epoch |= RXRPC_RANDOM_EPOCH; |
5591 |
+ |
5592 |
+@@ -60,6 +61,7 @@ static __net_init int rxrpc_init_net(struct net *net) |
5593 |
+ return 0; |
5594 |
+ |
5595 |
+ err_proc: |
5596 |
++ rxnet->live = false; |
5597 |
+ return ret; |
5598 |
+ } |
5599 |
+ |
5600 |
+@@ -70,6 +72,7 @@ static __net_exit void rxrpc_exit_net(struct net *net) |
5601 |
+ { |
5602 |
+ struct rxrpc_net *rxnet = rxrpc_net(net); |
5603 |
+ |
5604 |
++ rxnet->live = false; |
5605 |
+ rxrpc_destroy_all_calls(rxnet); |
5606 |
+ rxrpc_destroy_all_connections(rxnet); |
5607 |
+ rxrpc_destroy_all_locals(rxnet); |
5608 |
+diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c |
5609 |
+index 9ea6f972767e..d2f51d6a253c 100644 |
5610 |
+--- a/net/rxrpc/sendmsg.c |
5611 |
++++ b/net/rxrpc/sendmsg.c |
5612 |
+@@ -563,8 +563,8 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) |
5613 |
+ /* The socket is now unlocked. */ |
5614 |
+ if (IS_ERR(call)) |
5615 |
+ return PTR_ERR(call); |
5616 |
+- rxrpc_put_call(call, rxrpc_call_put); |
5617 |
+- return 0; |
5618 |
++ ret = 0; |
5619 |
++ goto out_put_unlock; |
5620 |
+ } |
5621 |
+ |
5622 |
+ call = rxrpc_find_call_by_user_ID(rx, p.user_call_ID); |
5623 |
+@@ -633,6 +633,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) |
5624 |
+ ret = rxrpc_send_data(rx, call, msg, len, NULL); |
5625 |
+ } |
5626 |
+ |
5627 |
++out_put_unlock: |
5628 |
+ mutex_unlock(&call->user_mutex); |
5629 |
+ error_put: |
5630 |
+ rxrpc_put_call(call, rxrpc_call_put); |
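The sendmsg hunks above route the new-call early return through the added out_put_unlock label so the call's user_mutex is always dropped before the reference is put. A small userspace sketch of that goto-unwind pattern with a pthread mutex follows; the fake_call structure, reference counter and error value are stand-ins, not the rxrpc objects.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct fake_call {
    pthread_mutex_t user_mutex;
    int refs;
};

static int do_sendmsg(struct fake_call *call, int new_call)
{
    int ret;

    pthread_mutex_lock(&call->user_mutex);
    call->refs++;                 /* stand-in for taking a call reference */

    if (new_call) {               /* early-out path: must still unlock and put */
        ret = 0;
        goto out_put_unlock;
    }

    ret = -EPROTO;                /* stand-in for the normal send path */

out_put_unlock:
    pthread_mutex_unlock(&call->user_mutex);
    call->refs--;                 /* stand-in for rxrpc_put_call() */
    return ret;
}

int main(void)
{
    struct fake_call call = { PTHREAD_MUTEX_INITIALIZER, 0 };

    printf("new call path: %d, refs left: %d\n", do_sendmsg(&call, 1), call.refs);
    printf("data path:     %d, refs left: %d\n", do_sendmsg(&call, 0), call.refs);
    return 0;
}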
5631 |
+diff --git a/net/sctp/stream.c b/net/sctp/stream.c |
5632 |
+index 724adf2786a2..9ea6057ed28b 100644 |
5633 |
+--- a/net/sctp/stream.c |
5634 |
++++ b/net/sctp/stream.c |
5635 |
+@@ -224,6 +224,9 @@ int sctp_send_reset_assoc(struct sctp_association *asoc) |
5636 |
+ if (asoc->strreset_outstanding) |
5637 |
+ return -EINPROGRESS; |
5638 |
+ |
5639 |
++ if (!sctp_outq_is_empty(&asoc->outqueue)) |
5640 |
++ return -EAGAIN; |
5641 |
++ |
5642 |
+ chunk = sctp_make_strreset_tsnreq(asoc); |
5643 |
+ if (!chunk) |
5644 |
+ return -ENOMEM; |
5645 |
+@@ -538,12 +541,18 @@ struct sctp_chunk *sctp_process_strreset_tsnreq( |
5646 |
+ i = asoc->strreset_inseq - request_seq - 1; |
5647 |
+ result = asoc->strreset_result[i]; |
5648 |
+ if (result == SCTP_STRRESET_PERFORMED) { |
5649 |
+- next_tsn = asoc->next_tsn; |
5650 |
++ next_tsn = asoc->ctsn_ack_point + 1; |
5651 |
+ init_tsn = |
5652 |
+ sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1; |
5653 |
+ } |
5654 |
+ goto err; |
5655 |
+ } |
5656 |
++ |
5657 |
++ if (!sctp_outq_is_empty(&asoc->outqueue)) { |
5658 |
++ result = SCTP_STRRESET_IN_PROGRESS; |
5659 |
++ goto err; |
5660 |
++ } |
5661 |
++ |
5662 |
+ asoc->strreset_inseq++; |
5663 |
+ |
5664 |
+ if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_ASSOC_REQ)) |
5665 |
+@@ -554,9 +563,10 @@ struct sctp_chunk *sctp_process_strreset_tsnreq( |
5666 |
+ goto out; |
5667 |
+ } |
5668 |
+ |
5669 |
+- /* G3: The same processing as though a SACK chunk with no gap report |
5670 |
+- * and a cumulative TSN ACK of the Sender's Next TSN minus 1 were |
5671 |
+- * received MUST be performed. |
5672 |
++ /* G4: The same processing as though a FWD-TSN chunk (as defined in |
5673 |
++ * [RFC3758]) with all streams affected and a new cumulative TSN |
5674 |
++ * ACK of the Receiver's Next TSN minus 1 were received MUST be |
5675 |
++ * performed. |
5676 |
+ */ |
5677 |
+ max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map); |
5678 |
+ sctp_ulpq_reasm_flushtsn(&asoc->ulpq, max_tsn_seen); |
5679 |
+@@ -571,10 +581,9 @@ struct sctp_chunk *sctp_process_strreset_tsnreq( |
5680 |
+ sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL, |
5681 |
+ init_tsn, GFP_ATOMIC); |
5682 |
+ |
5683 |
+- /* G4: The same processing as though a FWD-TSN chunk (as defined in |
5684 |
+- * [RFC3758]) with all streams affected and a new cumulative TSN |
5685 |
+- * ACK of the Receiver's Next TSN minus 1 were received MUST be |
5686 |
+- * performed. |
5687 |
++ /* G3: The same processing as though a SACK chunk with no gap report |
5688 |
++ * and a cumulative TSN ACK of the Sender's Next TSN minus 1 were |
5689 |
++ * received MUST be performed. |
5690 |
+ */ |
5691 |
+ sctp_outq_free(&asoc->outqueue); |
5692 |
+ |
5693 |
+@@ -835,6 +844,7 @@ struct sctp_chunk *sctp_process_strreset_resp( |
5694 |
+ if (result == SCTP_STRRESET_PERFORMED) { |
5695 |
+ __u32 mtsn = sctp_tsnmap_get_max_tsn_seen( |
5696 |
+ &asoc->peer.tsn_map); |
5697 |
++ LIST_HEAD(temp); |
5698 |
+ |
5699 |
+ sctp_ulpq_reasm_flushtsn(&asoc->ulpq, mtsn); |
5700 |
+ sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC); |
5701 |
+@@ -843,7 +853,13 @@ struct sctp_chunk *sctp_process_strreset_resp( |
5702 |
+ SCTP_TSN_MAP_INITIAL, |
5703 |
+ stsn, GFP_ATOMIC); |
5704 |
+ |
5705 |
++ /* Clean up sacked and abandoned queues only. As the |
5706 |
++ * out_chunk_list may not be empty, splice it to temp, |
5707 |
++ * then get it back after sctp_outq_free is done. |
5708 |
++ */ |
5709 |
++ list_splice_init(&asoc->outqueue.out_chunk_list, &temp); |
5710 |
+ sctp_outq_free(&asoc->outqueue); |
5711 |
++ list_splice_init(&temp, &asoc->outqueue.out_chunk_list); |
5712 |
+ |
5713 |
+ asoc->next_tsn = rtsn; |
5714 |
+ asoc->ctsn_ack_point = asoc->next_tsn - 1; |
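The sctp_process_strreset_resp() hunk above keeps the not-yet-sent chunks alive by splicing out_chunk_list onto a temporary head before sctp_outq_free() and splicing it back afterwards. The sketch below shows the same detach/free/reattach idea with a plain singly linked list; sctp_outq, its sub-queues and the kernel list helpers are not modelled.

#include <stdio.h>
#include <stdlib.h>

struct node { int v; struct node *next; };

/* Destructive cleanup, like sctp_outq_free(): frees everything it can reach. */
static void queue_free(struct node **head)
{
    while (*head) {
        struct node *n = *head;
        *head = n->next;
        free(n);
    }
}

static struct node *push(struct node *head, int v)
{
    struct node *n = malloc(sizeof(*n));
    n->v = v;
    n->next = head;
    return n;
}

int main(void)
{
    struct node *sacked  = push(push(NULL, 1), 2);  /* to be freed */
    struct node *pending = push(push(NULL, 3), 4);  /* must survive the flush */

    /* Detach the list we want to keep, free the rest, then reattach. */
    struct node *temp = pending;
    pending = NULL;
    queue_free(&sacked);
    queue_free(&pending);          /* now empty, nothing is lost */
    pending = temp;

    for (struct node *n = pending; n; n = n->next)
        printf("kept chunk %d\n", n->v);
    queue_free(&pending);
    return 0;
}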
5715 |
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c |
5716 |
+index 4dad5da388d6..8cb40f8ffa5b 100644 |
5717 |
+--- a/net/sunrpc/xprtsock.c |
5718 |
++++ b/net/sunrpc/xprtsock.c |
5719 |
+@@ -2437,6 +2437,7 @@ static void xs_tcp_setup_socket(struct work_struct *work) |
5720 |
+ case -ECONNREFUSED: |
5721 |
+ case -ECONNRESET: |
5722 |
+ case -ENETUNREACH: |
5723 |
++ case -EHOSTUNREACH: |
5724 |
+ case -EADDRINUSE: |
5725 |
+ case -ENOBUFS: |
5726 |
+ /* |
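The xprtsock change above adds -EHOSTUNREACH to the set of connect failures the transport treats as transient rather than fatal. A hedged userspace sketch of that kind of errno classification follows; the RPC transport state machine itself is not reproduced, and the sample values are only illustrative.

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Returns 1 if a failed connect() with this errno is worth retrying later. */
static int connect_errno_is_transient(int err)
{
    switch (err) {
    case ECONNREFUSED:
    case ECONNRESET:
    case ENETUNREACH:
    case EHOSTUNREACH:   /* the case this patch adds */
    case EADDRINUSE:
    case ENOBUFS:
        return 1;
    default:
        return 0;
    }
}

int main(void)
{
    int samples[] = { EHOSTUNREACH, EACCES };

    for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("%s: %s\n", strerror(samples[i]),
               connect_errno_is_transient(samples[i]) ? "retry" : "fail");
    return 0;
}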
5727 |
+diff --git a/scripts/faddr2line b/scripts/faddr2line |
5728 |
+index 1f5ce959f596..39e07d8574dd 100755 |
5729 |
+--- a/scripts/faddr2line |
5730 |
++++ b/scripts/faddr2line |
5731 |
+@@ -44,9 +44,16 @@ |
5732 |
+ set -o errexit |
5733 |
+ set -o nounset |
5734 |
+ |
5735 |
++READELF="${CROSS_COMPILE}readelf" |
5736 |
++ADDR2LINE="${CROSS_COMPILE}addr2line" |
5737 |
++SIZE="${CROSS_COMPILE}size" |
5738 |
++NM="${CROSS_COMPILE}nm" |
5739 |
++ |
5740 |
+ command -v awk >/dev/null 2>&1 || die "awk isn't installed" |
5741 |
+-command -v readelf >/dev/null 2>&1 || die "readelf isn't installed" |
5742 |
+-command -v addr2line >/dev/null 2>&1 || die "addr2line isn't installed" |
5743 |
++command -v ${READELF} >/dev/null 2>&1 || die "readelf isn't installed" |
5744 |
++command -v ${ADDR2LINE} >/dev/null 2>&1 || die "addr2line isn't installed" |
5745 |
++command -v ${SIZE} >/dev/null 2>&1 || die "size isn't installed" |
5746 |
++command -v ${NM} >/dev/null 2>&1 || die "nm isn't installed" |
5747 |
+ |
5748 |
+ usage() { |
5749 |
+ echo "usage: faddr2line <object file> <func+offset> <func+offset>..." >&2 |
5750 |
+@@ -69,10 +76,10 @@ die() { |
5751 |
+ find_dir_prefix() { |
5752 |
+ local objfile=$1 |
5753 |
+ |
5754 |
+- local start_kernel_addr=$(readelf -sW $objfile | awk '$8 == "start_kernel" {printf "0x%s", $2}') |
5755 |
++ local start_kernel_addr=$(${READELF} -sW $objfile | awk '$8 == "start_kernel" {printf "0x%s", $2}') |
5756 |
+ [[ -z $start_kernel_addr ]] && return |
5757 |
+ |
5758 |
+- local file_line=$(addr2line -e $objfile $start_kernel_addr) |
5759 |
++ local file_line=$(${ADDR2LINE} -e $objfile $start_kernel_addr) |
5760 |
+ [[ -z $file_line ]] && return |
5761 |
+ |
5762 |
+ local prefix=${file_line%init/main.c:*} |
5763 |
+@@ -104,7 +111,7 @@ __faddr2line() { |
5764 |
+ |
5765 |
+ # Go through each of the object's symbols which match the func name. |
5766 |
+ # In rare cases there might be duplicates. |
5767 |
+- file_end=$(size -Ax $objfile | awk '$1 == ".text" {print $2}') |
5768 |
++ file_end=$(${SIZE} -Ax $objfile | awk '$1 == ".text" {print $2}') |
5769 |
+ while read symbol; do |
5770 |
+ local fields=($symbol) |
5771 |
+ local sym_base=0x${fields[0]} |
5772 |
+@@ -156,10 +163,10 @@ __faddr2line() { |
5773 |
+ |
5774 |
+ # pass real address to addr2line |
5775 |
+ echo "$func+$offset/$sym_size:" |
5776 |
+- addr2line -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;" |
5777 |
++ ${ADDR2LINE} -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;" |
5778 |
+ DONE=1 |
5779 |
+ |
5780 |
+- done < <(nm -n $objfile | awk -v fn=$func -v end=$file_end '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, "0x"$1 } END {if (found == 1) print line, end; }') |
5781 |
++ done < <(${NM} -n $objfile | awk -v fn=$func -v end=$file_end '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, "0x"$1 } END {if (found == 1) print line, end; }') |
5782 |
+ } |
5783 |
+ |
5784 |
+ [[ $# -lt 2 ]] && usage |
5785 |
+diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c |
5786 |
+index 95209a5f8595..8daf16e1d421 100644 |
5787 |
+--- a/security/integrity/ima/ima_policy.c |
5788 |
++++ b/security/integrity/ima/ima_policy.c |
5789 |
+@@ -743,7 +743,7 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry) |
5790 |
+ case Opt_fsuuid: |
5791 |
+ ima_log_string(ab, "fsuuid", args[0].from); |
5792 |
+ |
5793 |
+- if (uuid_is_null(&entry->fsuuid)) { |
5794 |
++ if (!uuid_is_null(&entry->fsuuid)) { |
5795 |
+ result = -EINVAL; |
5796 |
+ break; |
5797 |
+ } |
5798 |
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c |
5799 |
+index 145e92d6ca94..b2d039537d5e 100644 |
5800 |
+--- a/sound/pci/hda/patch_realtek.c |
5801 |
++++ b/sound/pci/hda/patch_realtek.c |
5802 |
+@@ -3131,11 +3131,13 @@ static void alc256_shutup(struct hda_codec *codec) |
5803 |
+ if (hp_pin_sense) |
5804 |
+ msleep(85); |
5805 |
+ |
5806 |
++ /* 3k pull low control for Headset jack. */ |
5807 |
++ /* NOTE: call this before clearing the pin, otherwise codec stalls */ |
5808 |
++ alc_update_coef_idx(codec, 0x46, 0, 3 << 12); |
5809 |
++ |
5810 |
+ snd_hda_codec_write(codec, hp_pin, 0, |
5811 |
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0); |
5812 |
+ |
5813 |
+- alc_update_coef_idx(codec, 0x46, 0, 3 << 12); /* 3k pull low control for Headset jack. */ |
5814 |
+- |
5815 |
+ if (hp_pin_sense) |
5816 |
+ msleep(100); |
5817 |
+ |
5818 |
+diff --git a/tools/gpio/gpio-event-mon.c b/tools/gpio/gpio-event-mon.c |
5819 |
+index 1c14c2595158..4b36323ea64b 100644 |
5820 |
+--- a/tools/gpio/gpio-event-mon.c |
5821 |
++++ b/tools/gpio/gpio-event-mon.c |
5822 |
+@@ -23,6 +23,7 @@ |
5823 |
+ #include <getopt.h> |
5824 |
+ #include <inttypes.h> |
5825 |
+ #include <sys/ioctl.h> |
5826 |
++#include <sys/types.h> |
5827 |
+ #include <linux/gpio.h> |
5828 |
+ |
5829 |
+ int monitor_device(const char *device_name, |
5830 |
+diff --git a/tools/power/cpupower/bench/system.c b/tools/power/cpupower/bench/system.c |
5831 |
+index c25a74ae51ba..2bb3eef7d5c1 100644 |
5832 |
+--- a/tools/power/cpupower/bench/system.c |
5833 |
++++ b/tools/power/cpupower/bench/system.c |
5834 |
+@@ -61,7 +61,7 @@ int set_cpufreq_governor(char *governor, unsigned int cpu) |
5835 |
+ |
5836 |
+ dprintf("set %s as cpufreq governor\n", governor); |
5837 |
+ |
5838 |
+- if (cpupower_is_cpu_online(cpu) != 0) { |
5839 |
++ if (cpupower_is_cpu_online(cpu) != 1) { |
5840 |
+ perror("cpufreq_cpu_exists"); |
5841 |
+ fprintf(stderr, "error: cpu %u does not exist\n", cpu); |
5842 |
+ return -1; |
5843 |
+diff --git a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c |
5844 |
+index 1b5da0066ebf..5b3205f16217 100644 |
5845 |
+--- a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c |
5846 |
++++ b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c |
5847 |
+@@ -130,15 +130,18 @@ static struct cpuidle_monitor *cpuidle_register(void) |
5848 |
+ { |
5849 |
+ int num; |
5850 |
+ char *tmp; |
5851 |
++ int this_cpu; |
5852 |
++ |
5853 |
++ this_cpu = sched_getcpu(); |
5854 |
+ |
5855 |
+ /* Assume idle state count is the same for all CPUs */ |
5856 |
+- cpuidle_sysfs_monitor.hw_states_num = cpuidle_state_count(0); |
5857 |
++ cpuidle_sysfs_monitor.hw_states_num = cpuidle_state_count(this_cpu); |
5858 |
+ |
5859 |
+ if (cpuidle_sysfs_monitor.hw_states_num <= 0) |
5860 |
+ return NULL; |
5861 |
+ |
5862 |
+ for (num = 0; num < cpuidle_sysfs_monitor.hw_states_num; num++) { |
5863 |
+- tmp = cpuidle_state_name(0, num); |
5864 |
++ tmp = cpuidle_state_name(this_cpu, num); |
5865 |
+ if (tmp == NULL) |
5866 |
+ continue; |
5867 |
+ |
5868 |
+@@ -146,7 +149,7 @@ static struct cpuidle_monitor *cpuidle_register(void) |
5869 |
+ strncpy(cpuidle_cstates[num].name, tmp, CSTATE_NAME_LEN - 1); |
5870 |
+ free(tmp); |
5871 |
+ |
5872 |
+- tmp = cpuidle_state_desc(0, num); |
5873 |
++ tmp = cpuidle_state_desc(this_cpu, num); |
5874 |
+ if (tmp == NULL) |
5875 |
+ continue; |
5876 |
+ strncpy(cpuidle_cstates[num].desc, tmp, CSTATE_DESC_LEN - 1); |
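The cpupower change above queries the idle-state count, names and descriptions on the CPU the monitor is actually running on instead of hard-coding CPU 0, using sched_getcpu(). A minimal sketch of that call is below; the cpupower helpers cpuidle_state_count()/cpuidle_state_name() and the sysfs reads behind them are not reproduced.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
    int this_cpu = sched_getcpu();

    if (this_cpu < 0) {
        perror("sched_getcpu");
        return 1;
    }
    /* Any per-CPU sysfs lookups (e.g. cpuidle state names) would use this_cpu
     * here rather than assuming CPU 0 is present and online. */
    printf("running on CPU %d\n", this_cpu);
    return 0;
}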
5877 |
+diff --git a/tools/usb/usbip/src/usbip_bind.c b/tools/usb/usbip/src/usbip_bind.c |
5878 |
+index fa46141ae68b..e121cfb1746a 100644 |
5879 |
+--- a/tools/usb/usbip/src/usbip_bind.c |
5880 |
++++ b/tools/usb/usbip/src/usbip_bind.c |
5881 |
+@@ -144,6 +144,7 @@ static int bind_device(char *busid) |
5882 |
+ int rc; |
5883 |
+ struct udev *udev; |
5884 |
+ struct udev_device *dev; |
5885 |
++ const char *devpath; |
5886 |
+ |
5887 |
+ /* Check whether the device with this bus ID exists. */ |
5888 |
+ udev = udev_new(); |
5889 |
+@@ -152,8 +153,16 @@ static int bind_device(char *busid) |
5890 |
+ err("device with the specified bus ID does not exist"); |
5891 |
+ return -1; |
5892 |
+ } |
5893 |
++ devpath = udev_device_get_devpath(dev); |
5894 |
+ udev_unref(udev); |
5895 |
+ |
5896 |
++ /* If the device is already attached to vhci_hcd - bail out */ |
5897 |
++ if (strstr(devpath, USBIP_VHCI_DRV_NAME)) { |
5898 |
++ err("bind loop detected: device: %s is attached to %s\n", |
5899 |
++ devpath, USBIP_VHCI_DRV_NAME); |
5900 |
++ return -1; |
5901 |
++ } |
5902 |
++ |
5903 |
+ rc = unbind_other(busid); |
5904 |
+ if (rc == UNBIND_ST_FAILED) { |
5905 |
+ err("could not unbind driver from device on busid %s", busid); |
5906 |
+diff --git a/tools/usb/usbip/src/usbip_list.c b/tools/usb/usbip/src/usbip_list.c |
5907 |
+index f1b38e866dd7..d65a9f444174 100644 |
5908 |
+--- a/tools/usb/usbip/src/usbip_list.c |
5909 |
++++ b/tools/usb/usbip/src/usbip_list.c |
5910 |
+@@ -187,6 +187,7 @@ static int list_devices(bool parsable) |
5911 |
+ const char *busid; |
5912 |
+ char product_name[128]; |
5913 |
+ int ret = -1; |
5914 |
++ const char *devpath; |
5915 |
+ |
5916 |
+ /* Create libudev context. */ |
5917 |
+ udev = udev_new(); |
5918 |
+@@ -209,6 +210,14 @@ static int list_devices(bool parsable) |
5919 |
+ path = udev_list_entry_get_name(dev_list_entry); |
5920 |
+ dev = udev_device_new_from_syspath(udev, path); |
5921 |
+ |
5922 |
++ /* Ignore devices attached to vhci_hcd */ |
5923 |
++ devpath = udev_device_get_devpath(dev); |
5924 |
++ if (strstr(devpath, USBIP_VHCI_DRV_NAME)) { |
5925 |
++ dbg("Skip the device %s already attached to %s\n", |
5926 |
++ devpath, USBIP_VHCI_DRV_NAME); |
5927 |
++ continue; |
5928 |
++ } |
5929 |
++ |
5930 |
+ /* Get device information. */ |
5931 |
+ idVendor = udev_device_get_sysattr_value(dev, "idVendor"); |
5932 |
+ idProduct = udev_device_get_sysattr_value(dev, "idProduct"); |
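Both usbip hunks above fetch the device's devpath from udev and refuse or skip devices whose path already contains the vhci driver name, which prevents binding an imported device back to itself. A small sketch of that substring check follows; the udev calls are replaced by hard-coded example paths, and "vhci_hcd" is assumed to be the value behind USBIP_VHCI_DRV_NAME.

#include <stdio.h>
#include <string.h>

#define VHCI_DRV_NAME "vhci_hcd"   /* stand-in for USBIP_VHCI_DRV_NAME */

static int attached_to_vhci(const char *devpath)
{
    return strstr(devpath, VHCI_DRV_NAME) != NULL;
}

int main(void)
{
    /* Illustrative sysfs paths, not taken from a real system. */
    const char *paths[] = {
        "/devices/pci0000:00/0000:00:14.0/usb1/1-2",
        "/devices/platform/vhci_hcd.0/usb3/3-1",
    };

    for (unsigned i = 0; i < 2; i++)
        printf("%s -> %s\n", paths[i],
               attached_to_vhci(paths[i]) ? "skip (already on vhci)" : "usable");
    return 0;
}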
5933 |
+diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c |
5934 |
+index 95cba0799828..9a07ee94a230 100644 |
5935 |
+--- a/virt/kvm/arm/arm.c |
5936 |
++++ b/virt/kvm/arm/arm.c |
5937 |
+@@ -612,7 +612,6 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu) |
5938 |
+ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) |
5939 |
+ { |
5940 |
+ int ret; |
5941 |
+- sigset_t sigsaved; |
5942 |
+ |
5943 |
+ if (unlikely(!kvm_vcpu_initialized(vcpu))) |
5944 |
+ return -ENOEXEC; |
5945 |
+@@ -630,8 +629,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) |
5946 |
+ if (run->immediate_exit) |
5947 |
+ return -EINTR; |
5948 |
+ |
5949 |
+- if (vcpu->sigset_active) |
5950 |
+- sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); |
5951 |
++ kvm_sigset_activate(vcpu); |
5952 |
+ |
5953 |
+ ret = 1; |
5954 |
+ run->exit_reason = KVM_EXIT_UNKNOWN; |
5955 |
+@@ -753,8 +751,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) |
5956 |
+ kvm_pmu_update_run(vcpu); |
5957 |
+ } |
5958 |
+ |
5959 |
+- if (vcpu->sigset_active) |
5960 |
+- sigprocmask(SIG_SETMASK, &sigsaved, NULL); |
5961 |
++ kvm_sigset_deactivate(vcpu); |
5962 |
++ |
5963 |
+ return ret; |
5964 |
+ } |
5965 |
+ |
5966 |
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c |
5967 |
+index 2447d7c017e7..8401774f5aeb 100644 |
5968 |
+--- a/virt/kvm/kvm_main.c |
5969 |
++++ b/virt/kvm/kvm_main.c |
5970 |
+@@ -2073,6 +2073,29 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn) |
5971 |
+ } |
5972 |
+ EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty); |
5973 |
+ |
5974 |
++void kvm_sigset_activate(struct kvm_vcpu *vcpu) |
5975 |
++{ |
5976 |
++ if (!vcpu->sigset_active) |
5977 |
++ return; |
5978 |
++ |
5979 |
++ /* |
5980 |
++ * This does a lockless modification of ->real_blocked, which is fine |
5981 |
++ * because, only current can change ->real_blocked and all readers of |
5982 |
++ * ->real_blocked don't care as long ->real_blocked is always a subset |
5983 |
++ * of ->blocked. |
5984 |
++ */ |
5985 |
++ sigprocmask(SIG_SETMASK, &vcpu->sigset, ¤t->real_blocked); |
5986 |
++} |
5987 |
++ |
5988 |
++void kvm_sigset_deactivate(struct kvm_vcpu *vcpu) |
5989 |
++{ |
5990 |
++ if (!vcpu->sigset_active) |
5991 |
++ return; |
5992 |
++ |
5993 |
++ sigprocmask(SIG_SETMASK, ¤t->real_blocked, NULL); |
5994 |
++ sigemptyset(¤t->real_blocked); |
5995 |
++} |
5996 |
++ |
5997 |
+ static void grow_halt_poll_ns(struct kvm_vcpu *vcpu) |
5998 |
+ { |
5999 |
+ unsigned int old, val, grow; |
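The KVM hunks above factor the guest-entry signal handling into kvm_sigset_activate()/kvm_sigset_deactivate(), which install the vcpu's sigset around the run loop and restore the task's original blocked mask afterwards. Below is a userspace sketch of the same save/apply/restore pattern with sigprocmask(); the kvm_vcpu bookkeeping and the real_blocked field are kernel-side details that are not modelled here.

#include <signal.h>
#include <stdio.h>

int main(void)
{
    sigset_t run_mask, saved;

    /* Block SIGUSR1 only while the "run loop" executes, like the vcpu sigset. */
    sigemptyset(&run_mask);
    sigaddset(&run_mask, SIGUSR1);

    if (sigprocmask(SIG_SETMASK, &run_mask, &saved) < 0) {   /* activate */
        perror("sigprocmask");
        return 1;
    }

    /* ... guest-run stand-in: SIGUSR1 stays pending instead of interrupting ... */
    printf("running with temporary signal mask\n");

    sigprocmask(SIG_SETMASK, &saved, NULL);                  /* deactivate */
    return 0;
}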