commit: c13c64f2c17f2f38c7d2c4bf9bdc16390f9b6795
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Aug 11 17:44:50 2017 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Aug 11 17:44:50 2017 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c13c64f2

Linux patch 4.4.81

 0000_README             |    4 +
 1080_linux-4.4.81.patch | 2112 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2116 insertions(+)

diff --git a/0000_README b/0000_README
index 82594ae..c396c3a 100644
--- a/0000_README
+++ b/0000_README
@@ -363,6 +363,10 @@ Patch: 1079_linux-4.4.80.patch
 From: http://www.kernel.org
 Desc: Linux 4.4.80

+Patch: 1080_linux-4.4.81.patch
+From: http://www.kernel.org
+Desc: Linux 4.4.81
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1080_linux-4.4.81.patch b/1080_linux-4.4.81.patch
new file mode 100644
index 0000000..952c856
--- /dev/null
+++ b/1080_linux-4.4.81.patch
@@ -0,0 +1,2112 @@
+diff --git a/Makefile b/Makefile
+index dddd55adde24..d049e53a6960 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 80
++SUBLEVEL = 81
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+
+diff --git a/arch/arm/boot/dts/armada-388-gp.dts b/arch/arm/boot/dts/armada-388-gp.dts
+index cd316021d6ce..6c1b45c1af66 100644
+--- a/arch/arm/boot/dts/armada-388-gp.dts
++++ b/arch/arm/boot/dts/armada-388-gp.dts
+@@ -89,7 +89,7 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pca0_pins>;
+ interrupt-parent = <&gpio0>;
+- interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
++ interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+@@ -101,7 +101,7 @@
+ compatible = "nxp,pca9555";
+ pinctrl-names = "default";
+ interrupt-parent = <&gpio0>;
+- interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
++ interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
+index bfe2a2f5a644..22b73112b75f 100644
+--- a/arch/arm/include/asm/ftrace.h
++++ b/arch/arm/include/asm/ftrace.h
+@@ -54,6 +54,24 @@ static inline void *return_address(unsigned int level)
+
+ #define ftrace_return_address(n) return_address(n)
+
++#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
++
++static inline bool arch_syscall_match_sym_name(const char *sym,
++ const char *name)
++{
++ if (!strcmp(sym, "sys_mmap2"))
++ sym = "sys_mmap_pgoff";
++ else if (!strcmp(sym, "sys_statfs64_wrapper"))
++ sym = "sys_statfs64";
++ else if (!strcmp(sym, "sys_fstatfs64_wrapper"))
++ sym = "sys_fstatfs64";
++ else if (!strcmp(sym, "sys_arm_fadvise64_64"))
++ sym = "sys_fadvise64_64";
++
++ /* Ignore case since sym may start with "SyS" instead of "sys" */
++ return !strcasecmp(sym, name);
++}
++
+ #endif /* ifndef __ASSEMBLY__ */
+
+ #endif /* _ASM_ARM_FTRACE */
+diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h
+index ec9c04de3664..ff05992dae7a 100644
+--- a/arch/sparc/include/asm/trap_block.h
++++ b/arch/sparc/include/asm/trap_block.h
+@@ -54,6 +54,7 @@ extern struct trap_per_cpu trap_block[NR_CPUS];
+ void init_cur_cpu_trap(struct thread_info *);
+ void setup_tba(void);
+ extern int ncpus_probed;
++extern u64 cpu_mondo_counter[NR_CPUS];
+
+ unsigned long real_hard_smp_processor_id(void);
+
+diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
+index 95a9fa0d2195..4511caa3b7e9 100644
+--- a/arch/sparc/kernel/smp_64.c
++++ b/arch/sparc/kernel/smp_64.c
+@@ -617,22 +617,48 @@ retry:
+ }
+ }
+
+-/* Multi-cpu list version. */
++#define CPU_MONDO_COUNTER(cpuid) (cpu_mondo_counter[cpuid])
++#define MONDO_USEC_WAIT_MIN 2
++#define MONDO_USEC_WAIT_MAX 100
++#define MONDO_RETRY_LIMIT 500000
++
++/* Multi-cpu list version.
++ *
++ * Deliver xcalls to 'cnt' number of cpus in 'cpu_list'.
++ * Sometimes not all cpus receive the mondo, requiring us to re-send
++ * the mondo until all cpus have received, or cpus are truly stuck
++ * unable to receive mondo, and we timeout.
++ * Occasionally a target cpu strand is borrowed briefly by hypervisor to
++ * perform guest service, such as PCIe error handling. Consider the
++ * service time, 1 second overall wait is reasonable for 1 cpu.
++ * Here two in-between mondo check wait time are defined: 2 usec for
++ * single cpu quick turn around and up to 100usec for large cpu count.
++ * Deliver mondo to large number of cpus could take longer, we adjusts
++ * the retry count as long as target cpus are making forward progress.
++ */
+ static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
+ {
+- int retries, this_cpu, prev_sent, i, saw_cpu_error;
++ int this_cpu, tot_cpus, prev_sent, i, rem;
++ int usec_wait, retries, tot_retries;
++ u16 first_cpu = 0xffff;
++ unsigned long xc_rcvd = 0;
+ unsigned long status;
++ int ecpuerror_id = 0;
++ int enocpu_id = 0;
+ u16 *cpu_list;
++ u16 cpu;
+
+ this_cpu = smp_processor_id();
+-
+ cpu_list = __va(tb->cpu_list_pa);
+-
+- saw_cpu_error = 0;
+- retries = 0;
++ usec_wait = cnt * MONDO_USEC_WAIT_MIN;
++ if (usec_wait > MONDO_USEC_WAIT_MAX)
++ usec_wait = MONDO_USEC_WAIT_MAX;
++ retries = tot_retries = 0;
++ tot_cpus = cnt;
+ prev_sent = 0;
++
+ do {
+- int forward_progress, n_sent;
++ int n_sent, mondo_delivered, target_cpu_busy;
+
+ status = sun4v_cpu_mondo_send(cnt,
+ tb->cpu_list_pa,
+@@ -640,94 +666,113 @@ static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
+
+ /* HV_EOK means all cpus received the xcall, we're done. */
+ if (likely(status == HV_EOK))
+- break;
++ goto xcall_done;
++
++ /* If not these non-fatal errors, panic */
++ if (unlikely((status != HV_EWOULDBLOCK) &&
++ (status != HV_ECPUERROR) &&
++ (status != HV_ENOCPU)))
++ goto fatal_errors;
+
+ /* First, see if we made any forward progress.
++ *
++ * Go through the cpu_list, count the target cpus that have
++ * received our mondo (n_sent), and those that did not (rem).
++ * Re-pack cpu_list with the cpus remain to be retried in the
++ * front - this simplifies tracking the truly stalled cpus.
+ *
+ * The hypervisor indicates successful sends by setting
+ * cpu list entries to the value 0xffff.
++ *
++ * EWOULDBLOCK means some target cpus did not receive the
++ * mondo and retry usually helps.
++ *
++ * ECPUERROR means at least one target cpu is in error state,
++ * it's usually safe to skip the faulty cpu and retry.
++ *
++ * ENOCPU means one of the target cpu doesn't belong to the
++ * domain, perhaps offlined which is unexpected, but not
++ * fatal and it's okay to skip the offlined cpu.
+ */
++ rem = 0;
+ n_sent = 0;
+ for (i = 0; i < cnt; i++) {
+- if (likely(cpu_list[i] == 0xffff))
++ cpu = cpu_list[i];
++ if (likely(cpu == 0xffff)) {
+ n_sent++;
++ } else if ((status == HV_ECPUERROR) &&
++ (sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) {
++ ecpuerror_id = cpu + 1;
++ } else if (status == HV_ENOCPU && !cpu_online(cpu)) {
++ enocpu_id = cpu + 1;
++ } else {
++ cpu_list[rem++] = cpu;
++ }
+ }
+
+- forward_progress = 0;
+- if (n_sent > prev_sent)
+- forward_progress = 1;
++ /* No cpu remained, we're done. */
++ if (rem == 0)
++ break;
+
+- prev_sent = n_sent;
++ /* Otherwise, update the cpu count for retry. */
++ cnt = rem;
+
+- /* If we get a HV_ECPUERROR, then one or more of the cpus
+- * in the list are in error state. Use the cpu_state()
+- * hypervisor call to find out which cpus are in error state.
++ /* Record the overall number of mondos received by the
++ * first of the remaining cpus.
+ */
+- if (unlikely(status == HV_ECPUERROR)) {
+- for (i = 0; i < cnt; i++) {
+- long err;
+- u16 cpu;
++ if (first_cpu != cpu_list[0]) {
++ first_cpu = cpu_list[0];
++ xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
++ }
+
+- cpu = cpu_list[i];
+- if (cpu == 0xffff)
+- continue;
++ /* Was any mondo delivered successfully? */
++ mondo_delivered = (n_sent > prev_sent);
++ prev_sent = n_sent;
+
+- err = sun4v_cpu_state(cpu);
+- if (err == HV_CPU_STATE_ERROR) {
+- saw_cpu_error = (cpu + 1);
+- cpu_list[i] = 0xffff;
+- }
+- }
+- } else if (unlikely(status != HV_EWOULDBLOCK))
+- goto fatal_mondo_error;
++ /* or, was any target cpu busy processing other mondos? */
++ target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu));
++ xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
+
+- /* Don't bother rewriting the CPU list, just leave the
+- * 0xffff and non-0xffff entries in there and the
+- * hypervisor will do the right thing.
+- *
+- * Only advance timeout state if we didn't make any
+- * forward progress.
++ /* Retry count is for no progress. If we're making progress,
++ * reset the retry count.
+ */
+- if (unlikely(!forward_progress)) {
+- if (unlikely(++retries > 10000))
+- goto fatal_mondo_timeout;
+-
+- /* Delay a little bit to let other cpus catch up
+- * on their cpu mondo queue work.
+- */
+- udelay(2 * cnt);
++ if (likely(mondo_delivered || target_cpu_busy)) {
++ tot_retries += retries;
++ retries = 0;
++ } else if (unlikely(retries > MONDO_RETRY_LIMIT)) {
++ goto fatal_mondo_timeout;
+ }
+- } while (1);
+
+- if (unlikely(saw_cpu_error))
+- goto fatal_mondo_cpu_error;
++ /* Delay a little bit to let other cpus catch up on
++ * their cpu mondo queue work.
++ */
++ if (!mondo_delivered)
++ udelay(usec_wait);
+
+- return;
++ retries++;
++ } while (1);
+
+-fatal_mondo_cpu_error:
+- printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
+- "(including %d) were in error state\n",
+- this_cpu, saw_cpu_error - 1);
++xcall_done:
++ if (unlikely(ecpuerror_id > 0)) {
++ pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n",
++ this_cpu, ecpuerror_id - 1);
++ } else if (unlikely(enocpu_id > 0)) {
++ pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n",
++ this_cpu, enocpu_id - 1);
++ }
+ return;
+
++fatal_errors:
++ /* fatal errors include bad alignment, etc */
++ pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n",
++ this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
++ panic("Unexpected SUN4V mondo error %lu\n", status);
++
+ fatal_mondo_timeout:
+- printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
+- " progress after %d retries.\n",
+- this_cpu, retries);
+- goto dump_cpu_list_and_out;
+-
+-fatal_mondo_error:
+- printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
+- this_cpu, status);
+- printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
+- "mondo_block_pa(%lx)\n",
+- this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
+-
+-dump_cpu_list_and_out:
+- printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
+- for (i = 0; i < cnt; i++)
+- printk("%u ", cpu_list[i]);
+- printk("]\n");
++ /* some cpus being non-responsive to the cpu mondo */
++ pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n",
++ this_cpu, first_cpu, (tot_retries + retries), tot_cpus);
++ panic("SUN4V mondo timeout panic\n");
+ }
+
+ static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);
+diff --git a/arch/sparc/kernel/sun4v_ivec.S b/arch/sparc/kernel/sun4v_ivec.S
+index 559bc5e9c199..34631995859a 100644
+--- a/arch/sparc/kernel/sun4v_ivec.S
++++ b/arch/sparc/kernel/sun4v_ivec.S
+@@ -26,6 +26,21 @@ sun4v_cpu_mondo:
+ ldxa [%g0] ASI_SCRATCHPAD, %g4
+ sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4
+
++ /* Get smp_processor_id() into %g3 */
++ sethi %hi(trap_block), %g5
++ or %g5, %lo(trap_block), %g5
++ sub %g4, %g5, %g3
++ srlx %g3, TRAP_BLOCK_SZ_SHIFT, %g3
++
++ /* Increment cpu_mondo_counter[smp_processor_id()] */
++ sethi %hi(cpu_mondo_counter), %g5
++ or %g5, %lo(cpu_mondo_counter), %g5
++ sllx %g3, 3, %g3
++ add %g5, %g3, %g5
++ ldx [%g5], %g3
++ add %g3, 1, %g3
++ stx %g3, [%g5]
++
+ /* Get CPU mondo queue base phys address into %g7. */
+ ldx [%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
+
+diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
+index cc97a43268ee..d883c5951e8b 100644
+--- a/arch/sparc/kernel/traps_64.c
++++ b/arch/sparc/kernel/traps_64.c
+@@ -2659,6 +2659,7 @@ void do_getpsr(struct pt_regs *regs)
+ }
+ }
+
++u64 cpu_mondo_counter[NR_CPUS] = {0};
+ struct trap_per_cpu trap_block[NR_CPUS];
+ EXPORT_SYMBOL(trap_block);
+
+diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
+index 318b8465d302..06ceddb3a22e 100644
+--- a/arch/x86/boot/string.c
++++ b/arch/x86/boot/string.c
+@@ -14,6 +14,7 @@
+
+ #include <linux/types.h>
+ #include "ctype.h"
++#include "string.h"
+
+ int memcmp(const void *s1, const void *s2, size_t len)
+ {
+diff --git a/arch/x86/boot/string.h b/arch/x86/boot/string.h
+index 725e820602b1..113588ddb43f 100644
+--- a/arch/x86/boot/string.h
++++ b/arch/x86/boot/string.h
+@@ -18,4 +18,13 @@ int memcmp(const void *s1, const void *s2, size_t len);
+ #define memset(d,c,l) __builtin_memset(d,c,l)
+ #define memcmp __builtin_memcmp
+
++extern int strcmp(const char *str1, const char *str2);
++extern int strncmp(const char *cs, const char *ct, size_t count);
++extern size_t strlen(const char *s);
++extern char *strstr(const char *s1, const char *s2);
++extern size_t strnlen(const char *s, size_t maxlen);
++extern unsigned int atou(const char *s);
++extern unsigned long long simple_strtoull(const char *cp, char **endp,
++ unsigned int base);
++
+ #endif /* BOOT_STRING_H */
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index cec49ecf5f31..32187f8a49b4 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -151,6 +151,8 @@ void kvm_async_pf_task_wait(u32 token)
+ if (hlist_unhashed(&n.link))
+ break;
+
++ rcu_irq_exit();
++
+ if (!n.halted) {
+ local_irq_enable();
+ schedule();
+@@ -159,11 +161,11 @@ void kvm_async_pf_task_wait(u32 token)
+ /*
+ * We cannot reschedule. So halt.
+ */
+- rcu_irq_exit();
+ native_safe_halt();
+ local_irq_disable();
+- rcu_irq_enter();
+ }
++
++ rcu_irq_enter();
+ }
+ if (!n.halted)
+ finish_wait(&n.wq, &wait);
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index e417e1a1d02c..5b2aee83d776 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -2832,10 +2832,12 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
+ static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
+ {
+ if (!sata_pmp_attached(ap)) {
+- if (likely(devno < ata_link_max_devices(&ap->link)))
++ if (likely(devno >= 0 &&
++ devno < ata_link_max_devices(&ap->link)))
+ return &ap->link.device[devno];
+ } else {
+- if (likely(devno < ap->nr_pmp_links))
++ if (likely(devno >= 0 &&
++ devno < ap->nr_pmp_links))
+ return &ap->pmp_link[devno].device[0];
+ }
+
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index 6ca35495a5be..1e5cd39d0cc2 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -641,11 +641,12 @@ static int virtblk_probe(struct virtio_device *vdev)
+ if (err)
+ goto out_put_disk;
+
+- q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
++ q = blk_mq_init_queue(&vblk->tag_set);
+ if (IS_ERR(q)) {
+ err = -ENOMEM;
+ goto out_free_tags;
+ }
++ vblk->disk->queue = q;
+
+ q->queuedata = vblk;
+
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+index bf4674aa6405..bb9cd35d7fdf 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
++++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+@@ -296,7 +296,7 @@ static int rcar_du_probe(struct platform_device *pdev)
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(rcdu->mmio))
+- ret = PTR_ERR(rcdu->mmio);
++ return PTR_ERR(rcdu->mmio);
+
+ /* DRM/KMS objects */
+ ddev = drm_dev_alloc(&rcar_du_driver, &pdev->dev);
+diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
+index 6a81e084593b..2b59d80a09b8 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
++++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
+@@ -338,7 +338,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
+ info->fbops = &virtio_gpufb_ops;
+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
+
+- info->screen_base = obj->vmap;
++ info->screen_buffer = obj->vmap;
+ info->screen_size = obj->gem_base.size;
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(info, &vfbdev->helper,
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index b0edb66a291b..0b7f5a701c60 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -1581,7 +1581,7 @@ isert_rcv_completion(struct iser_rx_desc *desc,
+ struct isert_conn *isert_conn,
+ u32 xfer_len)
+ {
+- struct ib_device *ib_dev = isert_conn->cm_id->device;
++ struct ib_device *ib_dev = isert_conn->device->ib_device;
+ struct iscsi_hdr *hdr;
+ u64 rx_dma;
+ int rx_buflen;
+diff --git a/drivers/media/pci/saa7164/saa7164-bus.c b/drivers/media/pci/saa7164/saa7164-bus.c
+index a18fe5d47238..b4857cd7069e 100644
+--- a/drivers/media/pci/saa7164/saa7164-bus.c
++++ b/drivers/media/pci/saa7164/saa7164-bus.c
+@@ -393,11 +393,11 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
+ msg_tmp.size = le16_to_cpu((__force __le16)msg_tmp.size);
+ msg_tmp.command = le32_to_cpu((__force __le32)msg_tmp.command);
+ msg_tmp.controlselector = le16_to_cpu((__force __le16)msg_tmp.controlselector);
++ memcpy(msg, &msg_tmp, sizeof(*msg));
+
+ /* No need to update the read positions, because this was a peek */
+ /* If the caller specifically want to peek, return */
+ if (peekonly) {
+- memcpy(msg, &msg_tmp, sizeof(*msg));
+ goto peekout;
+ }
+
+@@ -442,21 +442,15 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
+ space_rem = bus->m_dwSizeGetRing - curr_grp;
+
+ if (space_rem < sizeof(*msg)) {
+- /* msg wraps around the ring */
+- memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, space_rem);
+- memcpy_fromio((u8 *)msg + space_rem, bus->m_pdwGetRing,
+- sizeof(*msg) - space_rem);
+ if (buf)
+ memcpy_fromio(buf, bus->m_pdwGetRing + sizeof(*msg) -
+ space_rem, buf_size);
+
+ } else if (space_rem == sizeof(*msg)) {
+- memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
+ if (buf)
+ memcpy_fromio(buf, bus->m_pdwGetRing, buf_size);
+ } else {
+ /* Additional data wraps around the ring */
+- memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
+ if (buf) {
+ memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp +
+ sizeof(*msg), space_rem - sizeof(*msg));
+@@ -469,15 +463,10 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
+
+ } else {
+ /* No wrapping */
+- memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
+ if (buf)
+ memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp + sizeof(*msg),
+ buf_size);
+ }
+- /* Convert from little endian to CPU */
+- msg->size = le16_to_cpu((__force __le16)msg->size);
+- msg->command = le32_to_cpu((__force __le32)msg->command);
+- msg->controlselector = le16_to_cpu((__force __le16)msg->controlselector);
+
+ /* Update the read positions, adjusting the ring */
+ saa7164_writel(bus->m_dwGetReadPos, new_grp);
+diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
+index 7767e072d623..1f656a3a84b9 100644
+--- a/drivers/media/platform/davinci/vpfe_capture.c
++++ b/drivers/media/platform/davinci/vpfe_capture.c
+@@ -1709,27 +1709,9 @@ static long vpfe_param_handler(struct file *file, void *priv,
+
+ switch (cmd) {
+ case VPFE_CMD_S_CCDC_RAW_PARAMS:
++ ret = -EINVAL;
+ v4l2_warn(&vpfe_dev->v4l2_dev,
+- "VPFE_CMD_S_CCDC_RAW_PARAMS: experimental ioctl\n");
+- if (ccdc_dev->hw_ops.set_params) {
+- ret = ccdc_dev->hw_ops.set_params(param);
+- if (ret) {
+- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+- "Error setting parameters in CCDC\n");
+- goto unlock_out;
+- }
+- ret = vpfe_get_ccdc_image_format(vpfe_dev,
+- &vpfe_dev->fmt);
+- if (ret < 0) {
+- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+- "Invalid image format at CCDC\n");
+- goto unlock_out;
+- }
+- } else {
+- ret = -EINVAL;
+- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+- "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
+- }
++ "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
+ break;
+ default:
+ ret = -ENOTTY;
+diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
+index a32659fcd266..efc21b1da211 100644
+--- a/drivers/media/rc/ir-lirc-codec.c
++++ b/drivers/media/rc/ir-lirc-codec.c
+@@ -254,7 +254,7 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
+ return 0;
+
+ case LIRC_GET_REC_RESOLUTION:
+- val = dev->rx_resolution;
++ val = dev->rx_resolution / 1000;
+ break;
+
+ case LIRC_SET_WIDEBAND_RECEIVER:
+diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
+index ecc4a334c507..0a54e7dac0ab 100644
+--- a/drivers/net/ethernet/aurora/nb8800.c
++++ b/drivers/net/ethernet/aurora/nb8800.c
+@@ -608,7 +608,7 @@ static void nb8800_mac_config(struct net_device *dev)
+ mac_mode |= HALF_DUPLEX;
+
+ if (gigabit) {
+- if (priv->phy_mode == PHY_INTERFACE_MODE_RGMII)
++ if (phy_interface_is_rgmii(dev->phydev))
+ mac_mode |= RGMII_MODE;
+
+ mac_mode |= GMAC_MODE;
+@@ -1295,11 +1295,10 @@ static int nb8800_tangox_init(struct net_device *dev)
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII:
+- pad_mode = PAD_MODE_RGMII;
+- break;
+-
++ case PHY_INTERFACE_MODE_RGMII_ID:
++ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+- pad_mode = PAD_MODE_RGMII | PAD_MODE_GTX_CLK_DELAY;
++ pad_mode = PAD_MODE_RGMII;
+ break;
+
+ default:
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 21e5b9ed1ead..3613469dc5c6 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -8722,11 +8722,14 @@ static void tg3_free_consistent(struct tg3 *tp)
+ tg3_mem_rx_release(tp);
+ tg3_mem_tx_release(tp);
+
++ /* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
++ tg3_full_lock(tp, 0);
+ if (tp->hw_stats) {
+ dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
+ tp->hw_stats, tp->stats_mapping);
+ tp->hw_stats = NULL;
+ }
++ tg3_full_unlock(tp);
+ }
+
+ /*
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+index cc199063612a..6c66d2979795 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+@@ -630,6 +630,10 @@ static void dump_command(struct mlx5_core_dev *dev,
+ pr_debug("\n");
+ }
+
++static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
++static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
++ struct mlx5_cmd_msg *msg);
++
+ static void cmd_work_handler(struct work_struct *work)
+ {
+ struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
+@@ -638,16 +642,27 @@ static void cmd_work_handler(struct work_struct *work)
+ struct mlx5_cmd_layout *lay;
+ struct semaphore *sem;
+ unsigned long flags;
++ int alloc_ret;
+
+ sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
+ down(sem);
+ if (!ent->page_queue) {
+- ent->idx = alloc_ent(cmd);
+- if (ent->idx < 0) {
++ alloc_ret = alloc_ent(cmd);
++ if (alloc_ret < 0) {
++ if (ent->callback) {
++ ent->callback(-EAGAIN, ent->context);
++ mlx5_free_cmd_msg(dev, ent->out);
++ free_msg(dev, ent->in);
++ free_cmd(ent);
++ } else {
++ ent->ret = -EAGAIN;
++ complete(&ent->done);
++ }
+ mlx5_core_err(dev, "failed to allocate command entry\n");
+ up(sem);
+ return;
+ }
++ ent->idx = alloc_ret;
+ } else {
+ ent->idx = cmd->max_reg_cmds;
+ spin_lock_irqsave(&cmd->alloc_lock, flags);
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index 4296066a7ad3..479af106aaeb 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -819,6 +819,7 @@ static struct sh_eth_cpu_data r8a7740_data = {
+ .rpadir_value = 2 << 16,
+ .no_trimd = 1,
+ .no_ade = 1,
++ .hw_crc = 1,
+ .tsu = 1,
+ .select_mii = 1,
+ .shift_rd0 = 1,
+diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
+index bca6a1e72d1d..e1bb802d4a4d 100644
+--- a/drivers/net/irda/mcs7780.c
++++ b/drivers/net/irda/mcs7780.c
+@@ -141,9 +141,19 @@ static int mcs_set_reg(struct mcs_cb *mcs, __u16 reg, __u16 val)
+ static int mcs_get_reg(struct mcs_cb *mcs, __u16 reg, __u16 * val)
+ {
+ struct usb_device *dev = mcs->usbdev;
+- int ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
+- MCS_RD_RTYPE, 0, reg, val, 2,
+- msecs_to_jiffies(MCS_CTRL_TIMEOUT));
++ void *dmabuf;
++ int ret;
++
++ dmabuf = kmalloc(sizeof(__u16), GFP_KERNEL);
++ if (!dmabuf)
++ return -ENOMEM;
++
++ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
++ MCS_RD_RTYPE, 0, reg, dmabuf, 2,
++ msecs_to_jiffies(MCS_CTRL_TIMEOUT));
++
++ memcpy(val, dmabuf, sizeof(__u16));
++ kfree(dmabuf);
+
+ return ret;
+ }
+diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
+index 32f10662f4ac..7242dd4b3238 100644
+--- a/drivers/net/phy/dp83867.c
++++ b/drivers/net/phy/dp83867.c
+@@ -29,6 +29,7 @@
+ #define MII_DP83867_MICR 0x12
+ #define MII_DP83867_ISR 0x13
+ #define DP83867_CTRL 0x1f
++#define DP83867_CFG3 0x1e
+
+ /* Extended Registers */
+ #define DP83867_RGMIICTL 0x0032
+@@ -89,6 +90,8 @@ static int dp83867_config_intr(struct phy_device *phydev)
+ micr_status |=
+ (MII_DP83867_MICR_AN_ERR_INT_EN |
+ MII_DP83867_MICR_SPEED_CHNG_INT_EN |
++ MII_DP83867_MICR_AUTONEG_COMP_INT_EN |
++ MII_DP83867_MICR_LINK_STS_CHNG_INT_EN |
+ MII_DP83867_MICR_DUP_MODE_CHNG_INT_EN |
+ MII_DP83867_MICR_SLEEP_MODE_CHNG_INT_EN);
+
+@@ -184,6 +187,13 @@ static int dp83867_config_init(struct phy_device *phydev)
+ DP83867_DEVADDR, phydev->addr, delay);
+ }
+
++ /* Enable Interrupt output INT_OE in CFG3 register */
++ if (phy_interrupt_is_valid(phydev)) {
++ val = phy_read(phydev, DP83867_CFG3);
++ val |= BIT(7);
++ phy_write(phydev, DP83867_CFG3, val);
++ }
++
+ return 0;
+ }
+
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index 851c0e121807..49d9f0a789fe 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -541,6 +541,9 @@ void phy_stop_machine(struct phy_device *phydev)
+ if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
+ phydev->state = PHY_UP;
+ mutex_unlock(&phydev->lock);
++
++ /* Now we can run the state machine synchronously */
++ phy_state_machine(&phydev->state_queue.work);
+ }
+
+ /**
+@@ -918,6 +921,15 @@ void phy_state_machine(struct work_struct *work)
+ if (old_link != phydev->link)
+ phydev->state = PHY_CHANGELINK;
+ }
++ /*
++ * Failsafe: check that nobody set phydev->link=0 between two
++ * poll cycles, otherwise we won't leave RUNNING state as long
++ * as link remains down.
++ */
++ if (!phydev->link && phydev->state == PHY_RUNNING) {
++ phydev->state = PHY_CHANGELINK;
++ dev_err(&phydev->dev, "no link in PHY_RUNNING\n");
++ }
+ break;
+ case PHY_CHANGELINK:
+ err = phy_read_status(phydev);
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index 1d1e5f7723ab..8179727d3423 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -1368,6 +1368,8 @@ static int phy_remove(struct device *dev)
+ {
+ struct phy_device *phydev = to_phy_device(dev);
+
++ cancel_delayed_work_sync(&phydev->state_queue);
++
+ mutex_lock(&phydev->lock);
+ phydev->state = PHY_DOWN;
+ mutex_unlock(&phydev->lock);
+diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
+index 0333ab0fd926..34173b5e886f 100644
+--- a/drivers/net/xen-netback/common.h
++++ b/drivers/net/xen-netback/common.h
+@@ -201,6 +201,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
+ unsigned long remaining_credit;
+ struct timer_list credit_timeout;
+ u64 credit_window_start;
++ bool rate_limited;
+
+ /* Statistics */
+ struct xenvif_stats stats;
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+index e7bd63eb2876..60b26f32d31d 100644
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -105,7 +105,11 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
+
+ if (work_done < budget) {
+ napi_complete(napi);
+- xenvif_napi_schedule_or_enable_events(queue);
++ /* If the queue is rate-limited, it shall be
++ * rescheduled in the timer callback.
++ */
++ if (likely(!queue->rate_limited))
++ xenvif_napi_schedule_or_enable_events(queue);
+ }
+
+ return work_done;
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index 1049c34e7d43..72ee1c305cc4 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -687,6 +687,7 @@ static void tx_add_credit(struct xenvif_queue *queue)
+ max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
+
+ queue->remaining_credit = min(max_credit, max_burst);
++ queue->rate_limited = false;
+ }
+
+ void xenvif_tx_credit_callback(unsigned long data)
+@@ -1184,8 +1185,10 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
+ msecs_to_jiffies(queue->credit_usec / 1000);
+
+ /* Timer could already be pending in rare cases. */
+- if (timer_pending(&queue->credit_timeout))
++ if (timer_pending(&queue->credit_timeout)) {
++ queue->rate_limited = true;
+ return true;
++ }
+
+ /* Passed the point where we can replenish credit? */
+ if (time_after_eq64(now, next_credit)) {
+@@ -1200,6 +1203,7 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
+ mod_timer(&queue->credit_timeout,
+ next_credit);
+ queue->credit_window_start = next_credit;
++ queue->rate_limited = true;
+
+ return true;
+ }
+diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
+index 6b942d9e5b74..1ed85dfc008d 100644
+--- a/drivers/scsi/qla2xxx/qla_attr.c
++++ b/drivers/scsi/qla2xxx/qla_attr.c
+@@ -329,12 +329,15 @@ qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
+ struct qla_hw_data *ha = vha->hw;
+ ssize_t rval = 0;
+
++ mutex_lock(&ha->optrom_mutex);
++
+ if (ha->optrom_state != QLA_SREADING)
+- return 0;
++ goto out;
+
+- mutex_lock(&ha->optrom_mutex);
+ rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
+ ha->optrom_region_size);
++
++out:
+ mutex_unlock(&ha->optrom_mutex);
+
+ return rval;
+@@ -349,14 +352,19 @@ qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
+ struct device, kobj)));
+ struct qla_hw_data *ha = vha->hw;
+
+- if (ha->optrom_state != QLA_SWRITING)
++ mutex_lock(&ha->optrom_mutex);
++
++ if (ha->optrom_state != QLA_SWRITING) {
++ mutex_unlock(&ha->optrom_mutex);
+ return -EINVAL;
+- if (off > ha->optrom_region_size)
++ }
++ if (off > ha->optrom_region_size) {
++ mutex_unlock(&ha->optrom_mutex);
+ return -ERANGE;
++ }
+ if (off + count > ha->optrom_region_size)
+ count = ha->optrom_region_size - off;
+
+- mutex_lock(&ha->optrom_mutex);
+ memcpy(&ha->optrom_buffer[off], buf, count);
+ mutex_unlock(&ha->optrom_mutex);
+
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index a180c000e246..31d5d9c0e10b 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -3965,6 +3965,8 @@ int iscsi_target_tx_thread(void *arg)
+ {
+ int ret = 0;
+ struct iscsi_conn *conn = arg;
++ bool conn_freed = false;
++
+ /*
+ * Allow ourselves to be interrupted by SIGINT so that a
+ * connection recovery / failure event can be triggered externally.
+@@ -3990,12 +3992,14 @@ get_immediate:
+ goto transport_err;
+
+ ret = iscsit_handle_response_queue(conn);
+- if (ret == 1)
++ if (ret == 1) {
+ goto get_immediate;
+- else if (ret == -ECONNRESET)
++ } else if (ret == -ECONNRESET) {
++ conn_freed = true;
+ goto out;
+- else if (ret < 0)
++ } else if (ret < 0) {
+ goto transport_err;
++ }
+ }
+
+ transport_err:
+@@ -4005,8 +4009,13 @@ transport_err:
+ * responsible for cleaning up the early connection failure.
+ */
+ if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
+- iscsit_take_action_for_connection_exit(conn);
++ iscsit_take_action_for_connection_exit(conn, &conn_freed);
+ out:
++ if (!conn_freed) {
++ while (!kthread_should_stop()) {
++ msleep(100);
++ }
++ }
+ return 0;
+ }
+
+@@ -4105,6 +4114,7 @@ int iscsi_target_rx_thread(void *arg)
+ u32 checksum = 0, digest = 0;
+ struct iscsi_conn *conn = arg;
+ struct kvec iov;
++ bool conn_freed = false;
+ /*
+ * Allow ourselves to be interrupted by SIGINT so that a
+ * connection recovery / failure event can be triggered externally.
+@@ -4116,7 +4126,7 @@ int iscsi_target_rx_thread(void *arg)
+ */
+ rc = wait_for_completion_interruptible(&conn->rx_login_comp);
+ if (rc < 0 || iscsi_target_check_conn_state(conn))
+- return 0;
++ goto out;
+
+ if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
+ struct completion comp;
+@@ -4201,7 +4211,13 @@ int iscsi_target_rx_thread(void *arg)
+ transport_err:
+ if (!signal_pending(current))
+ atomic_set(&conn->transport_failed, 1);
+- iscsit_take_action_for_connection_exit(conn);
++ iscsit_take_action_for_connection_exit(conn, &conn_freed);
++out:
++ if (!conn_freed) {
++ while (!kthread_should_stop()) {
++ msleep(100);
++ }
++ }
+ return 0;
+ }
+
+@@ -4575,8 +4591,11 @@ static void iscsit_logout_post_handler_closesession(
+ * always sleep waiting for RX/TX thread shutdown to complete
+ * within iscsit_close_connection().
+ */
+- if (conn->conn_transport->transport_type == ISCSI_TCP)
++ if (conn->conn_transport->transport_type == ISCSI_TCP) {
+ sleep = cmpxchg(&conn->tx_thread_active, true, false);
++ if (!sleep)
++ return;
++ }
+
+ atomic_set(&conn->conn_logout_remove, 0);
+ complete(&conn->conn_logout_comp);
+@@ -4592,8 +4611,11 @@ static void iscsit_logout_post_handler_samecid(
+ {
+ int sleep = 1;
+
+- if (conn->conn_transport->transport_type == ISCSI_TCP)
++ if (conn->conn_transport->transport_type == ISCSI_TCP) {
+ sleep = cmpxchg(&conn->tx_thread_active, true, false);
++ if (!sleep)
++ return;
++ }
+
+ atomic_set(&conn->conn_logout_remove, 0);
+ complete(&conn->conn_logout_comp);
+diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
+index 210f6e4830e3..6c88fb021444 100644
+--- a/drivers/target/iscsi/iscsi_target_erl0.c
++++ b/drivers/target/iscsi/iscsi_target_erl0.c
+@@ -930,8 +930,10 @@ static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn)
+ }
+ }
+
+-void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
++void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn, bool *conn_freed)
+ {
++ *conn_freed = false;
++
+ spin_lock_bh(&conn->state_lock);
+ if (atomic_read(&conn->connection_exit)) {
+ spin_unlock_bh(&conn->state_lock);
+@@ -942,6 +944,7 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
+ if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
+ spin_unlock_bh(&conn->state_lock);
+ iscsit_close_connection(conn);
++ *conn_freed = true;
+ return;
+ }
+
+@@ -955,4 +958,5 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
+ spin_unlock_bh(&conn->state_lock);
+
+ iscsit_handle_connection_cleanup(conn);
++ *conn_freed = true;
+ }
+diff --git a/drivers/target/iscsi/iscsi_target_erl0.h b/drivers/target/iscsi/iscsi_target_erl0.h
+index a9e2f9497fb2..fbc1d84a63c3 100644
+--- a/drivers/target/iscsi/iscsi_target_erl0.h
++++ b/drivers/target/iscsi/iscsi_target_erl0.h
+@@ -9,6 +9,6 @@ extern int iscsit_stop_time2retain_timer(struct iscsi_session *);
+ extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *);
+ extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
+ extern void iscsit_fall_back_to_erl0(struct iscsi_session *);
+-extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *);
++extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *, bool *);
+
+ #endif /*** ISCSI_TARGET_ERL0_H ***/
+diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
+index 4a137b0ae3dc..b19edffa7d98 100644
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -1436,5 +1436,9 @@ int iscsi_target_login_thread(void *arg)
+ break;
+ }
+
++ while (!kthread_should_stop()) {
++ msleep(100);
++ }
++
+ return 0;
+ }
+diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
+index 549a2bbbf4df..58c629aec73c 100644
+--- a/drivers/target/iscsi/iscsi_target_nego.c
++++ b/drivers/target/iscsi/iscsi_target_nego.c
+@@ -489,14 +489,60 @@ static void iscsi_target_restore_sock_callbacks(struct iscsi_conn *conn)
+
+ static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *);
+
+-static bool iscsi_target_sk_state_check(struct sock *sk)
++static bool __iscsi_target_sk_check_close(struct sock *sk)
+ {
+ if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) {
+- pr_debug("iscsi_target_sk_state_check: TCP_CLOSE_WAIT|TCP_CLOSE,"
++ pr_debug("__iscsi_target_sk_check_close: TCP_CLOSE_WAIT|TCP_CLOSE,"
+ "returning FALSE\n");
+- return false;
++ return true;
+ }
+- return true;
++ return false;
++}
++
++static bool iscsi_target_sk_check_close(struct iscsi_conn *conn)
++{
++ bool state = false;
++
++ if (conn->sock) {
++ struct sock *sk = conn->sock->sk;
++
++ read_lock_bh(&sk->sk_callback_lock);
++ state = (__iscsi_target_sk_check_close(sk) ||
++ test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
++ read_unlock_bh(&sk->sk_callback_lock);
++ }
++ return state;
++}
++
++static bool iscsi_target_sk_check_flag(struct iscsi_conn *conn, unsigned int flag)
++{
++ bool state = false;
++
++ if (conn->sock) {
++ struct sock *sk = conn->sock->sk;
++
++ read_lock_bh(&sk->sk_callback_lock);
++ state = test_bit(flag, &conn->login_flags);
++ read_unlock_bh(&sk->sk_callback_lock);
++ }
++ return state;
++}
++
++static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned int flag)
++{
++ bool state = false;
++
++ if (conn->sock) {
++ struct sock *sk = conn->sock->sk;
++
++ write_lock_bh(&sk->sk_callback_lock);
++ state = (__iscsi_target_sk_check_close(sk) ||
++ test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
++ if (!state)
++ clear_bit(flag, &conn->login_flags);
++ write_unlock_bh(&sk->sk_callback_lock);
++ }
++ return state;
+ }
+
+ static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login)
+@@ -536,6 +582,20 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
+
+ pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n",
+ conn, current->comm, current->pid);
++ /*
++ * If iscsi_target_do_login_rx() has been invoked by ->sk_data_ready()
++ * before initial PDU processing in iscsi_target_start_negotiation()
++ * has completed, go ahead and retry until it's cleared.
++ *
++ * Otherwise if the TCP connection drops while this is occuring,
++ * iscsi_target_start_negotiation() will detect the failure, call
++ * cancel_delayed_work_sync(&conn->login_work), and cleanup the
++ * remaining iscsi connection resources from iscsi_np process context.
++ */
++ if (iscsi_target_sk_check_flag(conn, LOGIN_FLAGS_INITIAL_PDU)) {
++ schedule_delayed_work(&conn->login_work, msecs_to_jiffies(10));
++ return;
++ }
+
+ spin_lock(&tpg->tpg_state_lock);
+ state = (tpg->tpg_state == TPG_STATE_ACTIVE);
+@@ -543,26 +603,12 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
+
+ if (!state) {
+ pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n");
+- iscsi_target_restore_sock_callbacks(conn);
+- iscsi_target_login_drop(conn, login);
+- iscsit_deaccess_np(np, tpg, tpg_np);
+- return;
++ goto err;
+ }
+
+- if (conn->sock) {
+- struct sock *sk = conn->sock->sk;
+-
+- read_lock_bh(&sk->sk_callback_lock);
+- state = iscsi_target_sk_state_check(sk);
+- read_unlock_bh(&sk->sk_callback_lock);
+-
+- if (!state) {
+- pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
+- iscsi_target_restore_sock_callbacks(conn);
+- iscsi_target_login_drop(conn, login);
+- iscsit_deaccess_np(np, tpg, tpg_np);
+- return;
+- }
++ if (iscsi_target_sk_check_close(conn)) {
++ pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
++ goto err;
+ }
+
+ conn->login_kworker = current;
+@@ -580,34 +626,29 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
+ flush_signals(current);
+ conn->login_kworker = NULL;
+
+- if (rc < 0) {
+- iscsi_target_restore_sock_callbacks(conn);
+- iscsi_target_login_drop(conn, login);
+- iscsit_deaccess_np(np, tpg, tpg_np);
+- return;
+- }
++ if (rc < 0)
++ goto err;
+
+ pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n",
+ conn, current->comm, current->pid);
+
+ rc = iscsi_target_do_login(conn, login);
+ if (rc < 0) {
+- iscsi_target_restore_sock_callbacks(conn);
+- iscsi_target_login_drop(conn, login);
+- iscsit_deaccess_np(np, tpg, tpg_np);
++ goto err;
+ } else if (!rc) {
+- if (conn->sock) {
+- struct sock *sk = conn->sock->sk;
+-
+- write_lock_bh(&sk->sk_callback_lock);
+- clear_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags);
+- write_unlock_bh(&sk->sk_callback_lock);
+- }
++ if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_READ_ACTIVE))
++ goto err;
+ } else if (rc == 1) {
+ iscsi_target_nego_release(conn);
+ iscsi_post_login_handler(np, conn, zero_tsih);
+ iscsit_deaccess_np(np, tpg, tpg_np);
+ }
++ return;
++
++err:
++ iscsi_target_restore_sock_callbacks(conn);
++ iscsi_target_login_drop(conn, login);
++ iscsit_deaccess_np(np, tpg, tpg_np);
+ }
+
+ static void iscsi_target_do_cleanup(struct work_struct *work)
+@@ -655,31 +696,54 @@ static void iscsi_target_sk_state_change(struct sock *sk)
+ orig_state_change(sk);
+ return;
+ }
++ state = __iscsi_target_sk_check_close(sk);
++ pr_debug("__iscsi_target_sk_close_change: state: %d\n", state);
++
+ if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) {
+ pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1 sk_state_change"
+ " conn: %p\n", conn);
++ if (state)
++ set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
+ write_unlock_bh(&sk->sk_callback_lock);
+ orig_state_change(sk);
+ return;
+ }
+- if (test_and_set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
++ if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
+ pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n",
+ conn);
+ write_unlock_bh(&sk->sk_callback_lock);
+ orig_state_change(sk);
+ return;
+ }
++ /*
++ * If the TCP connection has dropped, go ahead and set LOGIN_FLAGS_CLOSED,
++ * but only queue conn->login_work -> iscsi_target_do_login_rx()
++ * processing if LOGIN_FLAGS_INITIAL_PDU has already been cleared.
++ *
++ * When iscsi_target_do_login_rx() runs, iscsi_target_sk_check_close()
++ * will detect the dropped TCP connection from delayed workqueue context.
++ *
++ * If LOGIN_FLAGS_INITIAL_PDU is still set, which means the initial
++ * iscsi_target_start_negotiation() is running, iscsi_target_do_login()
++ * via iscsi_target_sk_check_close() or iscsi_target_start_negotiation()
++ * via iscsi_target_sk_check_and_clear() is responsible for detecting the
++ * dropped TCP connection in iscsi_np process context, and cleaning up
++ * the remaining iscsi connection resources.
++ */
++ if (state) {
++ pr_debug("iscsi_target_sk_state_change got failed state\n");
++ set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
++ state = test_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
++ write_unlock_bh(&sk->sk_callback_lock);
+
+- state = iscsi_target_sk_state_check(sk);
+- write_unlock_bh(&sk->sk_callback_lock);
+-
+- pr_debug("iscsi_target_sk_state_change: state: %d\n", state);
++ orig_state_change(sk);
+
+- if (!state) {
+- pr_debug("iscsi_target_sk_state_change got failed state\n");
+- schedule_delayed_work(&conn->login_cleanup_work, 0);
++ if (!state)
++ schedule_delayed_work(&conn->login_work, 0);
+ return;
+ }
++ write_unlock_bh(&sk->sk_callback_lock);
++
+ orig_state_change(sk);
+ }
+
+@@ -944,6 +1008,15 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
+ if (iscsi_target_handle_csg_one(conn, login) < 0)
+ return -1;
+ if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
++ /*
++ * Check to make sure the TCP connection has not
++ * dropped asynchronously while session reinstatement
++ * was occuring in this kthread context, before
++ * transitioning to full feature phase operation.
++ */
++ if (iscsi_target_sk_check_close(conn))
++ return -1;
++
+ login->tsih = conn->sess->tsih;
+ login->login_complete = 1;
+ iscsi_target_restore_sock_callbacks(conn);
+@@ -970,21 +1043,6 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
+ break;
+ }
+
1346 |
+- if (conn->sock) { |
1347 |
+- struct sock *sk = conn->sock->sk; |
1348 |
+- bool state; |
1349 |
+- |
1350 |
+- read_lock_bh(&sk->sk_callback_lock); |
1351 |
+- state = iscsi_target_sk_state_check(sk); |
1352 |
+- read_unlock_bh(&sk->sk_callback_lock); |
1353 |
+- |
1354 |
+- if (!state) { |
1355 |
+- pr_debug("iscsi_target_do_login() failed state for" |
1356 |
+- " conn: %p\n", conn); |
1357 |
+- return -1; |
1358 |
+- } |
1359 |
+- } |
1360 |
+- |
1361 |
+ return 0; |
1362 |
+ } |
1363 |
+ |
1364 |
+@@ -1248,16 +1306,28 @@ int iscsi_target_start_negotiation( |
1365 |
+ { |
1366 |
+ int ret; |
1367 |
+ |
1368 |
++ if (conn->sock) { |
1369 |
++ struct sock *sk = conn->sock->sk; |
1370 |
++ |
1371 |
++ write_lock_bh(&sk->sk_callback_lock); |
1372 |
++ set_bit(LOGIN_FLAGS_READY, &conn->login_flags); |
1373 |
++ set_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags); |
1374 |
++ write_unlock_bh(&sk->sk_callback_lock); |
1375 |
++ } |
1376 |
++ /* |
1377 |
++ * If iscsi_target_do_login returns zero to signal more PDU |
1378 |
++ * exchanges are required to complete the login, go ahead and |
1379 |
++ * clear LOGIN_FLAGS_INITIAL_PDU but only if the TCP connection |
1380 |
++ * is still active. |
1381 |
++ * |
1382 |
++ * Otherwise if TCP connection dropped asynchronously, go ahead |
1383 |
++ * and perform connection cleanup now. |
1384 |
++ */ |
1385 |
+ ret = iscsi_target_do_login(conn, login); |
1386 |
+- if (!ret) { |
1387 |
+- if (conn->sock) { |
1388 |
+- struct sock *sk = conn->sock->sk; |
1389 |
++ if (!ret && iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU)) |
1390 |
++ ret = -1; |
1391 |
+ |
1392 |
+- write_lock_bh(&sk->sk_callback_lock); |
1393 |
+- set_bit(LOGIN_FLAGS_READY, &conn->login_flags); |
1394 |
+- write_unlock_bh(&sk->sk_callback_lock); |
1395 |
+- } |
1396 |
+- } else if (ret < 0) { |
1397 |
++ if (ret < 0) { |
1398 |
+ cancel_delayed_work_sync(&conn->login_work); |
1399 |
+ cancel_delayed_work_sync(&conn->login_cleanup_work); |
1400 |
+ iscsi_target_restore_sock_callbacks(conn); |
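The hunks above form one handshake: iscsi_target_start_negotiation() sets LOGIN_FLAGS_INITIAL_PDU under sk->sk_callback_lock before the first login PDU is processed, the state-change callback then only records LOGIN_FLAGS_CLOSED and leaves cleanup to the login path while that flag is set, and iscsi_target_sk_check_and_clear() clears the flag afterwards while reporting whether the connection closed in between. A minimal userspace sketch of that pattern, with a mutex standing in for sk_callback_lock and plain bit masks (rather than the set_bit()/test_bit() bit numbers the kernel uses) for conn->login_flags; illustrative only, not kernel code:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define FLAG_CLOSED      (1u << 0)
#define FLAG_INITIAL_PDU (1u << 1)

static pthread_mutex_t callback_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int login_flags;

/* Models iscsi_target_sk_check_and_clear(): clear a flag, and report
 * whether the connection was marked closed while it was set. */
static bool check_and_clear(unsigned int flag)
{
	bool closed;

	pthread_mutex_lock(&callback_lock);
	closed = login_flags & FLAG_CLOSED;
	login_flags &= ~flag;
	pthread_mutex_unlock(&callback_lock);

	return closed;
}

int main(void)
{
	/* start of negotiation: mark the initial PDU as in flight */
	pthread_mutex_lock(&callback_lock);
	login_flags |= FLAG_INITIAL_PDU;
	pthread_mutex_unlock(&callback_lock);

	/* ... login processing runs here; a state-change callback that
	 * sees FLAG_INITIAL_PDU only records FLAG_CLOSED and defers the
	 * cleanup to the login path ... */

	if (check_and_clear(FLAG_INITIAL_PDU))
		fprintf(stderr, "connection dropped during login\n");

	return 0;
}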
1401 |
+diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c |
1402 |
+index f916d18ccb48..b070ddf1dc37 100644 |
1403 |
+--- a/drivers/target/target_core_fabric_configfs.c |
1404 |
++++ b/drivers/target/target_core_fabric_configfs.c |
1405 |
+@@ -92,6 +92,11 @@ static int target_fabric_mappedlun_link( |
1406 |
+ pr_err("Source se_lun->lun_se_dev does not exist\n"); |
1407 |
+ return -EINVAL; |
1408 |
+ } |
1409 |
++ if (lun->lun_shutdown) { |
1410 |
++ pr_err("Unable to create mappedlun symlink because" |
1411 |
++ " lun->lun_shutdown=true\n"); |
1412 |
++ return -EINVAL; |
1413 |
++ } |
1414 |
+ se_tpg = lun->lun_tpg; |
1415 |
+ |
1416 |
+ nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item; |
1417 |
+diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c |
1418 |
+index 899c33b3c734..f69f4902dc07 100644 |
1419 |
+--- a/drivers/target/target_core_tpg.c |
1420 |
++++ b/drivers/target/target_core_tpg.c |
1421 |
+@@ -673,6 +673,8 @@ void core_tpg_remove_lun( |
1422 |
+ */ |
1423 |
+ struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev); |
1424 |
+ |
1425 |
++ lun->lun_shutdown = true; |
1426 |
++ |
1427 |
+ core_clear_lun_from_tpg(lun, tpg); |
1428 |
+ /* |
1429 |
+ * Wait for any active I/O references to percpu se_lun->lun_ref to |
1430 |
+@@ -694,6 +696,8 @@ void core_tpg_remove_lun( |
1431 |
+ } |
1432 |
+ if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) |
1433 |
+ hlist_del_rcu(&lun->link); |
1434 |
++ |
1435 |
++ lun->lun_shutdown = false; |
1436 |
+ mutex_unlock(&tpg->tpg_lun_mutex); |
1437 |
+ |
1438 |
+ percpu_ref_exit(&lun->lun_ref); |
1439 |
+diff --git a/fs/ext4/file.c b/fs/ext4/file.c |
1440 |
+index 8772bfc3415b..45ef9975caec 100644 |
1441 |
+--- a/fs/ext4/file.c |
1442 |
++++ b/fs/ext4/file.c |
1443 |
+@@ -500,6 +500,8 @@ static int ext4_find_unwritten_pgoff(struct inode *inode, |
1444 |
+ lastoff = page_offset(page); |
1445 |
+ bh = head = page_buffers(page); |
1446 |
+ do { |
1447 |
++ if (lastoff + bh->b_size <= startoff) |
1448 |
++ goto next; |
1449 |
+ if (buffer_uptodate(bh) || |
1450 |
+ buffer_unwritten(bh)) { |
1451 |
+ if (whence == SEEK_DATA) |
1452 |
+@@ -514,6 +516,7 @@ static int ext4_find_unwritten_pgoff(struct inode *inode, |
1453 |
+ unlock_page(page); |
1454 |
+ goto out; |
1455 |
+ } |
1456 |
++next: |
1457 |
+ lastoff += bh->b_size; |
1458 |
+ bh = bh->b_this_page; |
1459 |
+ } while (bh != head); |
1460 |
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c |
1461 |
+index 34038e3598d5..74516efd874c 100644 |
1462 |
+--- a/fs/ext4/resize.c |
1463 |
++++ b/fs/ext4/resize.c |
1464 |
+@@ -1926,7 +1926,8 @@ retry: |
1465 |
+ n_desc_blocks = o_desc_blocks + |
1466 |
+ le16_to_cpu(es->s_reserved_gdt_blocks); |
1467 |
+ n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb); |
1468 |
+- n_blocks_count = n_group * EXT4_BLOCKS_PER_GROUP(sb); |
1469 |
++ n_blocks_count = (ext4_fsblk_t)n_group * |
1470 |
++ EXT4_BLOCKS_PER_GROUP(sb); |
1471 |
+ n_group--; /* set to last group number */ |
1472 |
+ } |
1473 |
+ |
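The cast above matters because both operands of the original multiply are 32-bit. On a filesystem with 4 KiB blocks, EXT4_BLOCKS_PER_GROUP(sb) is 32768, so the product wraps once n_group reaches 2^17 = 131072 groups, i.e. when a resize crosses 16 TiB. A minimal demo of the difference, assuming that common geometry:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t n_group = 131072;                  /* first group count past 16 TiB */
	uint64_t wrong = n_group * 32768;           /* 32-bit multiply wraps to 0    */
	uint64_t right = (uint64_t)n_group * 32768; /* widened before multiplying    */

	printf("wrong=%llu right=%llu\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}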
1474 |
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c |
1475 |
+index 86e1cb899957..4f666368aa85 100644 |
1476 |
+--- a/fs/f2fs/super.c |
1477 |
++++ b/fs/f2fs/super.c |
1478 |
+@@ -1078,6 +1078,8 @@ static int sanity_check_ckpt(struct f2fs_sb_info *sbi) |
1479 |
+ unsigned int total, fsmeta; |
1480 |
+ struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); |
1481 |
+ struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); |
1482 |
++ unsigned int main_segs, blocks_per_seg; |
1483 |
++ int i; |
1484 |
+ |
1485 |
+ total = le32_to_cpu(raw_super->segment_count); |
1486 |
+ fsmeta = le32_to_cpu(raw_super->segment_count_ckpt); |
1487 |
+@@ -1089,6 +1091,20 @@ static int sanity_check_ckpt(struct f2fs_sb_info *sbi) |
1488 |
+ if (unlikely(fsmeta >= total)) |
1489 |
+ return 1; |
1490 |
+ |
1491 |
++ main_segs = le32_to_cpu(raw_super->segment_count_main); |
1492 |
++ blocks_per_seg = sbi->blocks_per_seg; |
1493 |
++ |
1494 |
++ for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) { |
1495 |
++ if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs || |
1496 |
++ le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg) |
1497 |
++ return 1; |
1498 |
++ } |
1499 |
++ for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) { |
1500 |
++ if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs || |
1501 |
++ le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg) |
1502 |
++ return 1; |
1503 |
++ } |
1504 |
++ |
1505 |
+ if (unlikely(f2fs_cp_error(sbi))) { |
1506 |
+ f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck"); |
1507 |
+ return 1; |
1508 |
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h |
1509 |
+index 2ccccbfcd532..36f4695aa604 100644 |
1510 |
+--- a/include/linux/mm_types.h |
1511 |
++++ b/include/linux/mm_types.h |
1512 |
+@@ -503,6 +503,10 @@ struct mm_struct { |
1513 |
+ * PROT_NONE or PROT_NUMA mapped page. |
1514 |
+ */ |
1515 |
+ bool tlb_flush_pending; |
1516 |
++#endif |
1517 |
++#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH |
1518 |
++ /* See flush_tlb_batched_pending() */ |
1519 |
++ bool tlb_flush_batched; |
1520 |
+ #endif |
1521 |
+ struct uprobes_state uprobes_state; |
1522 |
+ #ifdef CONFIG_X86_INTEL_MPX |
1523 |
+diff --git a/include/linux/sched.h b/include/linux/sched.h |
1524 |
+index 352213b360d7..eff7c1fad26f 100644 |
1525 |
+--- a/include/linux/sched.h |
1526 |
++++ b/include/linux/sched.h |
1527 |
+@@ -801,6 +801,16 @@ struct signal_struct { |
1528 |
+ |
1529 |
+ #define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */ |
1530 |
+ |
1531 |
++#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \ |
1532 |
++ SIGNAL_STOP_CONTINUED) |
1533 |
++ |
1534 |
++static inline void signal_set_stop_flags(struct signal_struct *sig, |
1535 |
++ unsigned int flags) |
1536 |
++{ |
1537 |
++ WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP)); |
1538 |
++ sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags; |
1539 |
++} |
1540 |
++ |
1541 |
+ /* If true, all threads except ->group_exit_task have pending SIGKILL */ |
1542 |
+ static inline int signal_group_exit(const struct signal_struct *sig) |
1543 |
+ { |
1544 |
+diff --git a/include/linux/slab.h b/include/linux/slab.h |
1545 |
+index 2037a861e367..8a2a9ffaf5de 100644 |
1546 |
+--- a/include/linux/slab.h |
1547 |
++++ b/include/linux/slab.h |
1548 |
+@@ -203,7 +203,7 @@ size_t ksize(const void *); |
1549 |
+ * (PAGE_SIZE*2). Larger requests are passed to the page allocator. |
1550 |
+ */ |
1551 |
+ #define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1) |
1552 |
+-#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT) |
1553 |
++#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1) |
1554 |
+ #ifndef KMALLOC_SHIFT_LOW |
1555 |
+ #define KMALLOC_SHIFT_LOW 3 |
1556 |
+ #endif |
1557 |
+@@ -216,7 +216,7 @@ size_t ksize(const void *); |
1558 |
+ * be allocated from the same page. |
1559 |
+ */ |
1560 |
+ #define KMALLOC_SHIFT_HIGH PAGE_SHIFT |
1561 |
+-#define KMALLOC_SHIFT_MAX 30 |
1562 |
++#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1) |
1563 |
+ #ifndef KMALLOC_SHIFT_LOW |
1564 |
+ #define KMALLOC_SHIFT_LOW 3 |
1565 |
+ #endif |
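For scale, assuming the common defaults MAX_ORDER = 11 and PAGE_SHIFT = 12: the buddy allocator's largest block is order MAX_ORDER - 1, i.e. 2^(10 + 12) bytes = 4 MiB. The old SLAB definition advertised 2^(11 + 12) = 8 MiB and the old SLUB constant 2^30 = 1 GiB, both beyond anything the page allocator can actually return; the corrected 2^(MAX_ORDER + PAGE_SHIFT - 1) = 2^22 = 4 MiB matches the real limit.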
1566 |
+diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h |
1567 |
+index 0197358f1e81..262d5c95dfc8 100644 |
1568 |
+--- a/include/linux/workqueue.h |
1569 |
++++ b/include/linux/workqueue.h |
1570 |
+@@ -311,6 +311,7 @@ enum { |
1571 |
+ |
1572 |
+ __WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */ |
1573 |
+ __WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */ |
1574 |
++ __WQ_ORDERED_EXPLICIT = 1 << 18, /* internal: alloc_ordered_workqueue() */ |
1575 |
+ |
1576 |
+ WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */ |
1577 |
+ WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */ |
1578 |
+@@ -408,7 +409,8 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active, |
1579 |
+ * Pointer to the allocated workqueue on success, %NULL on failure. |
1580 |
+ */ |
1581 |
+ #define alloc_ordered_workqueue(fmt, flags, args...) \ |
1582 |
+- alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args) |
1583 |
++ alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | \ |
1584 |
++ __WQ_ORDERED_EXPLICIT | (flags), 1, ##args) |
1585 |
+ |
1586 |
+ #define create_workqueue(name) \ |
1587 |
+ alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, (name)) |
1588 |
+diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h |
1589 |
+index e0f4109e64c6..c2aa73e5e6bb 100644 |
1590 |
+--- a/include/net/iw_handler.h |
1591 |
++++ b/include/net/iw_handler.h |
1592 |
+@@ -556,7 +556,8 @@ iwe_stream_add_point(struct iw_request_info *info, char *stream, char *ends, |
1593 |
+ memcpy(stream + lcp_len, |
1594 |
+ ((char *) &iwe->u) + IW_EV_POINT_OFF, |
1595 |
+ IW_EV_POINT_PK_LEN - IW_EV_LCP_PK_LEN); |
1596 |
+- memcpy(stream + point_len, extra, iwe->u.data.length); |
1597 |
++ if (iwe->u.data.length && extra) |
1598 |
++ memcpy(stream + point_len, extra, iwe->u.data.length); |
1599 |
+ stream += event_len; |
1600 |
+ } |
1601 |
+ return stream; |
1602 |
+diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h |
1603 |
+index ce13cf20f625..d33b17ba51d2 100644 |
1604 |
+--- a/include/net/sctp/sctp.h |
1605 |
++++ b/include/net/sctp/sctp.h |
1606 |
+@@ -444,6 +444,8 @@ _sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member) |
1607 |
+ |
1608 |
+ #define _sctp_walk_params(pos, chunk, end, member)\ |
1609 |
+ for (pos.v = chunk->member;\ |
1610 |
++ (pos.v + offsetof(struct sctp_paramhdr, length) + sizeof(pos.p->length) <=\ |
1611 |
++ (void *)chunk + end) &&\ |
1612 |
+ pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\ |
1613 |
+ ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\ |
1614 |
+ pos.v += WORD_ROUND(ntohs(pos.p->length))) |
1615 |
+@@ -454,6 +456,8 @@ _sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length)) |
1616 |
+ #define _sctp_walk_errors(err, chunk_hdr, end)\ |
1617 |
+ for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \ |
1618 |
+ sizeof(sctp_chunkhdr_t));\ |
1619 |
++ ((void *)err + offsetof(sctp_errhdr_t, length) + sizeof(err->length) <=\ |
1620 |
++ (void *)chunk_hdr + end) &&\ |
1621 |
+ (void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\ |
1622 |
+ ntohs(err->length) >= sizeof(sctp_errhdr_t); \ |
1623 |
+ err = (sctp_errhdr_t *)((void *)err + WORD_ROUND(ntohs(err->length)))) |
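Both macro changes enforce the same invariant: a parameter's length field must itself lie inside the chunk before it may be read and trusted. A userspace model of such a bounded TLV walk (simplified stand-ins for sctp_paramhdr_t and WORD_ROUND(); illustrative only):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv {
	uint16_t type;
	uint16_t length;	/* network order, includes this header */
};

static void walk(const uint8_t *buf, size_t len)
{
	size_t off = 0;

	while (off + sizeof(struct tlv) <= len) {	/* header in bounds? */
		struct tlv hdr;
		uint16_t plen;

		memcpy(&hdr, buf + off, sizeof(hdr));
		plen = ntohs(hdr.length);
		if (plen < sizeof(struct tlv) || plen > len - off)
			break;				/* malformed: stop */

		printf("param type=%u len=%u\n", ntohs(hdr.type), plen);
		off += (plen + 3u) & ~3u;		/* ~WORD_ROUND() */
	}
}

int main(void)
{
	const uint8_t pkt[] = { 0x00, 0x01, 0x00, 0x08, 1, 2, 3, 4,
				0x00, 0x02, 0x00, 0x04 };
	walk(pkt, sizeof(pkt));
	return 0;
}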
1624 |
+diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h |
1625 |
+index e0efe3fcf739..fdda45f26f75 100644 |
1626 |
+--- a/include/target/iscsi/iscsi_target_core.h |
1627 |
++++ b/include/target/iscsi/iscsi_target_core.h |
1628 |
+@@ -562,6 +562,7 @@ struct iscsi_conn { |
1629 |
+ #define LOGIN_FLAGS_READ_ACTIVE 1 |
1630 |
+ #define LOGIN_FLAGS_CLOSED 2 |
1631 |
+ #define LOGIN_FLAGS_READY 4 |
1632 |
++#define LOGIN_FLAGS_INITIAL_PDU 8 |
1633 |
+ unsigned long login_flags; |
1634 |
+ struct delayed_work login_work; |
1635 |
+ struct delayed_work login_cleanup_work; |
1636 |
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h |
1637 |
+index ed66414b91f0..1adf8739980c 100644 |
1638 |
+--- a/include/target/target_core_base.h |
1639 |
++++ b/include/target/target_core_base.h |
1640 |
+@@ -714,6 +714,7 @@ struct se_lun { |
1641 |
+ #define SE_LUN_LINK_MAGIC 0xffff7771 |
1642 |
+ u32 lun_link_magic; |
1643 |
+ u32 lun_access; |
1644 |
++ bool lun_shutdown; |
1645 |
+ u32 lun_index; |
1646 |
+ |
1647 |
+ /* RELATIVE TARGET PORT IDENTIFER */ |
1648 |
+diff --git a/kernel/signal.c b/kernel/signal.c |
1649 |
+index b92a047ddc82..5d50ea899b6d 100644 |
1650 |
+--- a/kernel/signal.c |
1651 |
++++ b/kernel/signal.c |
1652 |
+@@ -346,7 +346,7 @@ static bool task_participate_group_stop(struct task_struct *task) |
1653 |
+ * fresh group stop. Read comment in do_signal_stop() for details. |
1654 |
+ */ |
1655 |
+ if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) { |
1656 |
+- sig->flags = SIGNAL_STOP_STOPPED; |
1657 |
++ signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED); |
1658 |
+ return true; |
1659 |
+ } |
1660 |
+ return false; |
1661 |
+@@ -845,7 +845,7 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force) |
1662 |
+ * will take ->siglock, notice SIGNAL_CLD_MASK, and |
1663 |
+ * notify its parent. See get_signal_to_deliver(). |
1664 |
+ */ |
1665 |
+- signal->flags = why | SIGNAL_STOP_CONTINUED; |
1666 |
++ signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED); |
1667 |
+ signal->group_stop_count = 0; |
1668 |
+ signal->group_exit_code = 0; |
1669 |
+ } |
1670 |
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c |
1671 |
+index 2c2f971f3e75..23231237f2e2 100644 |
1672 |
+--- a/kernel/workqueue.c |
1673 |
++++ b/kernel/workqueue.c |
1674 |
+@@ -3647,8 +3647,12 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq, |
1675 |
+ return -EINVAL; |
1676 |
+ |
1677 |
+ /* creating multiple pwqs breaks ordering guarantee */ |
1678 |
+- if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs))) |
1679 |
+- return -EINVAL; |
1680 |
++ if (!list_empty(&wq->pwqs)) { |
1681 |
++ if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) |
1682 |
++ return -EINVAL; |
1683 |
++ |
1684 |
++ wq->flags &= ~__WQ_ORDERED; |
1685 |
++ } |
1686 |
+ |
1687 |
+ ctx = apply_wqattrs_prepare(wq, attrs); |
1688 |
+ |
1689 |
+@@ -3834,6 +3838,16 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt, |
1690 |
+ struct workqueue_struct *wq; |
1691 |
+ struct pool_workqueue *pwq; |
1692 |
+ |
1693 |
++ /* |
1694 |
++ * Unbound && max_active == 1 used to imply ordered, which is no |
1695 |
++ * longer the case on NUMA machines due to per-node pools. While |
1696 |
++ * alloc_ordered_workqueue() is the right way to create an ordered |
1697 |
++ * workqueue, keep the previous behavior to avoid subtle breakages |
1698 |
++ * on NUMA. |
1699 |
++ */ |
1700 |
++ if ((flags & WQ_UNBOUND) && max_active == 1) |
1701 |
++ flags |= __WQ_ORDERED; |
1702 |
++ |
1703 |
+ /* see the comment above the definition of WQ_POWER_EFFICIENT */ |
1704 |
+ if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient) |
1705 |
+ flags |= WQ_UNBOUND; |
1706 |
+@@ -4022,13 +4036,14 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) |
1707 |
+ struct pool_workqueue *pwq; |
1708 |
+ |
1709 |
+ /* disallow meddling with max_active for ordered workqueues */ |
1710 |
+- if (WARN_ON(wq->flags & __WQ_ORDERED)) |
1711 |
++ if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) |
1712 |
+ return; |
1713 |
+ |
1714 |
+ max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); |
1715 |
+ |
1716 |
+ mutex_lock(&wq->mutex); |
1717 |
+ |
1718 |
++ wq->flags &= ~__WQ_ORDERED; |
1719 |
+ wq->saved_max_active = max_active; |
1720 |
+ |
1721 |
+ for_each_pwq(pwq, wq) |
1722 |
+@@ -5154,7 +5169,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq) |
1723 |
+ * attributes breaks ordering guarantee. Disallow exposing ordered |
1724 |
+ * workqueues. |
1725 |
+ */ |
1726 |
+- if (WARN_ON(wq->flags & __WQ_ORDERED)) |
1727 |
++ if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) |
1728 |
+ return -EINVAL; |
1729 |
+ |
1730 |
+ wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL); |
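Taken together: alloc_workqueue("x", WQ_UNBOUND, 1) now receives the implicit __WQ_ORDERED, preserving its historical single-pwq ordering, but apply_workqueue_attrs() and workqueue_set_max_active() quietly drop that flag on the assumption that a caller reshaping the queue was not relying on ordering. A queue from alloc_ordered_workqueue() additionally carries __WQ_ORDERED_EXPLICIT, so the same calls trip the WARN_ONs above and are refused, keeping the documented guarantee intact.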
1731 |
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug |
1732 |
+index 8c15b29d5adc..b53b375e14bd 100644 |
1733 |
+--- a/lib/Kconfig.debug |
1734 |
++++ b/lib/Kconfig.debug |
1735 |
+@@ -145,7 +145,7 @@ config DEBUG_INFO_REDUCED |
1736 |
+ |
1737 |
+ config DEBUG_INFO_SPLIT |
1738 |
+ bool "Produce split debuginfo in .dwo files" |
1739 |
+- depends on DEBUG_INFO |
1740 |
++ depends on DEBUG_INFO && !FRV |
1741 |
+ help |
1742 |
+ Generate debug info into separate .dwo files. This significantly |
1743 |
+ reduces the build directory size for builds with DEBUG_INFO, |
1744 |
+diff --git a/mm/internal.h b/mm/internal.h |
1745 |
+index 6979b2bd3227..f63f4393d633 100644 |
1746 |
+--- a/mm/internal.h |
1747 |
++++ b/mm/internal.h |
1748 |
+@@ -453,6 +453,7 @@ struct tlbflush_unmap_batch; |
1749 |
+ #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH |
1750 |
+ void try_to_unmap_flush(void); |
1751 |
+ void try_to_unmap_flush_dirty(void); |
1752 |
++void flush_tlb_batched_pending(struct mm_struct *mm); |
1753 |
+ #else |
1754 |
+ static inline void try_to_unmap_flush(void) |
1755 |
+ { |
1756 |
+@@ -460,6 +461,8 @@ static inline void try_to_unmap_flush(void) |
1757 |
+ static inline void try_to_unmap_flush_dirty(void) |
1758 |
+ { |
1759 |
+ } |
1760 |
+- |
1761 |
++static inline void flush_tlb_batched_pending(struct mm_struct *mm) |
1762 |
++{ |
1763 |
++} |
1764 |
+ #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */ |
1765 |
+ #endif /* __MM_INTERNAL_H */ |
1766 |
+diff --git a/mm/memory.c b/mm/memory.c |
1767 |
+index e6fa13484447..9ac55172aa7b 100644 |
1768 |
+--- a/mm/memory.c |
1769 |
++++ b/mm/memory.c |
1770 |
+@@ -1127,6 +1127,7 @@ again: |
1771 |
+ init_rss_vec(rss); |
1772 |
+ start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl); |
1773 |
+ pte = start_pte; |
1774 |
++ flush_tlb_batched_pending(mm); |
1775 |
+ arch_enter_lazy_mmu_mode(); |
1776 |
+ do { |
1777 |
+ pte_t ptent = *pte; |
1778 |
+diff --git a/mm/mprotect.c b/mm/mprotect.c |
1779 |
+index ef5be8eaab00..c0b4b2a49462 100644 |
1780 |
+--- a/mm/mprotect.c |
1781 |
++++ b/mm/mprotect.c |
1782 |
+@@ -72,6 +72,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, |
1783 |
+ if (!pte) |
1784 |
+ return 0; |
1785 |
+ |
1786 |
++ flush_tlb_batched_pending(vma->vm_mm); |
1787 |
+ arch_enter_lazy_mmu_mode(); |
1788 |
+ do { |
1789 |
+ oldpte = *pte; |
1790 |
+diff --git a/mm/mremap.c b/mm/mremap.c |
1791 |
+index c25bc6268e46..fe7b7f65f4f4 100644 |
1792 |
+--- a/mm/mremap.c |
1793 |
++++ b/mm/mremap.c |
1794 |
+@@ -135,6 +135,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, |
1795 |
+ new_ptl = pte_lockptr(mm, new_pmd); |
1796 |
+ if (new_ptl != old_ptl) |
1797 |
+ spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); |
1798 |
++ flush_tlb_batched_pending(vma->vm_mm); |
1799 |
+ arch_enter_lazy_mmu_mode(); |
1800 |
+ |
1801 |
+ for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE, |
1802 |
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c |
1803 |
+index bd17a6bdf131..f9d648fce8cd 100644 |
1804 |
+--- a/mm/page_alloc.c |
1805 |
++++ b/mm/page_alloc.c |
1806 |
+@@ -1527,14 +1527,14 @@ int move_freepages(struct zone *zone, |
1807 |
+ #endif |
1808 |
+ |
1809 |
+ for (page = start_page; page <= end_page;) { |
1810 |
+- /* Make sure we are not inadvertently changing nodes */ |
1811 |
+- VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); |
1812 |
+- |
1813 |
+ if (!pfn_valid_within(page_to_pfn(page))) { |
1814 |
+ page++; |
1815 |
+ continue; |
1816 |
+ } |
1817 |
+ |
1818 |
++ /* Make sure we are not inadvertently changing nodes */ |
1819 |
++ VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); |
1820 |
++ |
1821 |
+ if (!PageBuddy(page)) { |
1822 |
+ page++; |
1823 |
+ continue; |
1824 |
+@@ -5847,8 +5847,8 @@ unsigned long free_reserved_area(void *start, void *end, int poison, char *s) |
1825 |
+ } |
1826 |
+ |
1827 |
+ if (pages && s) |
1828 |
+- pr_info("Freeing %s memory: %ldK (%p - %p)\n", |
1829 |
+- s, pages << (PAGE_SHIFT - 10), start, end); |
1830 |
++ pr_info("Freeing %s memory: %ldK\n", |
1831 |
++ s, pages << (PAGE_SHIFT - 10)); |
1832 |
+ |
1833 |
+ return pages; |
1834 |
+ } |
1835 |
+diff --git a/mm/rmap.c b/mm/rmap.c |
1836 |
+index b577fbb98d4b..ede183c32f45 100644 |
1837 |
+--- a/mm/rmap.c |
1838 |
++++ b/mm/rmap.c |
1839 |
+@@ -648,6 +648,13 @@ static void set_tlb_ubc_flush_pending(struct mm_struct *mm, |
1840 |
+ cpumask_or(&tlb_ubc->cpumask, &tlb_ubc->cpumask, mm_cpumask(mm)); |
1841 |
+ tlb_ubc->flush_required = true; |
1842 |
+ |
1843 |
++ /* |
1844 |
++ * Ensure compiler does not re-order the setting of tlb_flush_batched |
1845 |
++ * before the PTE is cleared. |
1846 |
++ */ |
1847 |
++ barrier(); |
1848 |
++ mm->tlb_flush_batched = true; |
1849 |
++ |
1850 |
+ /* |
1851 |
+ * If the PTE was dirty then it's best to assume it's writable. The |
1852 |
+ * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush() |
1853 |
+@@ -675,6 +682,35 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) |
1854 |
+ |
1855 |
+ return should_defer; |
1856 |
+ } |
1857 |
++ |
1858 |
++/* |
1859 |
++ * Reclaim unmaps pages under the PTL but does not flush the TLB prior to |
1860 |
++ * releasing the PTL if TLB flushes are batched. It's possible for a parallel |
1861 |
++ * operation such as mprotect or munmap to race between reclaim unmapping |
1862 |
++ * the page and flushing the page. If this race occurs, it potentially allows |
1863 |
++ * access to data via a stale TLB entry. Tracking all mm's that have TLB |
1864 |
++ * batching in flight would be expensive during reclaim so instead track |
1865 |
++ * whether TLB batching occurred in the past and if so then do a flush here |
1866 |
++ * if required. This will cost one additional flush per reclaim cycle paid |
1867 |
++ * by the first operation at risk such as mprotect and munmap. |
1868 |
++ * |
1869 |
++ * This must be called under the PTL so that an access to tlb_flush_batched |
1870 |
++ * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise |
1871 |
++ * via the PTL. |
1872 |
++ */ |
1873 |
++void flush_tlb_batched_pending(struct mm_struct *mm) |
1874 |
++{ |
1875 |
++ if (mm->tlb_flush_batched) { |
1876 |
++ flush_tlb_mm(mm); |
1877 |
++ |
1878 |
++ /* |
1879 |
++ * Do not allow the compiler to re-order the clearing of |
1880 |
++ * tlb_flush_batched before the tlb is flushed. |
1881 |
++ */ |
1882 |
++ barrier(); |
1883 |
++ mm->tlb_flush_batched = false; |
1884 |
++ } |
1885 |
++} |
1886 |
+ #else |
1887 |
+ static void set_tlb_ubc_flush_pending(struct mm_struct *mm, |
1888 |
+ struct page *page, bool writable) |
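A userspace model of the flush_tlb_batched_pending() scheme may help: the reclaim side batches work and records that fact in a flag; any racing operation that needs the work completed checks the flag under the same lock and performs it eagerly. The mutex stands in for the page-table lock and do_flush() for flush_tlb_mm(); illustrative only:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER;
static bool flush_batched;	/* models mm->tlb_flush_batched */

static void do_flush(void)
{
	puts("flush");
}

/* reclaim side: clears PTEs, defers the flush, records it as pending */
static void batch_unmap(void)
{
	pthread_mutex_lock(&ptl);
	/* ... clear PTEs ... */
	flush_batched = true;
	pthread_mutex_unlock(&ptl);
}

/* mprotect/munmap/zap side: must not operate behind stale TLB entries;
 * caller holds the lock, mirroring "must be called under the PTL" */
static void flush_batched_pending(void)
{
	if (flush_batched) {
		do_flush();
		flush_batched = false;
	}
}

int main(void)
{
	batch_unmap();

	pthread_mutex_lock(&ptl);
	flush_batched_pending();	/* flushes before the PTEs are touched */
	pthread_mutex_unlock(&ptl);

	return 0;
}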
1889 |
+diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c |
1890 |
+index b94b1d293506..151e047ce072 100644 |
1891 |
+--- a/net/core/dev_ioctl.c |
1892 |
++++ b/net/core/dev_ioctl.c |
1893 |
+@@ -28,6 +28,7 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg) |
1894 |
+ |
1895 |
+ if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) |
1896 |
+ return -EFAULT; |
1897 |
++ ifr.ifr_name[IFNAMSIZ-1] = 0; |
1898 |
+ |
1899 |
+ error = netdev_get_name(net, ifr.ifr_name, ifr.ifr_ifindex); |
1900 |
+ if (error) |
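The added line is the usual belt for fixed-size names copied from userspace: copy_from_user() does not guarantee a terminator, so one is forced in before the buffer reaches string-handling code. A userspace miniature of the same move:

#include <stdio.h>
#include <string.h>

#define IFNAMSIZ 16

int main(void)
{
	char user_buf[IFNAMSIZ];
	char name[IFNAMSIZ];

	memset(user_buf, 'A', sizeof(user_buf));	/* no terminator     */
	memcpy(name, user_buf, sizeof(name));		/* ~copy_from_user() */
	name[IFNAMSIZ - 1] = 0;				/* the added line    */

	printf("len=%zu\n", strlen(name));		/* bounded: 15       */
	return 0;
}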
1901 |
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c |
1902 |
+index 2ec5324a7ff7..5b3d611d8b5f 100644 |
1903 |
+--- a/net/core/rtnetlink.c |
1904 |
++++ b/net/core/rtnetlink.c |
1905 |
+@@ -1742,7 +1742,8 @@ static int do_setlink(const struct sk_buff *skb, |
1906 |
+ struct sockaddr *sa; |
1907 |
+ int len; |
1908 |
+ |
1909 |
+- len = sizeof(sa_family_t) + dev->addr_len; |
1910 |
++ len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len, |
1911 |
++ sizeof(*sa)); |
1912 |
+ sa = kmalloc(len, GFP_KERNEL); |
1913 |
+ if (!sa) { |
1914 |
+ err = -ENOMEM; |
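The max_t() sizes the buffer for its later life as a struct sockaddr: for a device whose addr_len is 4, the old allocation was sizeof(sa_family_t) + 4 = 6 bytes, while sizeof(struct sockaddr) is 16 (a 2-byte family plus 14 data bytes), so code handling the pointer as a full sockaddr could read past the allocation. Taking the larger of addr_len and sizeof(*sa) makes the minimum allocation 2 + 16 = 18 bytes.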
1915 |
+diff --git a/net/dccp/feat.c b/net/dccp/feat.c |
1916 |
+index 1704948e6a12..f227f002c73d 100644 |
1917 |
+--- a/net/dccp/feat.c |
1918 |
++++ b/net/dccp/feat.c |
1919 |
+@@ -1471,9 +1471,12 @@ int dccp_feat_init(struct sock *sk) |
1920 |
+ * singleton values (which always leads to failure). |
1921 |
+ * These settings can still (later) be overridden via sockopts. |
1922 |
+ */ |
1923 |
+- if (ccid_get_builtin_ccids(&tx.val, &tx.len) || |
1924 |
+- ccid_get_builtin_ccids(&rx.val, &rx.len)) |
1925 |
++ if (ccid_get_builtin_ccids(&tx.val, &tx.len)) |
1926 |
+ return -ENOBUFS; |
1927 |
++ if (ccid_get_builtin_ccids(&rx.val, &rx.len)) { |
1928 |
++ kfree(tx.val); |
1929 |
++ return -ENOBUFS; |
1930 |
++ } |
1931 |
+ |
1932 |
+ if (!dccp_feat_prefer(sysctl_dccp_tx_ccid, tx.val, tx.len) || |
1933 |
+ !dccp_feat_prefer(sysctl_dccp_rx_ccid, rx.val, rx.len)) |
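The split replicates a standard error-path shape: when the second of two allocations fails, the first must be released before returning. In miniature (userspace sketch, hypothetical names):

#include <stdlib.h>

struct bufs {
	void *tx;
	void *rx;
};

static int init_bufs(struct bufs *b, size_t n)
{
	b->tx = malloc(n);
	if (!b->tx)
		return -1;

	b->rx = malloc(n);
	if (!b->rx) {
		free(b->tx);	/* the step the old combined check skipped */
		b->tx = NULL;
		return -1;
	}
	return 0;
}

int main(void)
{
	struct bufs b;

	if (init_bufs(&b, 64))
		return 1;
	free(b.tx);
	free(b.rx);
	return 0;
}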
1934 |
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c |
1935 |
+index 6467bf392e1b..e217f17997a4 100644 |
1936 |
+--- a/net/dccp/ipv4.c |
1937 |
++++ b/net/dccp/ipv4.c |
1938 |
+@@ -635,6 +635,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) |
1939 |
+ goto drop_and_free; |
1940 |
+ |
1941 |
+ inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); |
1942 |
++ reqsk_put(req); |
1943 |
+ return 0; |
1944 |
+ |
1945 |
+ drop_and_free: |
1946 |
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c |
1947 |
+index 3470ad1843bb..09a9ab65f4e1 100644 |
1948 |
+--- a/net/dccp/ipv6.c |
1949 |
++++ b/net/dccp/ipv6.c |
1950 |
+@@ -376,6 +376,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) |
1951 |
+ goto drop_and_free; |
1952 |
+ |
1953 |
+ inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); |
1954 |
++ reqsk_put(req); |
1955 |
+ return 0; |
1956 |
+ |
1957 |
+ drop_and_free: |
1958 |
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c |
1959 |
+index 66dcb529fd9c..0cb240c749bf 100644 |
1960 |
+--- a/net/ipv4/fib_frontend.c |
1961 |
++++ b/net/ipv4/fib_frontend.c |
1962 |
+@@ -1319,13 +1319,14 @@ static struct pernet_operations fib_net_ops = { |
1963 |
+ |
1964 |
+ void __init ip_fib_init(void) |
1965 |
+ { |
1966 |
+- rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL); |
1967 |
+- rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL); |
1968 |
+- rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL); |
1969 |
++ fib_trie_init(); |
1970 |
+ |
1971 |
+ register_pernet_subsys(&fib_net_ops); |
1972 |
++ |
1973 |
+ register_netdevice_notifier(&fib_netdev_notifier); |
1974 |
+ register_inetaddr_notifier(&fib_inetaddr_notifier); |
1975 |
+ |
1976 |
+- fib_trie_init(); |
1977 |
++ rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL); |
1978 |
++ rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL); |
1979 |
++ rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL); |
1980 |
+ } |
1981 |
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c |
1982 |
+index 2b7283303650..5d58a6703a43 100644 |
1983 |
+--- a/net/ipv4/ip_output.c |
1984 |
++++ b/net/ipv4/ip_output.c |
1985 |
+@@ -922,7 +922,8 @@ static int __ip_append_data(struct sock *sk, |
1986 |
+ csummode = CHECKSUM_PARTIAL; |
1987 |
+ |
1988 |
+ cork->length += length; |
1989 |
+- if (((length > mtu) || (skb && skb_is_gso(skb))) && |
1990 |
++ if ((((length + (skb ? skb->len : fragheaderlen)) > mtu) || |
1991 |
++ (skb && skb_is_gso(skb))) && |
1992 |
+ (sk->sk_protocol == IPPROTO_UDP) && |
1993 |
+ (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len && |
1994 |
+ (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) { |
1995 |
+diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c |
1996 |
+index 4cbe9f0a4281..731b91409625 100644 |
1997 |
+--- a/net/ipv4/syncookies.c |
1998 |
++++ b/net/ipv4/syncookies.c |
1999 |
+@@ -337,6 +337,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) |
2000 |
+ treq = tcp_rsk(req); |
2001 |
+ treq->rcv_isn = ntohl(th->seq) - 1; |
2002 |
+ treq->snt_isn = cookie; |
2003 |
++ treq->txhash = net_tx_rndhash(); |
2004 |
+ req->mss = mss; |
2005 |
+ ireq->ir_num = ntohs(th->dest); |
2006 |
+ ireq->ir_rmt_port = th->source; |
2007 |
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c |
2008 |
+index 150b4923fb72..0de3245ea42f 100644 |
2009 |
+--- a/net/ipv6/ip6_output.c |
2010 |
++++ b/net/ipv6/ip6_output.c |
2011 |
+@@ -647,8 +647,6 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, |
2012 |
+ *prevhdr = NEXTHDR_FRAGMENT; |
2013 |
+ tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC); |
2014 |
+ if (!tmp_hdr) { |
2015 |
+- IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), |
2016 |
+- IPSTATS_MIB_FRAGFAILS); |
2017 |
+ err = -ENOMEM; |
2018 |
+ goto fail; |
2019 |
+ } |
2020 |
+@@ -767,8 +765,6 @@ slow_path: |
2021 |
+ frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) + |
2022 |
+ hroom + troom, GFP_ATOMIC); |
2023 |
+ if (!frag) { |
2024 |
+- IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), |
2025 |
+- IPSTATS_MIB_FRAGFAILS); |
2026 |
+ err = -ENOMEM; |
2027 |
+ goto fail; |
2028 |
+ } |
2029 |
+@@ -1361,7 +1357,7 @@ emsgsize: |
2030 |
+ */ |
2031 |
+ |
2032 |
+ cork->length += length; |
2033 |
+- if ((((length + fragheaderlen) > mtu) || |
2034 |
++ if ((((length + (skb ? skb->len : headersize)) > mtu) || |
2035 |
+ (skb && skb_is_gso(skb))) && |
2036 |
+ (sk->sk_protocol == IPPROTO_UDP) && |
2037 |
+ (rt->dst.dev->features & NETIF_F_UFO) && |
2038 |
+diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c |
2039 |
+index 8b56c5240429..f9f02581c4ca 100644 |
2040 |
+--- a/net/ipv6/output_core.c |
2041 |
++++ b/net/ipv6/output_core.c |
2042 |
+@@ -78,7 +78,7 @@ EXPORT_SYMBOL(ipv6_select_ident); |
2043 |
+ |
2044 |
+ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) |
2045 |
+ { |
2046 |
+- u16 offset = sizeof(struct ipv6hdr); |
2047 |
++ unsigned int offset = sizeof(struct ipv6hdr); |
2048 |
+ unsigned int packet_len = skb_tail_pointer(skb) - |
2049 |
+ skb_network_header(skb); |
2050 |
+ int found_rhdr = 0; |
2051 |
+@@ -86,6 +86,7 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) |
2052 |
+ |
2053 |
+ while (offset <= packet_len) { |
2054 |
+ struct ipv6_opt_hdr *exthdr; |
2055 |
++ unsigned int len; |
2056 |
+ |
2057 |
+ switch (**nexthdr) { |
2058 |
+ |
2059 |
+@@ -111,7 +112,10 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) |
2060 |
+ |
2061 |
+ exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) + |
2062 |
+ offset); |
2063 |
+- offset += ipv6_optlen(exthdr); |
2064 |
++ len = ipv6_optlen(exthdr); |
2065 |
++ if (len + offset >= IPV6_MAXPLEN) |
2066 |
++ return -EINVAL; |
2067 |
++ offset += len; |
2068 |
+ *nexthdr = &exthdr->nexthdr; |
2069 |
+ } |
2070 |
+ |
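The widening is load-bearing: ipv6_optlen() can report up to 2048 bytes per extension header (hdrlen is an 8-bit count of 8-byte units), so a crafted header chain could push a u16 offset past 65535 and wrap, e.g. 65528 + 2048 = 67576, which truncates to 2040 in 16 bits and sends the walk backwards over already-parsed data. With offset widened to unsigned int and the new len + offset >= IPV6_MAXPLEN check, such packets fail with -EINVAL instead.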
2071 |
+diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c |
2072 |
+index eaf7ac496d50..aee87282d352 100644 |
2073 |
+--- a/net/ipv6/syncookies.c |
2074 |
++++ b/net/ipv6/syncookies.c |
2075 |
+@@ -210,6 +210,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) |
2076 |
+ treq->snt_synack.v64 = 0; |
2077 |
+ treq->rcv_isn = ntohl(th->seq) - 1; |
2078 |
+ treq->snt_isn = cookie; |
2079 |
++ treq->txhash = net_tx_rndhash(); |
2080 |
+ |
2081 |
+ /* |
2082 |
+ * We need to lookup the dst_entry to get the correct window size. |
2083 |
+diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c |
2084 |
+index ad58d2a6284e..6a2507f24b0f 100644 |
2085 |
+--- a/net/openvswitch/conntrack.c |
2086 |
++++ b/net/openvswitch/conntrack.c |
2087 |
+@@ -577,8 +577,8 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info, |
2088 |
+ |
2089 |
+ nla_for_each_nested(a, attr, rem) { |
2090 |
+ int type = nla_type(a); |
2091 |
+- int maxlen = ovs_ct_attr_lens[type].maxlen; |
2092 |
+- int minlen = ovs_ct_attr_lens[type].minlen; |
2093 |
++ int maxlen; |
2094 |
++ int minlen; |
2095 |
+ |
2096 |
+ if (type > OVS_CT_ATTR_MAX) { |
2097 |
+ OVS_NLERR(log, |
2098 |
+@@ -586,6 +586,9 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info, |
2099 |
+ type, OVS_CT_ATTR_MAX); |
2100 |
+ return -EINVAL; |
2101 |
+ } |
2102 |
++ |
2103 |
++ maxlen = ovs_ct_attr_lens[type].maxlen; |
2104 |
++ minlen = ovs_ct_attr_lens[type].minlen; |
2105 |
+ if (nla_len(a) < minlen || nla_len(a) > maxlen) { |
2106 |
+ OVS_NLERR(log, |
2107 |
+ "Conntrack attr type has unexpected length (type=%d, length=%d, expected=%d)", |
2108 |
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c |
2109 |
+index f8d6a0ca9c03..061771ca2582 100644 |
2110 |
+--- a/net/packet/af_packet.c |
2111 |
++++ b/net/packet/af_packet.c |
2112 |
+@@ -4225,7 +4225,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, |
2113 |
+ register_prot_hook(sk); |
2114 |
+ } |
2115 |
+ spin_unlock(&po->bind_lock); |
2116 |
+- if (closing && (po->tp_version > TPACKET_V2)) { |
2117 |
++ if (pg_vec && (po->tp_version > TPACKET_V2)) { |
2118 |
+ /* Because we don't support block-based V3 on tx-ring */ |
2119 |
+ if (!tx_ring) |
2120 |
+ prb_shutdown_retire_blk_timer(po, rb_queue); |
2121 |
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c |
2122 |
+index 46a34039ecdc..5cab24f52825 100644 |
2123 |
+--- a/sound/pci/hda/patch_realtek.c |
2124 |
++++ b/sound/pci/hda/patch_realtek.c |
2125 |
+@@ -2233,6 +2233,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { |
2126 |
+ SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3), |
2127 |
+ SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT), |
2128 |
+ SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP), |
2129 |
++ SND_PCI_QUIRK(0x104d, 0x9060, "Sony Vaio VPCL14M1R", ALC882_FIXUP_NO_PRIMARY_HP), |
2130 |
+ SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP), |
2131 |
+ SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP), |
2132 |
+ |
2133 |
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c |
2134 |
+index a1e605bbc465..977066ba1769 100644 |
2135 |
+--- a/sound/soc/soc-pcm.c |
2136 |
++++ b/sound/soc/soc-pcm.c |
2137 |
+@@ -181,6 +181,10 @@ int dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir, |
2138 |
+ dev_dbg(be->dev, "ASoC: BE %s event %d dir %d\n", |
2139 |
+ be->dai_link->name, event, dir); |
2140 |
+ |
2141 |
++ if ((event == SND_SOC_DAPM_STREAM_STOP) && |
2142 |
++ (be->dpcm[dir].users >= 1)) |
2143 |
++ continue; |
2144 |
++ |
2145 |
+ snd_soc_dapm_stream_event(be, dir, event); |
2146 |
+ } |
2147 |
+ |