From: Mike Pagano <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Wed, 27 Sep 2017 10:38:37
Message-Id: 1506508701.c6f3d3e1793a3838f6ad87a32e56d8cc22451dee.mpagano@gentoo
commit: c6f3d3e1793a3838f6ad87a32e56d8cc22451dee
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Sep 27 10:38:21 2017 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Sep 27 10:38:21 2017 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c6f3d3e1

Linux patch 4.4.89

0000_README | 4 +
1088_linux-4.4.89.patch | 2820 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 2824 insertions(+)

diff --git a/0000_README b/0000_README
index 44fa891..43c1c6e 100644
--- a/0000_README
+++ b/0000_README
@@ -395,6 +395,10 @@ Patch: 1087_linux-4.4.88.patch
From: http://www.kernel.org
Desc: Linux 4.4.88

+Patch: 1088_linux-4.4.89.patch
+From: http://www.kernel.org
+Desc: Linux 4.4.89
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1088_linux-4.4.89.patch b/1088_linux-4.4.89.patch
new file mode 100644
index 0000000..6b0d92e
--- /dev/null
+++ b/1088_linux-4.4.89.patch
@@ -0,0 +1,2820 @@
+diff --git a/Makefile b/Makefile
+index 788d90a0051b..7e4c46b375b3 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 88
++SUBLEVEL = 89
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+
+diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
+index 2efb0625331d..db1eee5fe502 100644
+--- a/arch/arc/kernel/entry.S
++++ b/arch/arc/kernel/entry.S
+@@ -104,6 +104,12 @@ ENTRY(EV_MachineCheck)
+ lr r0, [efa]
+ mov r1, sp
+
++ ; hardware auto-disables MMU, re-enable it to allow kernel vaddr
++ ; access for say stack unwinding of modules for crash dumps
++ lr r3, [ARC_REG_PID]
++ or r3, r3, MMU_ENABLE
++ sr r3, [ARC_REG_PID]
++
+ lsr r3, r2, 8
+ bmsk r3, r3, 7
+ brne r3, ECR_C_MCHK_DUP_TLB, 1f
+diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
+index daf2bf52b984..97e9582dcf99 100644
+--- a/arch/arc/mm/tlb.c
++++ b/arch/arc/mm/tlb.c
+@@ -885,9 +885,6 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
+
+ local_irq_save(flags);
+
+- /* re-enable the MMU */
+- write_aux_reg(ARC_REG_PID, MMU_ENABLE | read_aux_reg(ARC_REG_PID));
+-
+ /* loop thru all sets of TLB */
+ for (set = 0; set < mmu->sets; set++) {
+
+diff --git a/arch/mips/math-emu/dp_fmax.c b/arch/mips/math-emu/dp_fmax.c
+index fd71b8daaaf2..5bec64f2884e 100644
+--- a/arch/mips/math-emu/dp_fmax.c
++++ b/arch/mips/math-emu/dp_fmax.c
+@@ -47,14 +47,26 @@ union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y)
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ return ieee754dp_nanxcpt(x);
+
+- /* numbers are preferred to NaNs */
++ /*
++ * Quiet NaN handling
++ */
++
++ /*
++ * The case of both inputs quiet NaNs
++ */
++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
++ return x;
++
++ /*
++ * The cases of exactly one input quiet NaN (numbers
++ * are here preferred as returned values to NaNs)
++ */
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return x;
+
+- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+@@ -80,9 +92,7 @@ union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y)
+ return ys ? x : y;
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+- if (xs == ys)
+- return x;
+- return ieee754dp_zero(1);
++ return ieee754dp_zero(xs & ys);
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ DPDNORMX;
+@@ -106,16 +116,32 @@ union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y)
+ else if (xs < ys)
+ return x;
+
+- /* Compare exponent */
+- if (xe > ye)
+- return x;
+- else if (xe < ye)
+- return y;
++ /* Signs of inputs are equal, let's compare exponents */
++ if (xs == 0) {
++ /* Inputs are both positive */
++ if (xe > ye)
++ return x;
++ else if (xe < ye)
++ return y;
++ } else {
++ /* Inputs are both negative */
++ if (xe > ye)
++ return y;
++ else if (xe < ye)
++ return x;
++ }
+
+- /* Compare mantissa */
++ /* Signs and exponents of inputs are equal, let's compare mantissas */
++ if (xs == 0) {
++ /* Inputs are both positive, with equal signs and exponents */
++ if (xm <= ym)
++ return y;
++ return x;
++ }
++ /* Inputs are both negative, with equal signs and exponents */
+ if (xm <= ym)
+- return y;
+- return x;
++ return x;
++ return y;
+ }
+
+ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
+@@ -147,14 +173,26 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ return ieee754dp_nanxcpt(x);
+
+- /* numbers are preferred to NaNs */
++ /*
++ * Quiet NaN handling
++ */
++
++ /*
++ * The case of both inputs quiet NaNs
++ */
++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
++ return x;
++
++ /*
++ * The cases of exactly one input quiet NaN (numbers
++ * are here preferred as returned values to NaNs)
++ */
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return x;
+
+- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+@@ -164,6 +202,9 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
+ /*
+ * Infinity and zero handling
+ */
++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
++ return ieee754dp_inf(xs & ys);
++
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+@@ -171,7 +212,6 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ return x;
+
+- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+@@ -180,9 +220,7 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
+ return y;
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+- if (xs == ys)
+- return x;
+- return ieee754dp_zero(1);
++ return ieee754dp_zero(xs & ys);
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ DPDNORMX;
+@@ -207,7 +245,11 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
+ return y;
+
+ /* Compare mantissa */
+- if (xm <= ym)
++ if (xm < ym)
+ return y;
+- return x;
++ else if (xm > ym)
++ return x;
++ else if (xs == 0)
++ return x;
++ return y;
+ }
+diff --git a/arch/mips/math-emu/dp_fmin.c b/arch/mips/math-emu/dp_fmin.c
+index c1072b0dfb95..a287b23818d8 100644
+--- a/arch/mips/math-emu/dp_fmin.c
++++ b/arch/mips/math-emu/dp_fmin.c
+@@ -47,14 +47,26 @@ union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y)
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ return ieee754dp_nanxcpt(x);
+
+- /* numbers are preferred to NaNs */
++ /*
++ * Quiet NaN handling
++ */
++
++ /*
++ * The case of both inputs quiet NaNs
++ */
++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
++ return x;
++
++ /*
++ * The cases of exactly one input quiet NaN (numbers
++ * are here preferred as returned values to NaNs)
++ */
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return x;
+
+- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+@@ -80,9 +92,7 @@ union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y)
+ return ys ? y : x;
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+- if (xs == ys)
+- return x;
+- return ieee754dp_zero(1);
++ return ieee754dp_zero(xs | ys);
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ DPDNORMX;
+@@ -106,16 +116,32 @@ union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y)
+ else if (xs < ys)
+ return y;
+
+- /* Compare exponent */
+- if (xe > ye)
+- return y;
+- else if (xe < ye)
+- return x;
++ /* Signs of inputs are the same, let's compare exponents */
++ if (xs == 0) {
++ /* Inputs are both positive */
++ if (xe > ye)
++ return y;
++ else if (xe < ye)
++ return x;
++ } else {
++ /* Inputs are both negative */
++ if (xe > ye)
++ return x;
++ else if (xe < ye)
++ return y;
++ }
+
+- /* Compare mantissa */
++ /* Signs and exponents of inputs are equal, let's compare mantissas */
++ if (xs == 0) {
++ /* Inputs are both positive, with equal signs and exponents */
++ if (xm <= ym)
++ return x;
++ return y;
++ }
++ /* Inputs are both negative, with equal signs and exponents */
+ if (xm <= ym)
+- return x;
+- return y;
++ return y;
++ return x;
+ }
+
+ union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
+@@ -147,14 +173,26 @@ union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ return ieee754dp_nanxcpt(x);
+
+- /* numbers are preferred to NaNs */
++ /*
++ * Quiet NaN handling
++ */
++
++ /*
++ * The case of both inputs quiet NaNs
++ */
++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
++ return x;
++
++ /*
++ * The cases of exactly one input quiet NaN (numbers
++ * are here preferred as returned values to NaNs)
++ */
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return x;
+
+- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+@@ -164,25 +202,25 @@ union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
+ /*
+ * Infinity and zero handling
+ */
++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
++ return ieee754dp_inf(xs | ys);
++
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+- return x;
++ return y;
+
+- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+- return y;
++ return x;
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+- if (xs == ys)
+- return x;
+- return ieee754dp_zero(1);
++ return ieee754dp_zero(xs | ys);
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ DPDNORMX;
+@@ -207,7 +245,11 @@ union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
+ return x;
+
+ /* Compare mantissa */
+- if (xm <= ym)
++ if (xm < ym)
++ return x;
++ else if (xm > ym)
++ return y;
++ else if (xs == 1)
+ return x;
+ return y;
+ }
+diff --git a/arch/mips/math-emu/sp_fmax.c b/arch/mips/math-emu/sp_fmax.c
+index 4d000844e48e..74a5a00d2f22 100644
+--- a/arch/mips/math-emu/sp_fmax.c
++++ b/arch/mips/math-emu/sp_fmax.c
+@@ -47,14 +47,26 @@ union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y)
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ return ieee754sp_nanxcpt(x);
+
+- /* numbers are preferred to NaNs */
++ /*
++ * Quiet NaN handling
++ */
++
++ /*
++ * The case of both inputs quiet NaNs
++ */
++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
++ return x;
++
++ /*
++ * The cases of exactly one input quiet NaN (numbers
++ * are here preferred as returned values to NaNs)
++ */
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return x;
+
+- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+@@ -80,9 +92,7 @@ union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y)
+ return ys ? x : y;
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+- if (xs == ys)
+- return x;
+- return ieee754sp_zero(1);
++ return ieee754sp_zero(xs & ys);
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ SPDNORMX;
+@@ -106,16 +116,32 @@ union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y)
+ else if (xs < ys)
+ return x;
+
+- /* Compare exponent */
+- if (xe > ye)
+- return x;
+- else if (xe < ye)
+- return y;
++ /* Signs of inputs are equal, let's compare exponents */
++ if (xs == 0) {
++ /* Inputs are both positive */
++ if (xe > ye)
++ return x;
++ else if (xe < ye)
++ return y;
++ } else {
++ /* Inputs are both negative */
++ if (xe > ye)
++ return y;
++ else if (xe < ye)
++ return x;
++ }
+
+- /* Compare mantissa */
++ /* Signs and exponents of inputs are equal, let's compare mantissas */
++ if (xs == 0) {
++ /* Inputs are both positive, with equal signs and exponents */
++ if (xm <= ym)
++ return y;
++ return x;
++ }
++ /* Inputs are both negative, with equal signs and exponents */
+ if (xm <= ym)
+- return y;
+- return x;
++ return x;
++ return y;
+ }
+
+ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
+@@ -147,14 +173,26 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ return ieee754sp_nanxcpt(x);
+
+- /* numbers are preferred to NaNs */
++ /*
++ * Quiet NaN handling
++ */
++
++ /*
++ * The case of both inputs quiet NaNs
++ */
++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
++ return x;
++
++ /*
++ * The cases of exactly one input quiet NaN (numbers
++ * are here preferred as returned values to NaNs)
++ */
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return x;
+
+- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+@@ -164,6 +202,9 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
+ /*
+ * Infinity and zero handling
+ */
++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
++ return ieee754sp_inf(xs & ys);
++
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+@@ -171,7 +212,6 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ return x;
+
+- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+@@ -180,9 +220,7 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
+ return y;
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+- if (xs == ys)
+- return x;
+- return ieee754sp_zero(1);
++ return ieee754sp_zero(xs & ys);
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ SPDNORMX;
+@@ -207,7 +245,11 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
+ return y;
+
+ /* Compare mantissa */
+- if (xm <= ym)
++ if (xm < ym)
+ return y;
+- return x;
++ else if (xm > ym)
++ return x;
++ else if (xs == 0)
++ return x;
++ return y;
+ }
+diff --git a/arch/mips/math-emu/sp_fmin.c b/arch/mips/math-emu/sp_fmin.c
+index 4eb1bb9e9dec..c51385f46b09 100644
+--- a/arch/mips/math-emu/sp_fmin.c
++++ b/arch/mips/math-emu/sp_fmin.c
+@@ -47,14 +47,26 @@ union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y)
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ return ieee754sp_nanxcpt(x);
+
+- /* numbers are preferred to NaNs */
++ /*
++ * Quiet NaN handling
++ */
++
++ /*
++ * The case of both inputs quiet NaNs
++ */
++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
++ return x;
++
++ /*
++ * The cases of exactly one input quiet NaN (numbers
++ * are here preferred as returned values to NaNs)
++ */
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return x;
+
+- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+@@ -80,9 +92,7 @@ union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y)
+ return ys ? y : x;
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+- if (xs == ys)
+- return x;
+- return ieee754sp_zero(1);
++ return ieee754sp_zero(xs | ys);
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ SPDNORMX;
+@@ -106,16 +116,32 @@ union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y)
+ else if (xs < ys)
+ return y;
+
+- /* Compare exponent */
+- if (xe > ye)
+- return y;
+- else if (xe < ye)
+- return x;
++ /* Signs of inputs are the same, let's compare exponents */
++ if (xs == 0) {
++ /* Inputs are both positive */
++ if (xe > ye)
++ return y;
++ else if (xe < ye)
++ return x;
++ } else {
++ /* Inputs are both negative */
++ if (xe > ye)
++ return x;
++ else if (xe < ye)
++ return y;
++ }
+
+- /* Compare mantissa */
++ /* Signs and exponents of inputs are equal, let's compare mantissas */
++ if (xs == 0) {
++ /* Inputs are both positive, with equal signs and exponents */
++ if (xm <= ym)
++ return x;
++ return y;
++ }
++ /* Inputs are both negative, with equal signs and exponents */
+ if (xm <= ym)
+- return x;
+- return y;
++ return y;
++ return x;
+ }
+
+ union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
+@@ -147,14 +173,26 @@ union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ return ieee754sp_nanxcpt(x);
+
+- /* numbers are preferred to NaNs */
++ /*
++ * Quiet NaN handling
++ */
++
++ /*
++ * The case of both inputs quiet NaNs
++ */
++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
++ return x;
++
++ /*
++ * The cases of exactly one input quiet NaN (numbers
++ * are here preferred as returned values to NaNs)
++ */
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return x;
+
+- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+@@ -164,25 +202,25 @@ union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
+ /*
+ * Infinity and zero handling
+ */
++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
++ return ieee754sp_inf(xs | ys);
++
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+- return x;
++ return y;
+
+- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+- return y;
++ return x;
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+- if (xs == ys)
+- return x;
+- return ieee754sp_zero(1);
++ return ieee754sp_zero(xs | ys);
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ SPDNORMX;
+@@ -207,7 +245,11 @@ union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
+ return x;
+
+ /* Compare mantissa */
+- if (xm <= ym)
++ if (xm < ym)
++ return x;
++ else if (xm > ym)
++ return y;
++ else if (xs == 1)
+ return x;
+ return y;
+ }
+diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
+index 91e5c1758b5c..64e016abb2a5 100644
+--- a/arch/powerpc/kernel/align.c
++++ b/arch/powerpc/kernel/align.c
+@@ -236,6 +236,28 @@ static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr)
+
+ #define SWIZ_PTR(p) ((unsigned char __user *)((p) ^ swiz))
+
++#define __get_user_or_set_dar(_regs, _dest, _addr) \
++ ({ \
++ int rc = 0; \
++ typeof(_addr) __addr = (_addr); \
++ if (__get_user_inatomic(_dest, __addr)) { \
++ _regs->dar = (unsigned long)__addr; \
++ rc = -EFAULT; \
++ } \
++ rc; \
++ })
++
++#define __put_user_or_set_dar(_regs, _src, _addr) \
++ ({ \
++ int rc = 0; \
++ typeof(_addr) __addr = (_addr); \
++ if (__put_user_inatomic(_src, __addr)) { \
++ _regs->dar = (unsigned long)__addr; \
++ rc = -EFAULT; \
++ } \
++ rc; \
++ })
++
+ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
+ unsigned int reg, unsigned int nb,
+ unsigned int flags, unsigned int instr,
+@@ -264,9 +286,10 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
+ } else {
+ unsigned long pc = regs->nip ^ (swiz & 4);
+
+- if (__get_user_inatomic(instr,
+- (unsigned int __user *)pc))
++ if (__get_user_or_set_dar(regs, instr,
++ (unsigned int __user *)pc))
+ return -EFAULT;
+
+ if (swiz == 0 && (flags & SW))
+ instr = cpu_to_le32(instr);
+ nb = (instr >> 11) & 0x1f;
+@@ -310,31 +333,31 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
+ ((nb0 + 3) / 4) * sizeof(unsigned long));
+
+ for (i = 0; i < nb; ++i, ++p)
+- if (__get_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
+- SWIZ_PTR(p)))
++ if (__get_user_or_set_dar(regs, REG_BYTE(rptr, i ^ bswiz),
++ SWIZ_PTR(p)))
+ return -EFAULT;
+ if (nb0 > 0) {
+ rptr = &regs->gpr[0];
+ addr += nb;
+ for (i = 0; i < nb0; ++i, ++p)
+- if (__get_user_inatomic(REG_BYTE(rptr,
+- i ^ bswiz),
+- SWIZ_PTR(p)))
++ if (__get_user_or_set_dar(regs,
++ REG_BYTE(rptr, i ^ bswiz),
++ SWIZ_PTR(p)))
+ return -EFAULT;
+ }
+
+ } else {
+ for (i = 0; i < nb; ++i, ++p)
+- if (__put_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
+- SWIZ_PTR(p)))
++ if (__put_user_or_set_dar(regs, REG_BYTE(rptr, i ^ bswiz),
++ SWIZ_PTR(p)))
+ return -EFAULT;
+ if (nb0 > 0) {
+ rptr = &regs->gpr[0];
+ addr += nb;
+ for (i = 0; i < nb0; ++i, ++p)
+- if (__put_user_inatomic(REG_BYTE(rptr,
+- i ^ bswiz),
+- SWIZ_PTR(p)))
++ if (__put_user_or_set_dar(regs,
++ REG_BYTE(rptr, i ^ bswiz),
++ SWIZ_PTR(p)))
+ return -EFAULT;
+ }
+ }
+@@ -346,29 +369,32 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
+ * Only POWER6 has these instructions, and it does true little-endian,
+ * so we don't need the address swizzling.
+ */
+-static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
+- unsigned int flags)
++static int emulate_fp_pair(struct pt_regs *regs, unsigned char __user *addr,
++ unsigned int reg, unsigned int flags)
+ {
+ char *ptr0 = (char *) &current->thread.TS_FPR(reg);
+ char *ptr1 = (char *) &current->thread.TS_FPR(reg+1);
+- int i, ret, sw = 0;
++ int i, sw = 0;
+
+ if (reg & 1)
+ return 0; /* invalid form: FRS/FRT must be even */
+ if (flags & SW)
+ sw = 7;
+- ret = 0;
++
+ for (i = 0; i < 8; ++i) {
+ if (!(flags & ST)) {
+- ret |= __get_user(ptr0[i^sw], addr + i);
+- ret |= __get_user(ptr1[i^sw], addr + i + 8);
++ if (__get_user_or_set_dar(regs, ptr0[i^sw], addr + i))
++ return -EFAULT;
++ if (__get_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
++ return -EFAULT;
+ } else {
+- ret |= __put_user(ptr0[i^sw], addr + i);
+- ret |= __put_user(ptr1[i^sw], addr + i + 8);
++ if (__put_user_or_set_dar(regs, ptr0[i^sw], addr + i))
++ return -EFAULT;
++ if (__put_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
++ return -EFAULT;
+ }
+ }
+- if (ret)
+- return -EFAULT;
++
+ return 1; /* exception handled and fixed up */
+ }
+
+@@ -378,24 +404,27 @@ static int emulate_lq_stq(struct pt_regs *regs, unsigned char __user *addr,
+ {
+ char *ptr0 = (char *)&regs->gpr[reg];
+ char *ptr1 = (char *)&regs->gpr[reg+1];
+- int i, ret, sw = 0;
++ int i, sw = 0;
+
+ if (reg & 1)
+ return 0; /* invalid form: GPR must be even */
+ if (flags & SW)
+ sw = 7;
+- ret = 0;
++
+ for (i = 0; i < 8; ++i) {
+ if (!(flags & ST)) {
+- ret |= __get_user(ptr0[i^sw], addr + i);
+- ret |= __get_user(ptr1[i^sw], addr + i + 8);
++ if (__get_user_or_set_dar(regs, ptr0[i^sw], addr + i))
++ return -EFAULT;
++ if (__get_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
++ return -EFAULT;
+ } else {
+- ret |= __put_user(ptr0[i^sw], addr + i);
+- ret |= __put_user(ptr1[i^sw], addr + i + 8);
++ if (__put_user_or_set_dar(regs, ptr0[i^sw], addr + i))
++ return -EFAULT;
++ if (__put_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
++ return -EFAULT;
+ }
+ }
+- if (ret)
+- return -EFAULT;
++
+ return 1; /* exception handled and fixed up */
+ }
+ #endif /* CONFIG_PPC64 */
+@@ -688,9 +717,14 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
+ for (j = 0; j < length; j += elsize) {
+ for (i = 0; i < elsize; ++i) {
+ if (flags & ST)
+- ret |= __put_user(ptr[i^sw], addr + i);
++ ret = __put_user_or_set_dar(regs, ptr[i^sw],
++ addr + i);
+ else
+- ret |= __get_user(ptr[i^sw], addr + i);
++ ret = __get_user_or_set_dar(regs, ptr[i^sw],
++ addr + i);
++
++ if (ret)
++ return ret;
+ }
+ ptr += elsize;
+ #ifdef __LITTLE_ENDIAN__
+@@ -740,7 +774,7 @@ int fix_alignment(struct pt_regs *regs)
+ unsigned int dsisr;
+ unsigned char __user *addr;
+ unsigned long p, swiz;
+- int ret, i;
++ int i;
+ union data {
+ u64 ll;
+ double dd;
+@@ -923,7 +957,7 @@ int fix_alignment(struct pt_regs *regs)
+ if (flags & F) {
+ /* Special case for 16-byte FP loads and stores */
+ PPC_WARN_ALIGNMENT(fp_pair, regs);
+- return emulate_fp_pair(addr, reg, flags);
++ return emulate_fp_pair(regs, addr, reg, flags);
+ } else {
+ #ifdef CONFIG_PPC64
+ /* Special case for 16-byte loads and stores */
+@@ -953,15 +987,12 @@ int fix_alignment(struct pt_regs *regs)
+ }
+
+ data.ll = 0;
+- ret = 0;
+ p = (unsigned long)addr;
+
+ for (i = 0; i < nb; i++)
+- ret |= __get_user_inatomic(data.v[start + i],
+- SWIZ_PTR(p++));
+-
+- if (unlikely(ret))
+- return -EFAULT;
++ if (__get_user_or_set_dar(regs, data.v[start + i],
++ SWIZ_PTR(p++)))
++ return -EFAULT;
+
+ } else if (flags & F) {
+ data.ll = current->thread.TS_FPR(reg);
+@@ -1031,15 +1062,13 @@ int fix_alignment(struct pt_regs *regs)
+ break;
+ }
+
+- ret = 0;
+ p = (unsigned long)addr;
+
+ for (i = 0; i < nb; i++)
+- ret |= __put_user_inatomic(data.v[start + i],
+- SWIZ_PTR(p++));
++ if (__put_user_or_set_dar(regs, data.v[start + i],
++ SWIZ_PTR(p++)))
++ return -EFAULT;
+
+- if (unlikely(ret))
+- return -EFAULT;
+ } else if (flags & F)
+ current->thread.TS_FPR(reg) = data.ll;
+ else
951 +diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
952 +index bcd3d6199464..bb16a58cf7e4 100644
953 +--- a/arch/x86/include/asm/elf.h
954 ++++ b/arch/x86/include/asm/elf.h
955 +@@ -204,6 +204,7 @@ void set_personality_ia32(bool);
956 +
957 + #define ELF_CORE_COPY_REGS(pr_reg, regs) \
958 + do { \
959 ++ unsigned long base; \
960 + unsigned v; \
961 + (pr_reg)[0] = (regs)->r15; \
962 + (pr_reg)[1] = (regs)->r14; \
963 +@@ -226,8 +227,8 @@ do { \
964 + (pr_reg)[18] = (regs)->flags; \
965 + (pr_reg)[19] = (regs)->sp; \
966 + (pr_reg)[20] = (regs)->ss; \
967 +- (pr_reg)[21] = current->thread.fs; \
968 +- (pr_reg)[22] = current->thread.gs; \
969 ++ rdmsrl(MSR_FS_BASE, base); (pr_reg)[21] = base; \
970 ++ rdmsrl(MSR_KERNEL_GS_BASE, base); (pr_reg)[22] = base; \
971 + asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v; \
972 + asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v; \
973 + asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v; \
974 +diff --git a/block/blk-core.c b/block/blk-core.c
975 +index ef083e7a37c5..119658534dfd 100644
976 +--- a/block/blk-core.c
977 ++++ b/block/blk-core.c
978 +@@ -233,7 +233,7 @@ EXPORT_SYMBOL(blk_start_queue_async);
979 + **/
980 + void blk_start_queue(struct request_queue *q)
981 + {
982 +- WARN_ON(!irqs_disabled());
983 ++ WARN_ON(!in_interrupt() && !irqs_disabled());
984 +
985 + queue_flag_clear(QUEUE_FLAG_STOPPED, q);
986 + __blk_run_queue(q);
987 +diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
988 +index b3b0004ea8ac..d12782dc9683 100644
989 +--- a/crypto/algif_skcipher.c
990 ++++ b/crypto/algif_skcipher.c
991 +@@ -143,8 +143,10 @@ static int skcipher_alloc_sgl(struct sock *sk)
992 + sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
993 + sgl->cur = 0;
994 +
995 +- if (sg)
996 ++ if (sg) {
997 + sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
998 ++ sg_unmark_end(sg + (MAX_SGL_ENTS - 1));
999 ++ }
1000 +
1001 + list_add_tail(&sgl->list, &ctx->tsgl);
1002 + }
1003 +diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
1004 +index 586f9168ffa4..47d1e834f3f4 100644
1005 +--- a/drivers/block/skd_main.c
1006 ++++ b/drivers/block/skd_main.c
1007 +@@ -2214,6 +2214,9 @@ static void skd_send_fitmsg(struct skd_device *skdev,
1008 + */
1009 + qcmd |= FIT_QCMD_MSGSIZE_64;
1010 +
1011 ++ /* Make sure skd_msg_buf is written before the doorbell is triggered. */
1012 ++ smp_wmb();
1013 ++
1014 + SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
1015 +
1016 + }
1017 +@@ -2260,6 +2263,9 @@ static void skd_send_special_fitmsg(struct skd_device *skdev,
1018 + qcmd = skspcl->mb_dma_address;
1019 + qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
1020 +
1021 ++ /* Make sure skd_msg_buf is written before the doorbell is triggered. */
1022 ++ smp_wmb();
1023 ++
1024 + SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
1025 + }
1026 +
1027 +@@ -4679,15 +4685,16 @@ static void skd_free_disk(struct skd_device *skdev)
1028 + {
1029 + struct gendisk *disk = skdev->disk;
1030 +
1031 +- if (disk != NULL) {
1032 +- struct request_queue *q = disk->queue;
1033 ++ if (disk && (disk->flags & GENHD_FL_UP))
1034 ++ del_gendisk(disk);
1035 +
1036 +- if (disk->flags & GENHD_FL_UP)
1037 +- del_gendisk(disk);
1038 +- if (q)
1039 +- blk_cleanup_queue(q);
1040 +- put_disk(disk);
1041 ++ if (skdev->queue) {
1042 ++ blk_cleanup_queue(skdev->queue);
1043 ++ skdev->queue = NULL;
1044 ++ disk->queue = NULL;
1045 + }
1046 ++
1047 ++ put_disk(disk);
1048 + skdev->disk = NULL;
1049 + }
1050 +
1051 +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
1052 +index 5be14ad29d46..dbf09836ff30 100644
1053 +--- a/drivers/input/serio/i8042-x86ia64io.h
1054 ++++ b/drivers/input/serio/i8042-x86ia64io.h
1055 +@@ -904,6 +904,13 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
1056 + DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
1057 + },
1058 + },
1059 ++ {
1060 ++ /* Gigabyte P57 - Elantech touchpad */
1061 ++ .matches = {
1062 ++ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
1063 ++ DMI_MATCH(DMI_PRODUCT_NAME, "P57"),
1064 ++ },
1065 ++ },
1066 + {
1067 + /* Schenker XMG C504 - Elantech touchpad */
1068 + .matches = {
1069 +diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
1070 +index c3ea03c9a1a8..02619cabda8b 100644
1071 +--- a/drivers/md/bcache/bcache.h
1072 ++++ b/drivers/md/bcache/bcache.h
1073 +@@ -333,6 +333,7 @@ struct cached_dev {
1074 + /* Limit number of writeback bios in flight */
1075 + struct semaphore in_flight;
1076 + struct task_struct *writeback_thread;
1077 ++ struct workqueue_struct *writeback_write_wq;
1078 +
1079 + struct keybuf writeback_keys;
1080 +
1081 +diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
1082 +index 2410df1c2a05..6c4c7caea693 100644
1083 +--- a/drivers/md/bcache/request.c
1084 ++++ b/drivers/md/bcache/request.c
1085 +@@ -196,12 +196,12 @@ static void bch_data_insert_start(struct closure *cl)
1086 + struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
1087 + struct bio *bio = op->bio, *n;
1088 +
1089 +- if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
1090 +- wake_up_gc(op->c);
1091 +-
1092 + if (op->bypass)
1093 + return bch_data_invalidate(cl);
1094 +
1095 ++ if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
1096 ++ wake_up_gc(op->c);
1097 ++
1098 + /*
1099 + * Journal writes are marked REQ_FLUSH; if the original write was a
1100 + * flush, it'll wait on the journal write.
1101 +diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
1102 +index 7b5880b8874c..c5ceea9222ff 100644
1103 +--- a/drivers/md/bcache/super.c
1104 ++++ b/drivers/md/bcache/super.c
1105 +@@ -1023,7 +1023,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
1106 + }
1107 +
1108 + if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
1109 +- bch_sectors_dirty_init(dc);
1110 ++ bch_sectors_dirty_init(&dc->disk);
1111 + atomic_set(&dc->has_dirty, 1);
1112 + atomic_inc(&dc->count);
1113 + bch_writeback_queue(dc);
1114 +@@ -1056,6 +1056,8 @@ static void cached_dev_free(struct closure *cl)
1115 + cancel_delayed_work_sync(&dc->writeback_rate_update);
1116 + if (!IS_ERR_OR_NULL(dc->writeback_thread))
1117 + kthread_stop(dc->writeback_thread);
1118 ++ if (dc->writeback_write_wq)
1119 ++ destroy_workqueue(dc->writeback_write_wq);
1120 +
1121 + mutex_lock(&bch_register_lock);
1122 +
1123 +@@ -1227,6 +1229,7 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
1124 + goto err;
1125 +
1126 + bcache_device_attach(d, c, u - c->uuids);
1127 ++ bch_sectors_dirty_init(d);
1128 + bch_flash_dev_request_init(d);
1129 + add_disk(d->disk);
1130 +
1131 +@@ -1959,6 +1962,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
1132 + else
1133 + err = "device busy";
1134 + mutex_unlock(&bch_register_lock);
1135 ++ if (!IS_ERR(bdev))
1136 ++ bdput(bdev);
1137 + if (attr == &ksysfs_register_quiet)
1138 + goto out;
1139 + }
1140 +diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
1141 +index b3ff57d61dde..4fbb5532f24c 100644
1142 +--- a/drivers/md/bcache/sysfs.c
1143 ++++ b/drivers/md/bcache/sysfs.c
1144 +@@ -191,7 +191,7 @@ STORE(__cached_dev)
1145 + {
1146 + struct cached_dev *dc = container_of(kobj, struct cached_dev,
1147 + disk.kobj);
1148 +- unsigned v = size;
1149 ++ ssize_t v = size;
1150 + struct cache_set *c;
1151 + struct kobj_uevent_env *env;
1152 +
1153 +@@ -226,7 +226,7 @@ STORE(__cached_dev)
1154 + bch_cached_dev_run(dc);
1155 +
1156 + if (attr == &sysfs_cache_mode) {
1157 +- ssize_t v = bch_read_string_list(buf, bch_cache_modes + 1);
1158 ++ v = bch_read_string_list(buf, bch_cache_modes + 1);
1159 +
1160 + if (v < 0)
1161 + return v;
1162 +diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
1163 +index db3ae4c2b223..6c18e3ec3e48 100644
1164 +--- a/drivers/md/bcache/util.c
1165 ++++ b/drivers/md/bcache/util.c
1166 +@@ -73,24 +73,44 @@ STRTO_H(strtouint, unsigned int)
1167 + STRTO_H(strtoll, long long)
1168 + STRTO_H(strtoull, unsigned long long)
1169 +
1170 ++/**
1171 ++ * bch_hprint() - formats @v to human readable string for sysfs.
1172 ++ *
1173 ++ * @v - signed 64 bit integer
1174 ++ * @buf - the (at least 8 byte) buffer to format the result into.
1175 ++ *
1176 ++ * Returns the number of bytes used by format.
1177 ++ */
1178 + ssize_t bch_hprint(char *buf, int64_t v)
1179 + {
1180 + static const char units[] = "?kMGTPEZY";
1181 +- char dec[4] = "";
1182 +- int u, t = 0;
1183 +-
1184 +- for (u = 0; v >= 1024 || v <= -1024; u++) {
1185 +- t = v & ~(~0 << 10);
1186 +- v >>= 10;
1187 +- }
1188 +-
1189 +- if (!u)
1190 +- return sprintf(buf, "%llu", v);
1191 +-
1192 +- if (v < 100 && v > -100)
1193 +- snprintf(dec, sizeof(dec), ".%i", t / 100);
1194 +-
1195 +- return sprintf(buf, "%lli%s%c", v, dec, units[u]);
1196 ++ int u = 0, t;
1197 ++
1198 ++ uint64_t q;
1199 ++
1200 ++ if (v < 0)
1201 ++ q = -v;
1202 ++ else
1203 ++ q = v;
1204 ++
1205 ++ /* For as long as the number is more than 3 digits, but at least
1206 ++ * once, shift right / divide by 1024. Keep the remainder for
1207 ++ * a digit after the decimal point.
1208 ++ */
1209 ++ do {
1210 ++ u++;
1211 ++
1212 ++ t = q & ~(~0 << 10);
1213 ++ q >>= 10;
1214 ++ } while (q >= 1000);
1215 ++
1216 ++ if (v < 0)
1217 ++ /* '-', up to 3 digits, '.', 1 digit, 1 character, null;
1218 ++ * yields 8 bytes.
1219 ++ */
1220 ++ return sprintf(buf, "-%llu.%i%c", q, t * 10 / 1024, units[u]);
1221 ++ else
1222 ++ return sprintf(buf, "%llu.%i%c", q, t * 10 / 1024, units[u]);
1223 + }
1224 +
1225 + ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[],
1226 +diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
1227 +index b9346cd9cda1..bbb1dc9e1639 100644
1228 +--- a/drivers/md/bcache/writeback.c
1229 ++++ b/drivers/md/bcache/writeback.c
1230 +@@ -21,7 +21,8 @@
1231 + static void __update_writeback_rate(struct cached_dev *dc)
1232 + {
1233 + struct cache_set *c = dc->disk.c;
1234 +- uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size;
1235 ++ uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
1236 ++ bcache_flash_devs_sectors_dirty(c);
1237 + uint64_t cache_dirty_target =
1238 + div_u64(cache_sectors * dc->writeback_percent, 100);
1239 +
1240 +@@ -190,7 +191,7 @@ static void write_dirty(struct closure *cl)
1241 +
1242 + closure_bio_submit(&io->bio, cl);
1243 +
1244 +- continue_at(cl, write_dirty_finish, system_wq);
1245 ++ continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
1246 + }
1247 +
1248 + static void read_dirty_endio(struct bio *bio)
1249 +@@ -210,7 +211,7 @@ static void read_dirty_submit(struct closure *cl)
1250 +
1251 + closure_bio_submit(&io->bio, cl);
1252 +
1253 +- continue_at(cl, write_dirty, system_wq);
1254 ++ continue_at(cl, write_dirty, io->dc->writeback_write_wq);
1255 + }
1256 +
1257 + static void read_dirty(struct cached_dev *dc)
1258 +@@ -488,17 +489,17 @@ static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
1259 + return MAP_CONTINUE;
1260 + }
1261 +
1262 +-void bch_sectors_dirty_init(struct cached_dev *dc)
1263 ++void bch_sectors_dirty_init(struct bcache_device *d)
1264 + {
1265 + struct sectors_dirty_init op;
1266 +
1267 + bch_btree_op_init(&op.op, -1);
1268 +- op.inode = dc->disk.id;
1269 ++ op.inode = d->id;
1270 +
1271 +- bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
1272 ++ bch_btree_map_keys(&op.op, d->c, &KEY(op.inode, 0, 0),
1273 + sectors_dirty_init_fn, 0);
1274 +
1275 +- dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk);
1276 ++ d->sectors_dirty_last = bcache_dev_sectors_dirty(d);
1277 + }
1278 +
1279 + void bch_cached_dev_writeback_init(struct cached_dev *dc)
1280 +@@ -522,6 +523,11 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)
1281 +
1282 + int bch_cached_dev_writeback_start(struct cached_dev *dc)
1283 + {
1284 ++ dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
1285 ++ WQ_MEM_RECLAIM, 0);
1286 ++ if (!dc->writeback_write_wq)
1287 ++ return -ENOMEM;
1288 ++
1289 + dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
1290 + "bcache_writeback");
1291 + if (IS_ERR(dc->writeback_thread))
1292 +diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
1293 +index 073a042aed24..daec4fd782ea 100644
1294 +--- a/drivers/md/bcache/writeback.h
1295 ++++ b/drivers/md/bcache/writeback.h
1296 +@@ -14,6 +14,25 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
1297 + return ret;
1298 + }
1299 +
1300 ++static inline uint64_t bcache_flash_devs_sectors_dirty(struct cache_set *c)
1301 ++{
1302 ++ uint64_t i, ret = 0;
1303 ++
1304 ++ mutex_lock(&bch_register_lock);
1305 ++
1306 ++ for (i = 0; i < c->nr_uuids; i++) {
1307 ++ struct bcache_device *d = c->devices[i];
1308 ++
1309 ++ if (!d || !UUID_FLASH_ONLY(&c->uuids[i]))
1310 ++ continue;
1311 ++ ret += bcache_dev_sectors_dirty(d);
1312 ++ }
1313 ++
1314 ++ mutex_unlock(&bch_register_lock);
1315 ++
1316 ++ return ret;
1317 ++}
1318 ++
1319 + static inline unsigned offset_to_stripe(struct bcache_device *d,
1320 + uint64_t offset)
1321 + {
1322 +@@ -85,7 +104,7 @@ static inline void bch_writeback_add(struct cached_dev *dc)
1323 +
1324 + void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);
1325 +
1326 +-void bch_sectors_dirty_init(struct cached_dev *dc);
1327 ++void bch_sectors_dirty_init(struct bcache_device *);
1328 + void bch_cached_dev_writeback_init(struct cached_dev *);
1329 + int bch_cached_dev_writeback_start(struct cached_dev *);
1330 +
1331 +diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
1332 +index 4f22e919787a..7a50728b9389 100644
1333 +--- a/drivers/md/bitmap.c
1334 ++++ b/drivers/md/bitmap.c
1335 +@@ -1960,6 +1960,11 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
1336 + long pages;
1337 + struct bitmap_page *new_bp;
1338 +
1339 ++ if (bitmap->storage.file && !init) {
1340 ++ pr_info("md: cannot resize file-based bitmap\n");
1341 ++ return -EINVAL;
1342 ++ }
1343 ++
1344 + if (chunksize == 0) {
1345 + /* If there is enough space, leave the chunk size unchanged,
1346 + * else increase by factor of two until there is enough space.
1347 +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
1348 +index 8f60520c8392..5eac08ffc697 100644
1349 +--- a/drivers/md/raid5.c
1350 ++++ b/drivers/md/raid5.c
1351 +@@ -5822,6 +5822,8 @@ static void raid5_do_work(struct work_struct *work)
1352 +
1353 + spin_unlock_irq(&conf->device_lock);
1354 +
1355 ++ r5l_flush_stripe_to_raid(conf->log);
1356 ++
1357 + async_tx_issue_pending_all();
1358 + blk_finish_plug(&plug);
1359 +
1360 +diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
1361 +index 3e59b288b8a8..618e4e2b4207 100644
1362 +--- a/drivers/media/usb/uvc/uvc_ctrl.c
1363 ++++ b/drivers/media/usb/uvc/uvc_ctrl.c
1364 +@@ -2001,6 +2001,13 @@ int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
1365 + goto done;
1366 + }
1367 +
1368 ++ /* Validate the user-provided bit-size and offset */
1369 ++ if (mapping->size > 32 ||
1370 ++ mapping->offset + mapping->size > ctrl->info.size * 8) {
1371 ++ ret = -EINVAL;
1372 ++ goto done;
1373 ++ }
1374 ++
1375 + list_for_each_entry(map, &ctrl->info.mappings, list) {
1376 + if (mapping->id == map->id) {
1377 + uvc_trace(UVC_TRACE_CONTROL, "Can't add mapping '%s', "
1378 +diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
1379 +index 109f687d1cbd..4379b949bb93 100644
1380 +--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
1381 ++++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
1382 +@@ -773,7 +773,8 @@ static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *u
1383 + copy_to_user(&up->u, &kp->u, sizeof(kp->u)) ||
1384 + put_user(kp->pending, &up->pending) ||
1385 + put_user(kp->sequence, &up->sequence) ||
1386 +- compat_put_timespec(&kp->timestamp, &up->timestamp) ||
1387 ++ put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
1388 ++ put_user(kp->timestamp.tv_nsec, &up->timestamp.tv_nsec) ||
1389 + put_user(kp->id, &up->id) ||
1390 + copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32)))
1391 + return -EFAULT;
1392 +diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
1393 +index 4cd2a7d0124f..7923bfdc9b30 100644
1394 +--- a/drivers/net/ethernet/freescale/gianfar.c
1395 ++++ b/drivers/net/ethernet/freescale/gianfar.c
1396 +@@ -3676,7 +3676,7 @@ static noinline void gfar_update_link_state(struct gfar_private *priv)
1397 + u32 tempval1 = gfar_read(&regs->maccfg1);
1398 + u32 tempval = gfar_read(&regs->maccfg2);
1399 + u32 ecntrl = gfar_read(&regs->ecntrl);
1400 +- u32 tx_flow_oldval = (tempval & MACCFG1_TX_FLOW);
1401 ++ u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);
1402 +
1403 + if (phydev->duplex != priv->oldduplex) {
1404 + if (!(phydev->duplex))
1405 +diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
1406 +index 829be21f97b2..be258d90de9e 100644
1407 +--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
1408 ++++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
1409 +@@ -724,7 +724,7 @@ static void ql_build_coredump_seg_header(
1410 + seg_hdr->cookie = MPI_COREDUMP_COOKIE;
1411 + seg_hdr->segNum = seg_number;
1412 + seg_hdr->segSize = seg_size;
1413 +- memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
1414 ++ strncpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
1415 + }
1416 +
1417 + /*
1418 +diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
1419 +index 49d9f0a789fe..7d0690433ee0 100644
1420 +--- a/drivers/net/phy/phy.c
1421 ++++ b/drivers/net/phy/phy.c
1422 +@@ -541,9 +541,6 @@ void phy_stop_machine(struct phy_device *phydev)
1423 + if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
1424 + phydev->state = PHY_UP;
1425 + mutex_unlock(&phydev->lock);
1426 +-
1427 +- /* Now we can run the state machine synchronously */
1428 +- phy_state_machine(&phydev->state_queue.work);
1429 + }
1430 +
1431 + /**
1432 +diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
1433 +index 7d223e9080ef..77dddee2753a 100644
1434 +--- a/drivers/pci/hotplug/shpchp_hpc.c
1435 ++++ b/drivers/pci/hotplug/shpchp_hpc.c
1436 +@@ -1062,6 +1062,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
1437 + if (rc) {
1438 + ctrl_info(ctrl, "Can't get msi for the hotplug controller\n");
1439 + ctrl_info(ctrl, "Use INTx for the hotplug controller\n");
1440 ++ } else {
1441 ++ pci_set_master(pdev);
1442 + }
1443 +
1444 + rc = request_irq(ctrl->pci_dev->irq, shpc_isr, IRQF_SHARED,
1445 +diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
1446 +index d5bf36ec8a75..34367d172961 100644
1447 +--- a/drivers/s390/scsi/zfcp_dbf.c
1448 ++++ b/drivers/s390/scsi/zfcp_dbf.c
1449 +@@ -3,7 +3,7 @@
1450 + *
1451 + * Debug traces for zfcp.
1452 + *
1453 +- * Copyright IBM Corp. 2002, 2016
1454 ++ * Copyright IBM Corp. 2002, 2017
1455 + */
1456 +
1457 + #define KMSG_COMPONENT "zfcp"
1458 +@@ -447,6 +447,7 @@ static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
1459 + struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
1460 + struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
1461 + struct scatterlist *resp_entry = ct_els->resp;
1462 ++ struct fc_ct_hdr *resph;
1463 + struct fc_gpn_ft_resp *acc;
1464 + int max_entries, x, last = 0;
1465 +
1466 +@@ -473,6 +474,13 @@ static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
1467 + return len; /* not GPN_FT response so do not cap */
1468 +
1469 + acc = sg_virt(resp_entry);
1470 ++
1471 ++ /* cap all but accept CT responses to at least the CT header */
1472 ++ resph = (struct fc_ct_hdr *)acc;
1473 ++ if ((ct_els->status) ||
1474 ++ (resph->ct_cmd != cpu_to_be16(FC_FS_ACC)))
1475 ++ return max(FC_CT_HDR_LEN, ZFCP_DBF_SAN_MAX_PAYLOAD);
1476 ++
1477 + max_entries = (reqh->ct_mr_size * 4 / sizeof(struct fc_gpn_ft_resp))
1478 + + 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
1479 + * to account for header as 1st pseudo "entry" */;
1480 +@@ -555,8 +563,8 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
1481 + rec->scsi_retries = sc->retries;
1482 + rec->scsi_allowed = sc->allowed;
1483 + rec->scsi_id = sc->device->id;
1484 +- /* struct zfcp_dbf_scsi needs to be updated to handle 64bit LUNs */
1485 + rec->scsi_lun = (u32)sc->device->lun;
1486 ++ rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32);
1487 + rec->host_scribble = (unsigned long)sc->host_scribble;
1488 +
1489 + memcpy(rec->scsi_opcode, sc->cmnd,
1490 +@@ -564,19 +572,32 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
1491 +
1492 + if (fsf) {
1493 + rec->fsf_req_id = fsf->req_id;
1494 ++ rec->pl_len = FCP_RESP_WITH_EXT;
1495 + fcp_rsp = (struct fcp_resp_with_ext *)
1496 + &(fsf->qtcb->bottom.io.fcp_rsp);
1497 ++ /* mandatory parts of FCP_RSP IU in this SCSI record */
1498 + memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
1499 + if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
1500 + fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
1501 + rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
1502 ++ rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_rsp_len);
1503 + }
1504 + if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
1505 +- rec->pl_len = min((u16)SCSI_SENSE_BUFFERSIZE,
1506 +- (u16)ZFCP_DBF_PAY_MAX_REC);
1507 +- zfcp_dbf_pl_write(dbf, sc->sense_buffer, rec->pl_len,
1508 +- "fcp_sns", fsf->req_id);
1509 ++ rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_sns_len);
1510 + }
1511 ++ /* complete FCP_RSP IU in associated PAYload record
1512 ++ * but only if there are optional parts
1513 ++ */
1514 ++ if (fcp_rsp->resp.fr_flags != 0)
1515 ++ zfcp_dbf_pl_write(
1516 ++ dbf, fcp_rsp,
1517 ++ /* at least one full PAY record
1518 ++ * but not beyond hardware response field
1519 ++ */
1520 ++ min_t(u16, max_t(u16, rec->pl_len,
1521 ++ ZFCP_DBF_PAY_MAX_REC),
1522 ++ FSF_FCP_RSP_SIZE),
1523 ++ "fcp_riu", fsf->req_id);
1524 + }
1525 +
1526 + debug_event(dbf->scsi, level, rec, sizeof(*rec));
1527 +diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
1528 +index db186d44cfaf..b60667c145fd 100644
1529 +--- a/drivers/s390/scsi/zfcp_dbf.h
1530 ++++ b/drivers/s390/scsi/zfcp_dbf.h
1531 +@@ -2,7 +2,7 @@
1532 + * zfcp device driver
1533 + * debug feature declarations
1534 + *
1535 +- * Copyright IBM Corp. 2008, 2016
1536 ++ * Copyright IBM Corp. 2008, 2017
1537 + */
1538 +
1539 + #ifndef ZFCP_DBF_H
1540 +@@ -204,7 +204,7 @@ enum zfcp_dbf_scsi_id {
1541 + * @id: unique number of recovery record type
1542 + * @tag: identifier string specifying the location of initiation
1543 + * @scsi_id: scsi device id
1544 +- * @scsi_lun: scsi device logical unit number
1545 ++ * @scsi_lun: scsi device logical unit number, low part of 64 bit, old 32 bit
1546 + * @scsi_result: scsi result
1547 + * @scsi_retries: current retry number of scsi request
1548 + * @scsi_allowed: allowed retries
1549 +@@ -214,6 +214,7 @@ enum zfcp_dbf_scsi_id {
1550 + * @host_scribble: LLD specific data attached to SCSI request
1551 + * @pl_len: length of paload stored as zfcp_dbf_pay
1552 + * @fsf_rsp: response for fsf request
1553 ++ * @scsi_lun_64_hi: scsi device logical unit number, high part of 64 bit
1554 + */
1555 + struct zfcp_dbf_scsi {
1556 + u8 id;
1557 +@@ -230,6 +231,7 @@ struct zfcp_dbf_scsi {
1558 + u64 host_scribble;
1559 + u16 pl_len;
1560 + struct fcp_resp_with_ext fcp_rsp;
1561 ++ u32 scsi_lun_64_hi;
1562 + } __packed;
1563 +
1564 + /**
1565 +@@ -323,7 +325,11 @@ void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
1566 + {
1567 + struct fsf_qtcb *qtcb = req->qtcb;
1568 +
1569 +- if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
1570 ++ if (unlikely(req->status & (ZFCP_STATUS_FSFREQ_DISMISSED |
1571 ++ ZFCP_STATUS_FSFREQ_ERROR))) {
1572 ++ zfcp_dbf_hba_fsf_resp("fs_rerr", 3, req);
1573 ++
1574 ++ } else if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
1575 + (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
1576 + zfcp_dbf_hba_fsf_resp("fs_perr", 1, req);
1577 +
1578 +@@ -401,7 +407,8 @@ void zfcp_dbf_scsi_abort(char *tag, struct scsi_cmnd *scmd,
1579 + * @flag: indicates type of reset (Target Reset, Logical Unit Reset)
1580 + */
1581 + static inline
1582 +-void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
1583 ++void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag,
1584 ++ struct zfcp_fsf_req *fsf_req)
1585 + {
1586 + char tmp_tag[ZFCP_DBF_TAG_LEN];
1587 +
1588 +@@ -411,7 +418,7 @@ void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
1589 + memcpy(tmp_tag, "lr_", 3);
1590 +
1591 + memcpy(&tmp_tag[3], tag, 4);
1592 +- _zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL);
1593 ++ _zfcp_dbf_scsi(tmp_tag, 1, scmnd, fsf_req);
1594 + }
1595 +
1596 + /**
1597 +diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
1598 +index df2b541c8287..a2275825186f 100644
1599 +--- a/drivers/s390/scsi/zfcp_fc.h
1600 ++++ b/drivers/s390/scsi/zfcp_fc.h
1601 +@@ -4,7 +4,7 @@
1602 + * Fibre Channel related definitions and inline functions for the zfcp
1603 + * device driver
1604 + *
1605 +- * Copyright IBM Corp. 2009
1606 ++ * Copyright IBM Corp. 2009, 2017
1607 + */
1608 +
1609 + #ifndef ZFCP_FC_H
1610 +@@ -279,6 +279,10 @@ void zfcp_fc_eval_fcp_rsp(struct fcp_resp_with_ext *fcp_rsp,
1611 + !(rsp_flags & FCP_SNS_LEN_VAL) &&
1612 + fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
1613 + set_host_byte(scsi, DID_ERROR);
1614 ++ } else if (unlikely(rsp_flags & FCP_RESID_OVER)) {
1615 ++ /* FCP_DL was not sufficient for SCSI data length */
1616 ++ if (fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
1617 ++ set_host_byte(scsi, DID_ERROR);
1618 + }
1619 + }
1620 +
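The new FCP_RESID_OVER branch above catches the case where FCP_DL was too small for the SCSI data length: a transfer that was silently truncated must not be reported as success, so the host byte is forced to DID_ERROR when the SAM status would otherwise read GOOD. The same check in an illustrative standalone form (the flag and status constants are assumptions, not copied from the kernel headers):

    #include <assert.h>
    #include <stdint.h>

    /* Illustrative values only. */
    #define FCP_RESID_OVER 0x04
    #define SAM_STAT_GOOD  0x00
    #define DID_OK         0
    #define DID_ERROR      7

    /* An overrun with GOOD status means data was silently truncated. */
    static int host_byte_for(uint8_t rsp_flags, uint8_t sam_status)
    {
        if ((rsp_flags & FCP_RESID_OVER) && sam_status == SAM_STAT_GOOD)
            return DID_ERROR;
        return DID_OK;
    }

    int main(void)
    {
        assert(host_byte_for(FCP_RESID_OVER, SAM_STAT_GOOD) == DID_ERROR);
        assert(host_byte_for(0, SAM_STAT_GOOD) == DID_OK);
        return 0;
    }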
1621 +diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
1622 +index 27ff38f839fc..1964391db904 100644
1623 +--- a/drivers/s390/scsi/zfcp_fsf.c
1624 ++++ b/drivers/s390/scsi/zfcp_fsf.c
1625 +@@ -928,8 +928,8 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
1626 +
1627 + switch (header->fsf_status) {
1628 + case FSF_GOOD:
1629 +- zfcp_dbf_san_res("fsscth2", req);
1630 + ct->status = 0;
1631 ++ zfcp_dbf_san_res("fsscth2", req);
1632 + break;
1633 + case FSF_SERVICE_CLASS_NOT_SUPPORTED:
1634 + zfcp_fsf_class_not_supp(req);
1635 +@@ -1109,8 +1109,8 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
1636 +
1637 + switch (header->fsf_status) {
1638 + case FSF_GOOD:
1639 +- zfcp_dbf_san_res("fsselh1", req);
1640 + send_els->status = 0;
1641 ++ zfcp_dbf_san_res("fsselh1", req);
1642 + break;
1643 + case FSF_SERVICE_CLASS_NOT_SUPPORTED:
1644 + zfcp_fsf_class_not_supp(req);
1645 +@@ -2258,7 +2258,8 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
1646 + fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
1647 + zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);
1648 +
1649 +- if (scsi_prot_sg_count(scsi_cmnd)) {
1650 ++ if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) &&
1651 ++ scsi_prot_sg_count(scsi_cmnd)) {
1652 + zfcp_qdio_set_data_div(qdio, &req->qdio_req,
1653 + scsi_prot_sg_count(scsi_cmnd));
1654 + retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
1655 +diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
1656 +index 07ffdbb5107f..9bd9b9a29dfc 100644
1657 +--- a/drivers/s390/scsi/zfcp_scsi.c
1658 ++++ b/drivers/s390/scsi/zfcp_scsi.c
1659 +@@ -3,7 +3,7 @@
1660 + *
1661 + * Interface to Linux SCSI midlayer.
1662 + *
1663 +- * Copyright IBM Corp. 2002, 2016
1664 ++ * Copyright IBM Corp. 2002, 2017
1665 + */
1666 +
1667 + #define KMSG_COMPONENT "zfcp"
1668 +@@ -273,25 +273,29 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
1669 +
1670 + zfcp_erp_wait(adapter);
1671 + ret = fc_block_scsi_eh(scpnt);
1672 +- if (ret)
1673 ++ if (ret) {
1674 ++ zfcp_dbf_scsi_devreset("fiof", scpnt, tm_flags, NULL);
1675 + return ret;
1676 ++ }
1677 +
1678 + if (!(atomic_read(&adapter->status) &
1679 + ZFCP_STATUS_COMMON_RUNNING)) {
1680 +- zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags);
1681 ++ zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags, NULL);
1682 + return SUCCESS;
1683 + }
1684 + }
1685 +- if (!fsf_req)
1686 ++ if (!fsf_req) {
1687 ++ zfcp_dbf_scsi_devreset("reqf", scpnt, tm_flags, NULL);
1688 + return FAILED;
1689 ++ }
1690 +
1691 + wait_for_completion(&fsf_req->completion);
1692 +
1693 + if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
1694 +- zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
1695 ++ zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags, fsf_req);
1696 + retval = FAILED;
1697 + } else {
1698 +- zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
1699 ++ zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags, fsf_req);
1700 + zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
1701 + }
1702 +
1703 +diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
1704 +index 17c440b9d086..6835bae33ec4 100644
1705 +--- a/drivers/scsi/megaraid/megaraid_sas_base.c
1706 ++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
1707 +@@ -1824,9 +1824,12 @@ static void megasas_complete_outstanding_ioctls(struct megasas_instance *instanc
1708 + if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
1709 + cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
1710 + if (cmd_mfi->sync_cmd &&
1711 +- cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)
1712 ++ (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) {
1713 ++ cmd_mfi->frame->hdr.cmd_status =
1714 ++ MFI_STAT_WRONG_STATE;
1715 + megasas_complete_cmd(instance,
1716 + cmd_mfi, DID_OK);
1717 ++ }
1718 + }
1719 + }
1720 + } else {
1721 +@@ -5094,6 +5097,14 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
1722 + prev_aen.word =
1723 + le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
1724 +
1725 ++ if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) ||
1726 ++ (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) {
1727 ++ dev_info(&instance->pdev->dev,
1728 ++ "%s %d out of range class %d send by application\n",
1729 ++ __func__, __LINE__, curr_aen.members.class);
1730 ++ return 0;
1731 ++ }
1732 ++
1733 + /*
1734 + * A class whose enum value is smaller is inclusive of all
1735 + * higher values. If a PROGRESS (= -1) was previously
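The added range check rejects an event class taken from an application-controlled ioctl before it participates in the class comparison below. A small sketch of the validate-before-use pattern (the enum values are illustrative stand-ins for MFI_EVT_CLASS_DEBUG through MFI_EVT_CLASS_DEAD):

    #include <assert.h>

    /* Illustrative stand-ins for the MFI event class bounds. */
    enum evt_class {
        CLASS_DEBUG = -2, CLASS_PROGRESS = -1, CLASS_INFO = 0,
        CLASS_WARNING = 1, CLASS_CRITICAL = 2, CLASS_FATAL = 3,
        CLASS_DEAD = 4,
    };

    /* Validate an untrusted class value before using it. */
    static int class_is_valid(int cls)
    {
        return cls >= CLASS_DEBUG && cls <= CLASS_DEAD;
    }

    int main(void)
    {
        assert(class_is_valid(CLASS_INFO));
        assert(!class_is_valid(42));   /* arbitrary userspace input */
        return 0;
    }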
1736 +diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
1737 +index 1ed85dfc008d..ac12ee844bfc 100644
1738 +--- a/drivers/scsi/qla2xxx/qla_attr.c
1739 ++++ b/drivers/scsi/qla2xxx/qla_attr.c
1740 +@@ -404,6 +404,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
1741 + return -EINVAL;
1742 + if (start > ha->optrom_size)
1743 + return -EINVAL;
1744 ++ if (size > ha->optrom_size - start)
1745 ++ size = ha->optrom_size - start;
1746 +
1747 + mutex_lock(&ha->optrom_mutex);
1748 + switch (val) {
1749 +@@ -429,8 +431,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
1750 + }
1751 +
1752 + ha->optrom_region_start = start;
1753 +- ha->optrom_region_size = start + size > ha->optrom_size ?
1754 +- ha->optrom_size - start : size;
1755 ++ ha->optrom_region_size = start + size;
1756 +
1757 + ha->optrom_state = QLA_SREADING;
1758 + ha->optrom_buffer = vmalloc(ha->optrom_region_size);
1759 +@@ -503,8 +504,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
1760 + }
1761 +
1762 + ha->optrom_region_start = start;
1763 +- ha->optrom_region_size = start + size > ha->optrom_size ?
1764 +- ha->optrom_size - start : size;
1765 ++ ha->optrom_region_size = start + size;
1766 +
1767 + ha->optrom_state = QLA_SWRITING;
1768 + ha->optrom_buffer = vmalloc(ha->optrom_region_size);
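Both branches above can now store start + size directly because size is clamped once, up front, against the space remaining after start; the old per-branch expression compared the raw start + size, which could wrap for large user-supplied values. The up-front clamp in isolation (a sketch; it assumes start <= optrom_size was already checked, as in the handler above):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t clamp_size(uint32_t optrom_size, uint32_t start,
                               uint32_t size)
    {
        if (size > optrom_size - start)
            return optrom_size - start;
        return size;
    }

    int main(void)
    {
        /* A huge size can no longer wrap past the region. */
        assert(clamp_size(1024, 1000, 0xffffffffu) == 24);
        assert(clamp_size(1024, 100, 100) == 100);
        return 0;
    }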
1769 +diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
1770 +index 71325972e503..39e8b5dc23fa 100644
1771 +--- a/drivers/scsi/sg.c
1772 ++++ b/drivers/scsi/sg.c
1773 +@@ -133,7 +133,7 @@ struct sg_device; /* forward declarations */
1774 + struct sg_fd;
1775 +
1776 + typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
1777 +- struct sg_request *nextrp; /* NULL -> tail request (slist) */
1778 ++ struct list_head entry; /* list entry */
1779 + struct sg_fd *parentfp; /* NULL -> not in use */
1780 + Sg_scatter_hold data; /* hold buffer, perhaps scatter list */
1781 + sg_io_hdr_t header; /* scsi command+info, see <scsi/sg.h> */
1782 +@@ -157,8 +157,7 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
1783 + int timeout; /* defaults to SG_DEFAULT_TIMEOUT */
1784 + int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */
1785 + Sg_scatter_hold reserve; /* buffer held for this file descriptor */
1786 +- unsigned save_scat_len; /* original length of trunc. scat. element */
1787 +- Sg_request *headrp; /* head of request slist, NULL->empty */
1788 ++ struct list_head rq_list; /* head of request list */
1789 + struct fasync_struct *async_qp; /* used by asynchronous notification */
1790 + Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */
1791 + char low_dma; /* as in parent but possibly overridden to 1 */
1792 +@@ -840,6 +839,39 @@ static int max_sectors_bytes(struct request_queue *q)
1793 + return max_sectors << 9;
1794 + }
1795 +
1796 ++static void
1797 ++sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo)
1798 ++{
1799 ++ Sg_request *srp;
1800 ++ int val;
1801 ++ unsigned int ms;
1802 ++
1803 ++ val = 0;
1804 ++ list_for_each_entry(srp, &sfp->rq_list, entry) {
1805 ++ if (val >= SG_MAX_QUEUE)
1806 ++ break;
1807 ++ rinfo[val].req_state = srp->done + 1;
1808 ++ rinfo[val].problem =
1809 ++ srp->header.masked_status &
1810 ++ srp->header.host_status &
1811 ++ srp->header.driver_status;
1812 ++ if (srp->done)
1813 ++ rinfo[val].duration =
1814 ++ srp->header.duration;
1815 ++ else {
1816 ++ ms = jiffies_to_msecs(jiffies);
1817 ++ rinfo[val].duration =
1818 ++ (ms > srp->header.duration) ?
1819 ++ (ms - srp->header.duration) : 0;
1820 ++ }
1821 ++ rinfo[val].orphan = srp->orphan;
1822 ++ rinfo[val].sg_io_owned = srp->sg_io_owned;
1823 ++ rinfo[val].pack_id = srp->header.pack_id;
1824 ++ rinfo[val].usr_ptr = srp->header.usr_ptr;
1825 ++ val++;
1826 ++ }
1827 ++}
1828 ++
1829 + static long
1830 + sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
1831 + {
1832 +@@ -951,7 +983,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
1833 + if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
1834 + return -EFAULT;
1835 + read_lock_irqsave(&sfp->rq_list_lock, iflags);
1836 +- for (srp = sfp->headrp; srp; srp = srp->nextrp) {
1837 ++ list_for_each_entry(srp, &sfp->rq_list, entry) {
1838 + if ((1 == srp->done) && (!srp->sg_io_owned)) {
1839 + read_unlock_irqrestore(&sfp->rq_list_lock,
1840 + iflags);
1841 +@@ -964,7 +996,8 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
1842 + return 0;
1843 + case SG_GET_NUM_WAITING:
1844 + read_lock_irqsave(&sfp->rq_list_lock, iflags);
1845 +- for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) {
1846 ++ val = 0;
1847 ++ list_for_each_entry(srp, &sfp->rq_list, entry) {
1848 + if ((1 == srp->done) && (!srp->sg_io_owned))
1849 + ++val;
1850 + }
1851 +@@ -1032,42 +1065,15 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
1852 + return -EFAULT;
1853 + else {
1854 + sg_req_info_t *rinfo;
1855 +- unsigned int ms;
1856 +
1857 +- rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
1858 +- GFP_KERNEL);
1859 ++ rinfo = kzalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
1860 ++ GFP_KERNEL);
1861 + if (!rinfo)
1862 + return -ENOMEM;
1863 + read_lock_irqsave(&sfp->rq_list_lock, iflags);
1864 +- for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
1865 +- ++val, srp = srp ? srp->nextrp : srp) {
1866 +- memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
1867 +- if (srp) {
1868 +- rinfo[val].req_state = srp->done + 1;
1869 +- rinfo[val].problem =
1870 +- srp->header.masked_status &
1871 +- srp->header.host_status &
1872 +- srp->header.driver_status;
1873 +- if (srp->done)
1874 +- rinfo[val].duration =
1875 +- srp->header.duration;
1876 +- else {
1877 +- ms = jiffies_to_msecs(jiffies);
1878 +- rinfo[val].duration =
1879 +- (ms > srp->header.duration) ?
1880 +- (ms - srp->header.duration) : 0;
1881 +- }
1882 +- rinfo[val].orphan = srp->orphan;
1883 +- rinfo[val].sg_io_owned =
1884 +- srp->sg_io_owned;
1885 +- rinfo[val].pack_id =
1886 +- srp->header.pack_id;
1887 +- rinfo[val].usr_ptr =
1888 +- srp->header.usr_ptr;
1889 +- }
1890 +- }
1891 ++ sg_fill_request_table(sfp, rinfo);
1892 + read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
1893 +- result = __copy_to_user(p, rinfo,
1894 ++ result = __copy_to_user(p, rinfo,
1895 + SZ_SG_REQ_INFO * SG_MAX_QUEUE);
1896 + result = result ? -EFAULT : 0;
1897 + kfree(rinfo);
1898 +@@ -1173,7 +1179,7 @@ sg_poll(struct file *filp, poll_table * wait)
1899 + return POLLERR;
1900 + poll_wait(filp, &sfp->read_wait, wait);
1901 + read_lock_irqsave(&sfp->rq_list_lock, iflags);
1902 +- for (srp = sfp->headrp; srp; srp = srp->nextrp) {
1903 ++ list_for_each_entry(srp, &sfp->rq_list, entry) {
1904 + /* if any read waiting, flag it */
1905 + if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
1906 + res = POLLIN | POLLRDNORM;
1907 +@@ -2059,7 +2065,6 @@ sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
1908 + req_schp->pages = NULL;
1909 + req_schp->page_order = 0;
1910 + req_schp->sglist_len = 0;
1911 +- sfp->save_scat_len = 0;
1912 + srp->res_used = 0;
1913 + /* Called without mutex lock to avoid deadlock */
1914 + sfp->res_in_use = 0;
1915 +@@ -2072,7 +2077,7 @@ sg_get_rq_mark(Sg_fd * sfp, int pack_id)
1916 + unsigned long iflags;
1917 +
1918 + write_lock_irqsave(&sfp->rq_list_lock, iflags);
1919 +- for (resp = sfp->headrp; resp; resp = resp->nextrp) {
1920 ++ list_for_each_entry(resp, &sfp->rq_list, entry) {
1921 + /* look for requests that are ready + not SG_IO owned */
1922 + if ((1 == resp->done) && (!resp->sg_io_owned) &&
1923 + ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
1924 +@@ -2090,70 +2095,45 @@ sg_add_request(Sg_fd * sfp)
1925 + {
1926 + int k;
1927 + unsigned long iflags;
1928 +- Sg_request *resp;
1929 + Sg_request *rp = sfp->req_arr;
1930 +
1931 + write_lock_irqsave(&sfp->rq_list_lock, iflags);
1932 +- resp = sfp->headrp;
1933 +- if (!resp) {
1934 +- memset(rp, 0, sizeof (Sg_request));
1935 +- rp->parentfp = sfp;
1936 +- resp = rp;
1937 +- sfp->headrp = resp;
1938 +- } else {
1939 +- if (0 == sfp->cmd_q)
1940 +- resp = NULL; /* command queuing disallowed */
1941 +- else {
1942 +- for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
1943 +- if (!rp->parentfp)
1944 +- break;
1945 +- }
1946 +- if (k < SG_MAX_QUEUE) {
1947 +- memset(rp, 0, sizeof (Sg_request));
1948 +- rp->parentfp = sfp;
1949 +- while (resp->nextrp)
1950 +- resp = resp->nextrp;
1951 +- resp->nextrp = rp;
1952 +- resp = rp;
1953 +- } else
1954 +- resp = NULL;
1955 ++ if (!list_empty(&sfp->rq_list)) {
1956 ++ if (!sfp->cmd_q)
1957 ++ goto out_unlock;
1958 ++
1959 ++ for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
1960 ++ if (!rp->parentfp)
1961 ++ break;
1962 + }
1963 ++ if (k >= SG_MAX_QUEUE)
1964 ++ goto out_unlock;
1965 + }
1966 +- if (resp) {
1967 +- resp->nextrp = NULL;
1968 +- resp->header.duration = jiffies_to_msecs(jiffies);
1969 +- }
1970 ++ memset(rp, 0, sizeof (Sg_request));
1971 ++ rp->parentfp = sfp;
1972 ++ rp->header.duration = jiffies_to_msecs(jiffies);
1973 ++ list_add_tail(&rp->entry, &sfp->rq_list);
1974 + write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
1975 +- return resp;
1976 ++ return rp;
1977 ++out_unlock:
1978 ++ write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
1979 ++ return NULL;
1980 + }
1981 +
1982 + /* Return of 1 for found; 0 for not found */
1983 + static int
1984 + sg_remove_request(Sg_fd * sfp, Sg_request * srp)
1985 + {
1986 +- Sg_request *prev_rp;
1987 +- Sg_request *rp;
1988 + unsigned long iflags;
1989 + int res = 0;
1990 +
1991 +- if ((!sfp) || (!srp) || (!sfp->headrp))
1992 ++ if (!sfp || !srp || list_empty(&sfp->rq_list))
1993 + return res;
1994 + write_lock_irqsave(&sfp->rq_list_lock, iflags);
1995 +- prev_rp = sfp->headrp;
1996 +- if (srp == prev_rp) {
1997 +- sfp->headrp = prev_rp->nextrp;
1998 +- prev_rp->parentfp = NULL;
1999 ++ if (!list_empty(&srp->entry)) {
2000 ++ list_del(&srp->entry);
2001 ++ srp->parentfp = NULL;
2002 + res = 1;
2003 +- } else {
2004 +- while ((rp = prev_rp->nextrp)) {
2005 +- if (srp == rp) {
2006 +- prev_rp->nextrp = rp->nextrp;
2007 +- rp->parentfp = NULL;
2008 +- res = 1;
2009 +- break;
2010 +- }
2011 +- prev_rp = rp;
2012 +- }
2013 + }
2014 + write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2015 + return res;
2016 +@@ -2172,7 +2152,7 @@ sg_add_sfp(Sg_device * sdp)
2017 +
2018 + init_waitqueue_head(&sfp->read_wait);
2019 + rwlock_init(&sfp->rq_list_lock);
2020 +-
2021 ++ INIT_LIST_HEAD(&sfp->rq_list);
2022 + kref_init(&sfp->f_ref);
2023 + mutex_init(&sfp->f_mutex);
2024 + sfp->timeout = SG_DEFAULT_TIMEOUT;
2025 +@@ -2213,10 +2193,13 @@ sg_remove_sfp_usercontext(struct work_struct *work)
2026 + {
2027 + struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
2028 + struct sg_device *sdp = sfp->parentdp;
2029 ++ Sg_request *srp;
2030 +
2031 + /* Cleanup any responses which were never read(). */
2032 +- while (sfp->headrp)
2033 +- sg_finish_rem_req(sfp->headrp);
2034 ++ while (!list_empty(&sfp->rq_list)) {
2035 ++ srp = list_first_entry(&sfp->rq_list, Sg_request, entry);
2036 ++ sg_finish_rem_req(srp);
2037 ++ }
2038 +
2039 + if (sfp->reserve.bufflen > 0) {
2040 + SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
2041 +@@ -2619,7 +2602,7 @@ static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
2042 + /* must be called while holding sg_index_lock */
2043 + static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
2044 + {
2045 +- int k, m, new_interface, blen, usg;
2046 ++ int k, new_interface, blen, usg;
2047 + Sg_request *srp;
2048 + Sg_fd *fp;
2049 + const sg_io_hdr_t *hp;
2050 +@@ -2639,13 +2622,11 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
2051 + seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=0\n",
2052 + (int) fp->cmd_q, (int) fp->force_packid,
2053 + (int) fp->keep_orphan);
2054 +- for (m = 0, srp = fp->headrp;
2055 +- srp != NULL;
2056 +- ++m, srp = srp->nextrp) {
2057 ++ list_for_each_entry(srp, &fp->rq_list, entry) {
2058 + hp = &srp->header;
2059 + new_interface = (hp->interface_id == '\0') ? 0 : 1;
2060 + if (srp->res_used) {
2061 +- if (new_interface &&
2062 ++ if (new_interface &&
2063 + (SG_FLAG_MMAP_IO & hp->flags))
2064 + cp = " mmap>> ";
2065 + else
2066 +@@ -2676,7 +2657,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
2067 + seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
2068 + (int) srp->data.cmd_opcode);
2069 + }
2070 +- if (0 == m)
2071 ++ if (list_empty(&fp->rq_list))
2072 + seq_puts(s, " No requests active\n");
2073 + read_unlock(&fp->rq_list_lock);
2074 + }
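The sg rework above retires the hand-rolled singly linked list (nextrp/headrp) in favor of the kernel's intrusive struct list_head, so insertion, removal, and traversal all go through one well-tested idiom. A self-contained userspace sketch of that idiom (the helpers are trimmed re-implementations in the spirit of <linux/list.h>, not the kernel headers themselves):

    #include <assert.h>
    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

    static void list_add_tail(struct list_head *n, struct list_head *h)
    {
        n->prev = h->prev; n->next = h;
        h->prev->next = n; h->prev = n;
    }

    static void list_del(struct list_head *n)  /* behaves like list_del_init */
    {
        n->prev->next = n->next; n->next->prev = n->prev;
        n->next = n->prev = n;
    }

    struct req { int id; struct list_head entry; };

    int main(void)
    {
        struct list_head rq_list;
        struct req a = { .id = 1 }, b = { .id = 2 };
        struct list_head *pos;
        int sum = 0;

        INIT_LIST_HEAD(&rq_list);
        list_add_tail(&a.entry, &rq_list);
        list_add_tail(&b.entry, &rq_list);

        /* open-coded list_for_each_entry() */
        for (pos = rq_list.next; pos != &rq_list; pos = pos->next)
            sum += container_of(pos, struct req, entry)->id;
        assert(sum == 3);

        list_del(&a.entry);
        assert(rq_list.next == &b.entry);
        return 0;
    }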
2075 +diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
2076 +index cd5c1c060481..6df2841cb7f9 100644
2077 +--- a/drivers/scsi/storvsc_drv.c
2078 ++++ b/drivers/scsi/storvsc_drv.c
2079 +@@ -1511,6 +1511,8 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
2080 + ret = storvsc_do_io(dev, cmd_request);
2081 +
2082 + if (ret == -EAGAIN) {
2083 ++ if (payload_sz > sizeof(cmd_request->mpb))
2084 ++ kfree(payload);
2085 + /* no more space */
2086 + return SCSI_MLQUEUE_DEVICE_BUSY;
2087 + }
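The two added lines free a payload descriptor that is kmalloc'ed only when the request does not fit the preallocated multipage buffer (payload_sz > sizeof(cmd_request->mpb)); without them, every requeue on a full channel leaked that allocation. The same error-path shape in a standalone sketch (names and sizes are illustrative):

    #include <stdlib.h>
    #include <string.h>

    #define INLINE_SZ 64
    #define EAGAIN    11

    static int do_io(void) { return -EAGAIN; /* channel full */ }

    static int queue_command(size_t payload_sz)
    {
        char inline_buf[INLINE_SZ];
        char *payload = inline_buf;
        int ret;

        if (payload_sz > sizeof(inline_buf)) {
            payload = malloc(payload_sz);
            if (!payload)
                return -1;
        }
        memset(payload, 0, payload_sz);

        ret = do_io();
        if (ret == -EAGAIN) {
            if (payload_sz > sizeof(inline_buf))
                free(payload);     /* the leak the hunk above fixes */
            return 1;              /* "requeue: device busy" */
        }
        return ret;
    }

    int main(void) { return queue_command(128) == 1 ? 0 : 1; }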
2088 +diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
2089 +index fb31eecb708d..8f3566cde3eb 100644
2090 +--- a/drivers/tty/tty_buffer.c
2091 ++++ b/drivers/tty/tty_buffer.c
2092 +@@ -361,6 +361,32 @@ int tty_insert_flip_string_flags(struct tty_port *port,
2093 + }
2094 + EXPORT_SYMBOL(tty_insert_flip_string_flags);
2095 +
2096 ++/**
2097 ++ * __tty_insert_flip_char - Add one character to the tty buffer
2098 ++ * @port: tty port
2099 ++ * @ch: character
2100 ++ * @flag: flag byte
2101 ++ *
2102 ++ * Queue a single byte to the tty buffering, with an optional flag.
2103 ++ * This is the slow path of tty_insert_flip_char.
2104 ++ */
2105 ++int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag)
2106 ++{
2107 ++ struct tty_buffer *tb;
2108 ++ int flags = (flag == TTY_NORMAL) ? TTYB_NORMAL : 0;
2109 ++
2110 ++ if (!__tty_buffer_request_room(port, 1, flags))
2111 ++ return 0;
2112 ++
2113 ++ tb = port->buf.tail;
2114 ++ if (~tb->flags & TTYB_NORMAL)
2115 ++ *flag_buf_ptr(tb, tb->used) = flag;
2116 ++ *char_buf_ptr(tb, tb->used++) = ch;
2117 ++
2118 ++ return 1;
2119 ++}
2120 ++EXPORT_SYMBOL(__tty_insert_flip_char);
2121 ++
2122 + /**
2123 + * tty_schedule_flip - push characters to ldisc
2124 + * @port: tty port to push from
2125 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
2126 +index 68345a9e59b8..32941cd6d34b 100644
2127 +--- a/fs/ext4/super.c
2128 ++++ b/fs/ext4/super.c
2129 +@@ -2205,6 +2205,7 @@ static void ext4_orphan_cleanup(struct super_block *sb,
2130 + unsigned int s_flags = sb->s_flags;
2131 + int nr_orphans = 0, nr_truncates = 0;
2132 + #ifdef CONFIG_QUOTA
2133 ++ int quota_update = 0;
2134 + int i;
2135 + #endif
2136 + if (!es->s_last_orphan) {
2137 +@@ -2243,14 +2244,32 @@ static void ext4_orphan_cleanup(struct super_block *sb,
2138 + #ifdef CONFIG_QUOTA
2139 + /* Needed for iput() to work correctly and not trash data */
2140 + sb->s_flags |= MS_ACTIVE;
2141 +- /* Turn on quotas so that they are updated correctly */
2142 ++
2143 ++ /*
2144 ++ * Turn on quotas which were not enabled for read-only mounts if
2145 ++ * filesystem has quota feature, so that they are updated correctly.
2146 ++ */
2147 ++ if (ext4_has_feature_quota(sb) && (s_flags & MS_RDONLY)) {
2148 ++ int ret = ext4_enable_quotas(sb);
2149 ++
2150 ++ if (!ret)
2151 ++ quota_update = 1;
2152 ++ else
2153 ++ ext4_msg(sb, KERN_ERR,
2154 ++ "Cannot turn on quotas: error %d", ret);
2155 ++ }
2156 ++
2157 ++ /* Turn on journaled quotas used for the old style */
2158 + for (i = 0; i < EXT4_MAXQUOTAS; i++) {
2159 + if (EXT4_SB(sb)->s_qf_names[i]) {
2160 + int ret = ext4_quota_on_mount(sb, i);
2161 +- if (ret < 0)
2162 ++
2163 ++ if (!ret)
2164 ++ quota_update = 1;
2165 ++ else
2166 + ext4_msg(sb, KERN_ERR,
2167 + "Cannot turn on journaled "
2168 +- "quota: error %d", ret);
2169 ++ "quota: type %d: error %d", i, ret);
2170 + }
2171 + }
2172 + #endif
2173 +@@ -2309,10 +2328,12 @@ static void ext4_orphan_cleanup(struct super_block *sb,
2174 + ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
2175 + PLURAL(nr_truncates));
2176 + #ifdef CONFIG_QUOTA
2177 +- /* Turn quotas off */
2178 +- for (i = 0; i < EXT4_MAXQUOTAS; i++) {
2179 +- if (sb_dqopt(sb)->files[i])
2180 +- dquot_quota_off(sb, i);
2181 ++ /* Turn off quotas if they were enabled for orphan cleanup */
2182 ++ if (quota_update) {
2183 ++ for (i = 0; i < EXT4_MAXQUOTAS; i++) {
2184 ++ if (sb_dqopt(sb)->files[i])
2185 ++ dquot_quota_off(sb, i);
2186 ++ }
2187 + }
2188 + #endif
2189 + sb->s_flags = s_flags; /* Restore MS_RDONLY status */
2190 +@@ -5120,6 +5141,9 @@ static int ext4_enable_quotas(struct super_block *sb)
2191 + err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
2192 + DQUOT_USAGE_ENABLED);
2193 + if (err) {
2194 ++ for (type--; type >= 0; type--)
2195 ++ dquot_quota_off(sb, type);
2196 ++
2197 + ext4_warning(sb,
2198 + "Failed to enable quota tracking "
2199 + "(type=%d, err=%d). Please run "
2200 +diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
2201 +index cbf74f47cce8..e32f349f341b 100644
2202 +--- a/fs/f2fs/recovery.c
2203 ++++ b/fs/f2fs/recovery.c
2204 +@@ -276,7 +276,7 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
2205 + return 0;
2206 +
2207 + /* Get the previous summary */
2208 +- for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
2209 ++ for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
2210 + struct curseg_info *curseg = CURSEG_I(sbi, i);
2211 + if (curseg->segno == segno) {
2212 + sum = curseg->sum_blk->entries[blkoff];
2213 +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
2214 +index c7f1ce41442a..9e5a6842346e 100644
2215 +--- a/fs/nfsd/nfs4state.c
2216 ++++ b/fs/nfsd/nfs4state.c
2217 +@@ -1145,9 +1145,7 @@ static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
2218 +
2219 + static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
2220 + {
2221 +- struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
2222 +-
2223 +- lockdep_assert_held(&oo->oo_owner.so_client->cl_lock);
2224 ++ lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
2225 +
2226 + list_del_init(&stp->st_locks);
2227 + nfs4_unhash_stid(&stp->st_stid);
2228 +@@ -1156,12 +1154,12 @@ static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
2229 +
2230 + static void release_lock_stateid(struct nfs4_ol_stateid *stp)
2231 + {
2232 +- struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
2233 ++ struct nfs4_client *clp = stp->st_stid.sc_client;
2234 + bool unhashed;
2235 +
2236 +- spin_lock(&oo->oo_owner.so_client->cl_lock);
2237 ++ spin_lock(&clp->cl_lock);
2238 + unhashed = unhash_lock_stateid(stp);
2239 +- spin_unlock(&oo->oo_owner.so_client->cl_lock);
2240 ++ spin_unlock(&clp->cl_lock);
2241 + if (unhashed)
2242 + nfs4_put_stid(&stp->st_stid);
2243 + }
2244 +diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h
2245 +index c28dd523f96e..d43837f2ce3a 100644
2246 +--- a/include/linux/tty_flip.h
2247 ++++ b/include/linux/tty_flip.h
2248 +@@ -12,6 +12,7 @@ extern int tty_prepare_flip_string(struct tty_port *port,
2249 + unsigned char **chars, size_t size);
2250 + extern void tty_flip_buffer_push(struct tty_port *port);
2251 + void tty_schedule_flip(struct tty_port *port);
2252 ++int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag);
2253 +
2254 + static inline int tty_insert_flip_char(struct tty_port *port,
2255 + unsigned char ch, char flag)
2256 +@@ -26,7 +27,7 @@ static inline int tty_insert_flip_char(struct tty_port *port,
2257 + *char_buf_ptr(tb, tb->used++) = ch;
2258 + return 1;
2259 + }
2260 +- return tty_insert_flip_string_flags(port, &ch, &flag, 1);
2261 ++ return __tty_insert_flip_char(port, ch, flag);
2262 + }
2263 +
2264 + static inline int tty_insert_flip_string(struct tty_port *port,
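With this header change, tty_insert_flip_char() keeps only the common case inline (room left in the current buffer) and hands the full-buffer case to the new out-of-line __tty_insert_flip_char() instead of the heavier string-based helper. A sketch of that inline-fast-path/out-of-line-slow-path split (the buffer handling is heavily simplified):

    #include <assert.h>

    #define BUFSZ 8
    static char buf[BUFSZ];
    static int used;

    /* Slow path, kept out of line: get a fresh buffer, then insert. */
    static int insert_slow(char ch)
    {
        used = 0;                  /* simplified "request more room" */
        buf[used++] = ch;
        return 1;
    }

    /* Fast path, inline in a header: space is already available. */
    static inline int insert(char ch)
    {
        if (used < BUFSZ) {
            buf[used++] = ch;
            return 1;
        }
        return insert_slow(ch);
    }

    int main(void)
    {
        for (int i = 0; i < BUFSZ; i++)
            assert(insert('x') == 1);
        assert(insert('y') == 1);      /* slow path taken once */
        assert(used == 1 && buf[0] == 'y');
        return 0;
    }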
2265 +diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
2266 +index ac42bbb37b2d..c26a6e4dc306 100644
2267 +--- a/include/net/inet_frag.h
2268 ++++ b/include/net/inet_frag.h
2269 +@@ -1,14 +1,9 @@
2270 + #ifndef __NET_FRAG_H__
2271 + #define __NET_FRAG_H__
2272 +
2273 +-#include <linux/percpu_counter.h>
2274 +-
2275 + struct netns_frags {
2276 +- /* The percpu_counter "mem" need to be cacheline aligned.
2277 +- * mem.count must not share cacheline with other writers
2278 +- */
2279 +- struct percpu_counter mem ____cacheline_aligned_in_smp;
2280 +-
2281 ++ /* Keep atomic mem on separate cachelines in structs that include it */
2282 ++ atomic_t mem ____cacheline_aligned_in_smp;
2283 + /* sysctls */
2284 + int timeout;
2285 + int high_thresh;
2286 +@@ -108,15 +103,10 @@ struct inet_frags {
2287 + int inet_frags_init(struct inet_frags *);
2288 + void inet_frags_fini(struct inet_frags *);
2289 +
2290 +-static inline int inet_frags_init_net(struct netns_frags *nf)
2291 +-{
2292 +- return percpu_counter_init(&nf->mem, 0, GFP_KERNEL);
2293 +-}
2294 +-static inline void inet_frags_uninit_net(struct netns_frags *nf)
2295 ++static inline void inet_frags_init_net(struct netns_frags *nf)
2296 + {
2297 +- percpu_counter_destroy(&nf->mem);
2298 ++ atomic_set(&nf->mem, 0);
2299 + }
2300 +-
2301 + void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
2302 +
2303 + void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
2304 +@@ -140,37 +130,24 @@ static inline bool inet_frag_evicting(struct inet_frag_queue *q)
2305 +
2306 + /* Memory Tracking Functions. */
2307 +
2308 +-/* The default percpu_counter batch size is not big enough to scale to
2309 +- * fragmentation mem acct sizes.
2310 +- * The mem size of a 64K fragment is approx:
2311 +- * (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
2312 +- */
2313 +-static unsigned int frag_percpu_counter_batch = 130000;
2314 +-
2315 + static inline int frag_mem_limit(struct netns_frags *nf)
2316 + {
2317 +- return percpu_counter_read(&nf->mem);
2318 ++ return atomic_read(&nf->mem);
2319 + }
2320 +
2321 + static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
2322 + {
2323 +- __percpu_counter_add(&nf->mem, -i, frag_percpu_counter_batch);
2324 ++ atomic_sub(i, &nf->mem);
2325 + }
2326 +
2327 + static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
2328 + {
2329 +- __percpu_counter_add(&nf->mem, i, frag_percpu_counter_batch);
2330 ++ atomic_add(i, &nf->mem);
2331 + }
2332 +
2333 +-static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
2334 ++static inline int sum_frag_mem_limit(struct netns_frags *nf)
2335 + {
2336 +- unsigned int res;
2337 +-
2338 +- local_bh_disable();
2339 +- res = percpu_counter_sum_positive(&nf->mem);
2340 +- local_bh_enable();
2341 +-
2342 +- return res;
2343 ++ return atomic_read(&nf->mem);
2344 + }
2345 +
2346 + /* RFC 3168 support :
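The comment deleted above explains the trade-off being reversed: the percpu_counter needed a 130000-entry batch to scale, which made frag_mem_limit() reads imprecise by up to one batch per CPU; the plain atomic_t is exact at the cost of a shared cache line. The resulting interface, sketched with C11 atomics:

    #include <assert.h>
    #include <stdatomic.h>

    static atomic_int frag_mem;    /* stands in for netns_frags.mem */

    static void add_frag_mem_limit(int i) { atomic_fetch_add(&frag_mem, i); }
    static void sub_frag_mem_limit(int i) { atomic_fetch_sub(&frag_mem, i); }
    static int  frag_mem_limit(void)      { return atomic_load(&frag_mem); }

    int main(void)
    {
        add_frag_mem_limit(4096);
        sub_frag_mem_limit(1024);
        assert(frag_mem_limit() == 3072);   /* exact, no batching slack */
        return 0;
    }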
2347 +diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
2348 +index fb961a576abe..fa5e703a14ed 100644
2349 +--- a/include/net/ip6_fib.h
2350 ++++ b/include/net/ip6_fib.h
2351 +@@ -68,6 +68,7 @@ struct fib6_node {
2352 + __u16 fn_flags;
2353 + int fn_sernum;
2354 + struct rt6_info *rr_ptr;
2355 ++ struct rcu_head rcu;
2356 + };
2357 +
2358 + #ifndef CONFIG_IPV6_SUBTREES
2359 +@@ -102,7 +103,7 @@ struct rt6_info {
2360 + * the same cache line.
2361 + */
2362 + struct fib6_table *rt6i_table;
2363 +- struct fib6_node *rt6i_node;
2364 ++ struct fib6_node __rcu *rt6i_node;
2365 +
2366 + struct in6_addr rt6i_gateway;
2367 +
2368 +@@ -165,13 +166,40 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout)
2369 + rt0->rt6i_flags |= RTF_EXPIRES;
2370 + }
2371 +
2372 ++/* Function to safely get fn->sernum for the passed-in rt
2373 ++ * and store the result in the passed-in cookie.
2374 ++ * Returns true if the cookie could be read safely,
2375 ++ * false otherwise.
2376 ++ */
2377 ++static inline bool rt6_get_cookie_safe(const struct rt6_info *rt,
2378 ++ u32 *cookie)
2379 ++{
2380 ++ struct fib6_node *fn;
2381 ++ bool status = false;
2382 ++
2383 ++ rcu_read_lock();
2384 ++ fn = rcu_dereference(rt->rt6i_node);
2385 ++
2386 ++ if (fn) {
2387 ++ *cookie = fn->fn_sernum;
2388 ++ status = true;
2389 ++ }
2390 ++
2391 ++ rcu_read_unlock();
2392 ++ return status;
2393 ++}
2394 ++
2395 + static inline u32 rt6_get_cookie(const struct rt6_info *rt)
2396 + {
2397 ++ u32 cookie = 0;
2398 ++
2399 + if (rt->rt6i_flags & RTF_PCPU ||
2400 + (unlikely(rt->dst.flags & DST_NOCACHE) && rt->dst.from))
2401 + rt = (struct rt6_info *)(rt->dst.from);
2402 +
2403 +- return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
2404 ++ rt6_get_cookie_safe(rt, &cookie);
2405 ++
2406 ++ return cookie;
2407 + }
2408 +
2409 + static inline void ip6_rt_put(struct rt6_info *rt)
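rt6_get_cookie_safe() above reads rt6i_node under rcu_read_lock() and reports whether a sernum was actually obtained, instead of chasing a pointer that a concurrent fib6 delete may already have cleared. A userspace approximation of the read side (rcu_dereference() is modelled as an acquire load; real RCU additionally needs read-side critical sections and a grace period before the node is freed):

    #include <assert.h>
    #include <stdatomic.h>

    struct node { int fn_sernum; };

    static _Atomic(struct node *) rt6i_node;   /* published by the writer */

    static int get_cookie_safe(unsigned int *cookie)
    {
        struct node *fn =
            atomic_load_explicit(&rt6i_node, memory_order_acquire);

        if (!fn)
            return 0;              /* route not (or no longer) in the tree */
        *cookie = (unsigned int)fn->fn_sernum;
        return 1;
    }

    int main(void)
    {
        static struct node fn = { .fn_sernum = 42 };
        unsigned int cookie = 0;

        assert(!get_cookie_safe(&cookie));   /* nothing published yet */
        atomic_store_explicit(&rt6i_node, &fn, memory_order_release);
        assert(get_cookie_safe(&cookie) && cookie == 42);
        return 0;
    }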
2410 +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
2411 +index eba904bae48c..38d73a6e2857 100644
2412 +--- a/kernel/trace/ftrace.c
2413 ++++ b/kernel/trace/ftrace.c
2414 +@@ -2667,13 +2667,14 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
2415 +
2416 + if (!command || !ftrace_enabled) {
2417 + /*
2418 +- * If these are control ops, they still need their
2419 +- * per_cpu field freed. Since, function tracing is
2420 ++ * If these are dynamic or control ops, they still
2421 ++ * need their data freed. Since, function tracing is
2422 + * not currently active, we can just free them
2423 + * without synchronizing all CPUs.
2424 + */
2425 +- if (ops->flags & FTRACE_OPS_FL_CONTROL)
2426 +- control_ops_free(ops);
2427 ++ if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL))
2428 ++ goto free_ops;
2429 ++
2430 + return 0;
2431 + }
2432 +
2433 +@@ -2728,6 +2729,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
2434 + if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
2435 + schedule_on_each_cpu(ftrace_sync);
2436 +
2437 ++ free_ops:
2438 + arch_ftrace_trampoline_free(ops);
2439 +
2440 + if (ops->flags & FTRACE_OPS_FL_CONTROL)
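The new free_ops label lets the early path (tracing already disabled, so no CPU synchronization is needed) share the same cleanup as the late path; before this, the early return freed only control ops and dynamic ops leaked their data. A reduced sketch of that shared-label control flow (flags and helpers are placeholders):

    #include <stdio.h>

    #define FL_DYNAMIC 0x1
    #define FL_CONTROL 0x2

    static void free_ops_data(void) { puts("ops data freed"); }
    static void sync_all_cpus(void) { puts("synchronized"); }

    static int shutdown(int flags, int still_active)
    {
        if (!still_active) {
            if (flags & (FL_DYNAMIC | FL_CONTROL))
                goto free_ops;     /* nothing running: free immediately */
            return 0;
        }

        if (flags & (FL_DYNAMIC | FL_CONTROL)) {
            sync_all_cpus();       /* wait out in-flight callers first */
    free_ops:
            free_ops_data();
        }
        return 0;
    }

    int main(void)
    {
        shutdown(FL_DYNAMIC, 0);   /* early path now frees too */
        shutdown(FL_DYNAMIC, 1);   /* late path, after the sync */
        return 0;
    }

Jumping into the block is legal C as long as no variably modified declarations are skipped; the kernel code uses the same shape.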
2441 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
2442 +index d59ebd9d21df..4743066010c4 100644
2443 +--- a/kernel/trace/trace.c
2444 ++++ b/kernel/trace/trace.c
2445 +@@ -5237,7 +5237,7 @@ static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
2446 + tracing_reset_online_cpus(&tr->trace_buffer);
2447 +
2448 + #ifdef CONFIG_TRACER_MAX_TRACE
2449 +- if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
2450 ++ if (tr->max_buffer.buffer)
2451 + ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
2452 + tracing_reset_online_cpus(&tr->max_buffer);
2453 + #endif
2454 +diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
2455 +index b0f86ea77881..ca70d11b8aa7 100644
2456 +--- a/kernel/trace/trace_selftest.c
2457 ++++ b/kernel/trace/trace_selftest.c
2458 +@@ -272,7 +272,7 @@ static int trace_selftest_ops(struct trace_array *tr, int cnt)
2459 + goto out_free;
2460 + if (cnt > 1) {
2461 + if (trace_selftest_test_global_cnt == 0)
2462 +- goto out;
2463 ++ goto out_free;
2464 + }
2465 + if (trace_selftest_test_dyn_cnt == 0)
2466 + goto out_free;
2467 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
2468 +index 53286b2f5b1c..6b5421ae86c6 100644
2469 +--- a/mm/page_alloc.c
2470 ++++ b/mm/page_alloc.c
2471 +@@ -1748,13 +1748,25 @@ static void unreserve_highatomic_pageblock(const struct alloc_context *ac)
2472 + struct page, lru);
2473 +
2474 + /*
2475 +- * It should never happen but changes to locking could
2476 +- * inadvertently allow a per-cpu drain to add pages
2477 +- * to MIGRATE_HIGHATOMIC while unreserving so be safe
2478 +- * and watch for underflows.
2479 ++ * In the page freeing path, the migratetype change is racy,
2480 ++ * so we can encounter several free pages in a pageblock
2481 ++ * in this loop although we changed the pageblock type
2482 ++ * from highatomic to ac->migratetype. So we should
2483 ++ * adjust the count once.
2484 + */
2485 +- zone->nr_reserved_highatomic -= min(pageblock_nr_pages,
2486 +- zone->nr_reserved_highatomic);
2487 ++ if (get_pageblock_migratetype(page) ==
2488 ++ MIGRATE_HIGHATOMIC) {
2489 ++ /*
2490 ++ * It should never happen but changes to
2491 ++ * locking could inadvertently allow a per-cpu
2492 ++ * drain to add pages to MIGRATE_HIGHATOMIC
2493 ++ * while unreserving so be safe and watch for
2494 ++ * underflows.
2495 ++ */
2496 ++ zone->nr_reserved_highatomic -= min(
2497 ++ pageblock_nr_pages,
2498 ++ zone->nr_reserved_highatomic);
2499 ++ }
2500 +
2501 + /*
2502 + * Convert to ac->migratetype and avoid the normal
2503 +diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
2504 +index 6b437e8760d3..12e8cf4bda9f 100644
2505 +--- a/net/ieee802154/6lowpan/reassembly.c
2506 ++++ b/net/ieee802154/6lowpan/reassembly.c
2507 +@@ -580,19 +580,14 @@ static int __net_init lowpan_frags_init_net(struct net *net)
2508 + {
2509 + struct netns_ieee802154_lowpan *ieee802154_lowpan =
2510 + net_ieee802154_lowpan(net);
2511 +- int res;
2512 +
2513 + ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
2514 + ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
2515 + ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;
2516 +
2517 +- res = inet_frags_init_net(&ieee802154_lowpan->frags);
2518 +- if (res)
2519 +- return res;
2520 +- res = lowpan_frags_ns_sysctl_register(net);
2521 +- if (res)
2522 +- inet_frags_uninit_net(&ieee802154_lowpan->frags);
2523 +- return res;
2524 ++ inet_frags_init_net(&ieee802154_lowpan->frags);
2525 ++
2526 ++ return lowpan_frags_ns_sysctl_register(net);
2527 + }
2528 +
2529 + static void __net_exit lowpan_frags_exit_net(struct net *net)
2530 +diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
2531 +index fe144dae7372..c5fb2f694ed0 100644
2532 +--- a/net/ipv4/inet_fragment.c
2533 ++++ b/net/ipv4/inet_fragment.c
2534 +@@ -234,10 +234,8 @@ evict_again:
2535 + cond_resched();
2536 +
2537 + if (read_seqretry(&f->rnd_seqlock, seq) ||
2538 +- percpu_counter_sum(&nf->mem))
2539 ++ sum_frag_mem_limit(nf))
2540 + goto evict_again;
2541 +-
2542 +- percpu_counter_destroy(&nf->mem);
2543 + }
2544 + EXPORT_SYMBOL(inet_frags_exit_net);
2545 +
2546 +diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
2547 +index b8a0607dab96..e2e162432aa3 100644
2548 +--- a/net/ipv4/ip_fragment.c
2549 ++++ b/net/ipv4/ip_fragment.c
2550 +@@ -840,8 +840,6 @@ static void __init ip4_frags_ctl_register(void)
2551 +
2552 + static int __net_init ipv4_frags_init_net(struct net *net)
2553 + {
2554 +- int res;
2555 +-
2556 + /* Fragment cache limits.
2557 + *
2558 + * The fragment memory accounting code, (tries to) account for
2559 +@@ -865,13 +863,9 @@ static int __net_init ipv4_frags_init_net(struct net *net)
2560 + */
2561 + net->ipv4.frags.timeout = IP_FRAG_TIME;
2562 +
2563 +- res = inet_frags_init_net(&net->ipv4.frags);
2564 +- if (res)
2565 +- return res;
2566 +- res = ip4_frags_ns_ctl_register(net);
2567 +- if (res)
2568 +- inet_frags_uninit_net(&net->ipv4.frags);
2569 +- return res;
2570 ++ inet_frags_init_net(&net->ipv4.frags);
2571 ++
2572 ++ return ip4_frags_ns_ctl_register(net);
2573 + }
2574 +
2575 + static void __net_exit ipv4_frags_exit_net(struct net *net)
2576 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
2577 +index 0870a86e9d96..5597120c8ffd 100644
2578 +--- a/net/ipv4/tcp.c
2579 ++++ b/net/ipv4/tcp.c
2580 +@@ -2260,6 +2260,10 @@ int tcp_disconnect(struct sock *sk, int flags)
2581 + tcp_set_ca_state(sk, TCP_CA_Open);
2582 + tcp_clear_retrans(tp);
2583 + inet_csk_delack_init(sk);
2584 ++ /* Initialize rcv_mss to TCP_MIN_MSS to avoid a division-by-zero
2585 ++ * issue in __tcp_select_window()
2586 ++ */
2587 ++ icsk->icsk_ack.rcv_mss = TCP_MIN_MSS;
2588 + tcp_init_send_head(sk);
2589 + memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
2590 + __sk_dst_reset(sk);
2591 +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
2592 +index 735b22b1b4ea..92174881844d 100644
2593 +--- a/net/ipv6/addrconf.c
2594 ++++ b/net/ipv6/addrconf.c
2595 +@@ -5152,7 +5152,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
2596 + * our DAD process, so we don't need
2597 + * to do it again
2598 + */
2599 +- if (!(ifp->rt->rt6i_node))
2600 ++ if (!rcu_access_pointer(ifp->rt->rt6i_node))
2601 + ip6_ins_rt(ifp->rt);
2602 + if (ifp->idev->cnf.forwarding)
2603 + addrconf_join_anycast(ifp);
2604 +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
2605 +index aad8cdf15472..c23e02a7ccb0 100644
2606 +--- a/net/ipv6/ip6_fib.c
2607 ++++ b/net/ipv6/ip6_fib.c
2608 +@@ -150,11 +150,23 @@ static struct fib6_node *node_alloc(void)
2609 + return fn;
2610 + }
2611 +
2612 +-static void node_free(struct fib6_node *fn)
2613 ++static void node_free_immediate(struct fib6_node *fn)
2614 ++{
2615 ++ kmem_cache_free(fib6_node_kmem, fn);
2616 ++}
2617 ++
2618 ++static void node_free_rcu(struct rcu_head *head)
2619 + {
2620 ++ struct fib6_node *fn = container_of(head, struct fib6_node, rcu);
2621 ++
2622 + kmem_cache_free(fib6_node_kmem, fn);
2623 + }
2624 +
2625 ++static void node_free(struct fib6_node *fn)
2626 ++{
2627 ++ call_rcu(&fn->rcu, node_free_rcu);
2628 ++}
2629 ++
2630 + static void rt6_rcu_free(struct rt6_info *rt)
2631 + {
2632 + call_rcu(&rt->dst.rcu_head, dst_rcu_free);
2633 +@@ -191,6 +203,12 @@ static void rt6_release(struct rt6_info *rt)
2634 + }
2635 + }
2636 +
2637 ++static void fib6_free_table(struct fib6_table *table)
2638 ++{
2639 ++ inetpeer_invalidate_tree(&table->tb6_peers);
2640 ++ kfree(table);
2641 ++}
2642 ++
2643 + static void fib6_link_table(struct net *net, struct fib6_table *tb)
2644 + {
2645 + unsigned int h;
2646 +@@ -588,9 +606,9 @@ insert_above:
2647 +
2648 + if (!in || !ln) {
2649 + if (in)
2650 +- node_free(in);
2651 ++ node_free_immediate(in);
2652 + if (ln)
2653 +- node_free(ln);
2654 ++ node_free_immediate(ln);
2655 + return ERR_PTR(-ENOMEM);
2656 + }
2657 +
2658 +@@ -857,7 +875,7 @@ add:
2659 +
2660 + rt->dst.rt6_next = iter;
2661 + *ins = rt;
2662 +- rt->rt6i_node = fn;
2663 ++ rcu_assign_pointer(rt->rt6i_node, fn);
2664 + atomic_inc(&rt->rt6i_ref);
2665 + inet6_rt_notify(RTM_NEWROUTE, rt, info, 0);
2666 + info->nl_net->ipv6.rt6_stats->fib_rt_entries++;
2667 +@@ -882,7 +900,7 @@ add:
2668 + return err;
2669 +
2670 + *ins = rt;
2671 +- rt->rt6i_node = fn;
2672 ++ rcu_assign_pointer(rt->rt6i_node, fn);
2673 + rt->dst.rt6_next = iter->dst.rt6_next;
2674 + atomic_inc(&rt->rt6i_ref);
2675 + inet6_rt_notify(RTM_NEWROUTE, rt, info, NLM_F_REPLACE);
2676 +@@ -1015,7 +1033,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
2677 + root, and then (in failure) stale node
2678 + in main tree.
2679 + */
2680 +- node_free(sfn);
2681 ++ node_free_immediate(sfn);
2682 + err = PTR_ERR(sn);
2683 + goto failure;
2684 + }
2685 +@@ -1442,8 +1460,9 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
2686 +
2687 + int fib6_del(struct rt6_info *rt, struct nl_info *info)
2688 + {
2689 ++ struct fib6_node *fn = rcu_dereference_protected(rt->rt6i_node,
2690 ++ lockdep_is_held(&rt->rt6i_table->tb6_lock));
2691 + struct net *net = info->nl_net;
2692 +- struct fib6_node *fn = rt->rt6i_node;
2693 + struct rt6_info **rtp;
2694 +
2695 + #if RT6_DEBUG >= 2
2696 +@@ -1632,7 +1651,9 @@ static int fib6_clean_node(struct fib6_walker *w)
2697 + if (res) {
2698 + #if RT6_DEBUG >= 2
2699 + pr_debug("%s: del failed: rt=%p@%p err=%d\n",
2700 +- __func__, rt, rt->rt6i_node, res);
2701 ++ __func__, rt,
2702 ++ rcu_access_pointer(rt->rt6i_node),
2703 ++ res);
2704 + #endif
2705 + continue;
2706 + }
2707 +@@ -1870,15 +1891,22 @@ out_timer:
2708 +
2709 + static void fib6_net_exit(struct net *net)
2710 + {
2711 ++ unsigned int i;
2712 ++
2713 + rt6_ifdown(net, NULL);
2714 + del_timer_sync(&net->ipv6.ip6_fib_timer);
2715 +
2716 +-#ifdef CONFIG_IPV6_MULTIPLE_TABLES
2717 +- inetpeer_invalidate_tree(&net->ipv6.fib6_local_tbl->tb6_peers);
2718 +- kfree(net->ipv6.fib6_local_tbl);
2719 +-#endif
2720 +- inetpeer_invalidate_tree(&net->ipv6.fib6_main_tbl->tb6_peers);
2721 +- kfree(net->ipv6.fib6_main_tbl);
2722 ++ for (i = 0; i < FIB6_TABLE_HASHSZ; i++) {
2723 ++ struct hlist_head *head = &net->ipv6.fib_table_hash[i];
2724 ++ struct hlist_node *tmp;
2725 ++ struct fib6_table *tb;
2726 ++
2727 ++ hlist_for_each_entry_safe(tb, tmp, head, tb6_hlist) {
2728 ++ hlist_del(&tb->tb6_hlist);
2729 ++ fib6_free_table(tb);
2730 ++ }
2731 ++ }
2732 ++
2733 + kfree(net->ipv6.fib_table_hash);
2734 + kfree(net->ipv6.rt6_stats);
2735 + }
2736 +diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
2737 +index bab4441ed4e4..eb2dc39f7066 100644
2738 +--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
2739 ++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
2740 +@@ -649,18 +649,12 @@ EXPORT_SYMBOL_GPL(nf_ct_frag6_consume_orig);
2741 +
2742 + static int nf_ct_net_init(struct net *net)
2743 + {
2744 +- int res;
2745 +-
2746 + net->nf_frag.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
2747 + net->nf_frag.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
2748 + net->nf_frag.frags.timeout = IPV6_FRAG_TIMEOUT;
2749 +- res = inet_frags_init_net(&net->nf_frag.frags);
2750 +- if (res)
2751 +- return res;
2752 +- res = nf_ct_frag6_sysctl_register(net);
2753 +- if (res)
2754 +- inet_frags_uninit_net(&net->nf_frag.frags);
2755 +- return res;
2756 ++ inet_frags_init_net(&net->nf_frag.frags);
2757 ++
2758 ++ return nf_ct_frag6_sysctl_register(net);
2759 + }
2760 +
2761 + static void nf_ct_net_exit(struct net *net)
2762 +diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
2763 +index f9f02581c4ca..f99a04674419 100644
2764 +--- a/net/ipv6/output_core.c
2765 ++++ b/net/ipv6/output_core.c
2766 +@@ -86,7 +86,6 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
2767 +
2768 + while (offset <= packet_len) {
2769 + struct ipv6_opt_hdr *exthdr;
2770 +- unsigned int len;
2771 +
2772 + switch (**nexthdr) {
2773 +
2774 +@@ -112,10 +111,9 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
2775 +
2776 + exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
2777 + offset);
2778 +- len = ipv6_optlen(exthdr);
2779 +- if (len + offset >= IPV6_MAXPLEN)
2780 ++ offset += ipv6_optlen(exthdr);
2781 ++ if (offset > IPV6_MAXPLEN)
2782 + return -EINVAL;
2783 +- offset += len;
2784 + *nexthdr = &exthdr->nexthdr;
2785 + }
2786 +
2787 +diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
2788 +index a234552a7e3d..58f2139ebb5e 100644
2789 +--- a/net/ipv6/reassembly.c
2790 ++++ b/net/ipv6/reassembly.c
2791 +@@ -708,19 +708,13 @@ static void ip6_frags_sysctl_unregister(void)
2792 +
2793 + static int __net_init ipv6_frags_init_net(struct net *net)
2794 + {
2795 +- int res;
2796 +-
2797 + net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
2798 + net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
2799 + net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;
2800 +
2801 +- res = inet_frags_init_net(&net->ipv6.frags);
2802 +- if (res)
2803 +- return res;
2804 +- res = ip6_frags_ns_sysctl_register(net);
2805 +- if (res)
2806 +- inet_frags_uninit_net(&net->ipv6.frags);
2807 +- return res;
2808 ++ inet_frags_init_net(&net->ipv6.frags);
2809 ++
2810 ++ return ip6_frags_ns_sysctl_register(net);
2811 + }
2812 +
2813 + static void __net_exit ipv6_frags_exit_net(struct net *net)
2814 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
2815 +index ef335070e98a..48917437550e 100644
2816 +--- a/net/ipv6/route.c
2817 ++++ b/net/ipv6/route.c
2818 +@@ -1248,7 +1248,9 @@ static void rt6_dst_from_metrics_check(struct rt6_info *rt)
2819 +
2820 + static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
2821 + {
2822 +- if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
2823 ++ u32 rt_cookie;
2824 ++
2825 ++ if (!rt6_get_cookie_safe(rt, &rt_cookie) || rt_cookie != cookie)
2826 + return NULL;
2827 +
2828 + if (rt6_check_expired(rt))
2829 +@@ -1316,8 +1318,14 @@ static void ip6_link_failure(struct sk_buff *skb)
2830 + if (rt->rt6i_flags & RTF_CACHE) {
2831 + dst_hold(&rt->dst);
2832 + ip6_del_rt(rt);
2833 +- } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
2834 +- rt->rt6i_node->fn_sernum = -1;
2835 ++ } else {
2836 ++ struct fib6_node *fn;
2837 ++
2838 ++ rcu_read_lock();
2839 ++ fn = rcu_dereference(rt->rt6i_node);
2840 ++ if (fn && (rt->rt6i_flags & RTF_DEFAULT))
2841 ++ fn->fn_sernum = -1;
2842 ++ rcu_read_unlock();
2843 + }
2844 + }
2845 + }
2846 +@@ -1334,7 +1342,8 @@ static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
2847 + static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
2848 + {
2849 + return !(rt->rt6i_flags & RTF_CACHE) &&
2850 +- (rt->rt6i_flags & RTF_PCPU || rt->rt6i_node);
2851 ++ (rt->rt6i_flags & RTF_PCPU ||
2852 ++ rcu_access_pointer(rt->rt6i_node));
2853 + }
2854 +
2855 + static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,