From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:3.15 commit in: /
Date: Tue, 08 Jul 2014 18:30:58
Message-Id: 1404844247.1d92f75ee86e750cd47eb0715ba6ba4d2b6d68ee.mpagano@gentoo
1 commit: 1d92f75ee86e750cd47eb0715ba6ba4d2b6d68ee
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Tue Jul 8 18:30:47 2014 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Tue Jul 8 18:30:47 2014 +0000
6 URL: http://git.overlays.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=1d92f75e
7
8 Linux patch 3.15.4
9
10 ---
11 0000_README | 4 +
12 1003_linux-3.15.4.patch | 2843 +++++++++++++++++++++++++++++++++++++++++++++++
13 2 files changed, 2847 insertions(+)
14
15 diff --git a/0000_README b/0000_README
16 index 236fcc9..be75984 100644
17 --- a/0000_README
18 +++ b/0000_README
19 @@ -55,6 +55,10 @@ Patch: 1002_linux-3.15.3.patch
20 From: http://www.kernel.org
21 Desc: Linux 3.15.3
22
23 +Patch: 1003_linux-3.15.4.patch
24 +From: http://www.kernel.org
25 +Desc: Linux 3.15.4
26 +
27 Patch: 1700_enable-thinkpad-micled.patch
28 From: https://bugs.gentoo.org/show_bug.cgi?id=449248
29 Desc: Enable mic mute led in thinkpads
30
31 diff --git a/1003_linux-3.15.4.patch b/1003_linux-3.15.4.patch
32 new file mode 100644
33 index 0000000..2d3f2db
34 --- /dev/null
35 +++ b/1003_linux-3.15.4.patch
36 @@ -0,0 +1,2843 @@
37 +diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
38 +index 2a8e89e13e45..7e9abb8a276b 100644
39 +--- a/Documentation/SubmittingPatches
40 ++++ b/Documentation/SubmittingPatches
41 +@@ -132,6 +132,20 @@ Example:
42 + platform_set_drvdata(), but left the variable "dev" unused,
43 + delete it.
44 +
45 ++If your patch fixes a bug in a specific commit, e.g. you found an issue using
46 ++git-bisect, please use the 'Fixes:' tag with the first 12 characters of the
47 ++SHA-1 ID, and the one line summary.
48 ++Example:
49 ++
50 ++ Fixes: e21d2170f366 ("video: remove unnecessary platform_set_drvdata()")
51 ++
52 ++The following git-config settings can be used to add a pretty format for
53 ++outputting the above style in the git log or git show commands
54 ++
55 ++ [core]
56 ++ abbrev = 12
57 ++ [pretty]
58 ++ fixes = Fixes: %h (\"%s\")
59 +
60 + 3) Separate your changes.
61 +
62 +@@ -443,7 +457,7 @@ person it names. This tag documents that potentially interested parties
63 + have been included in the discussion
64 +
65 +
66 +-14) Using Reported-by:, Tested-by:, Reviewed-by: and Suggested-by:
67 ++14) Using Reported-by:, Tested-by:, Reviewed-by:, Suggested-by: and Fixes:
68 +
69 + If this patch fixes a problem reported by somebody else, consider adding a
70 + Reported-by: tag to credit the reporter for their contribution. Please
71 +@@ -498,6 +512,12 @@ idea was not posted in a public forum. That said, if we diligently credit our
72 + idea reporters, they will, hopefully, be inspired to help us again in the
73 + future.
74 +
75 ++A Fixes: tag indicates that the patch fixes an issue in a previous commit. It
76 ++is used to make it easy to determine where a bug originated, which can help
77 ++review a bug fix. This tag also assists the stable kernel team in determining
78 ++which stable kernel versions should receive your fix. This is the preferred
79 ++method for indicating a bug fixed by the patch. See #2 above for more details.
80 ++
81 +
82 + 15) The canonical patch format
83 +
84 +diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
85 +index 85c362d8ea34..d1ab5e17eb13 100644
86 +--- a/Documentation/sound/alsa/HD-Audio-Models.txt
87 ++++ b/Documentation/sound/alsa/HD-Audio-Models.txt
88 +@@ -286,6 +286,11 @@ STAC92HD83*
89 + hp-inv-led HP with broken BIOS for inverted mute LED
90 + auto BIOS setup (default)
91 +
92 ++STAC92HD95
93 ++==========
94 ++ hp-led LED support for HP laptops
95 ++ hp-bass Bass HPF setup for HP Spectre 13
96 ++
97 + STAC9872
98 + ========
99 + vaio VAIO laptop without SPDIF
100 +diff --git a/Makefile b/Makefile
101 +index 2e37d8b0bb96..25ecc1dd5bb5 100644
102 +--- a/Makefile
103 ++++ b/Makefile
104 +@@ -1,6 +1,6 @@
105 + VERSION = 3
106 + PATCHLEVEL = 15
107 +-SUBLEVEL = 3
108 ++SUBLEVEL = 4
109 + EXTRAVERSION =
110 + NAME = Shuffling Zombie Juror
111 +
112 +diff --git a/arch/mips/include/asm/sigcontext.h b/arch/mips/include/asm/sigcontext.h
113 +index f54bdbe85c0d..eeeb0f48c767 100644
114 +--- a/arch/mips/include/asm/sigcontext.h
115 ++++ b/arch/mips/include/asm/sigcontext.h
116 +@@ -32,8 +32,6 @@ struct sigcontext32 {
117 + __u32 sc_lo2;
118 + __u32 sc_hi3;
119 + __u32 sc_lo3;
120 +- __u64 sc_msaregs[32]; /* Most significant 64 bits */
121 +- __u32 sc_msa_csr;
122 + };
123 + #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */
124 + #endif /* _ASM_SIGCONTEXT_H */
125 +diff --git a/arch/mips/include/uapi/asm/sigcontext.h b/arch/mips/include/uapi/asm/sigcontext.h
126 +index 681c17603a48..6c9906f59c6e 100644
127 +--- a/arch/mips/include/uapi/asm/sigcontext.h
128 ++++ b/arch/mips/include/uapi/asm/sigcontext.h
129 +@@ -12,10 +12,6 @@
130 + #include <linux/types.h>
131 + #include <asm/sgidefs.h>
132 +
133 +-/* Bits which may be set in sc_used_math */
134 +-#define USEDMATH_FP (1 << 0)
135 +-#define USEDMATH_MSA (1 << 1)
136 +-
137 + #if _MIPS_SIM == _MIPS_SIM_ABI32
138 +
139 + /*
140 +@@ -41,8 +37,6 @@ struct sigcontext {
141 + unsigned long sc_lo2;
142 + unsigned long sc_hi3;
143 + unsigned long sc_lo3;
144 +- unsigned long long sc_msaregs[32]; /* Most significant 64 bits */
145 +- unsigned long sc_msa_csr;
146 + };
147 +
148 + #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
149 +@@ -76,8 +70,6 @@ struct sigcontext {
150 + __u32 sc_used_math;
151 + __u32 sc_dsp;
152 + __u32 sc_reserved;
153 +- __u64 sc_msaregs[32];
154 +- __u32 sc_msa_csr;
155 + };
156 +
157 +
158 +diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
159 +index 0ea75c244b48..7ff80622c8d9 100644
160 +--- a/arch/mips/kernel/asm-offsets.c
161 ++++ b/arch/mips/kernel/asm-offsets.c
162 +@@ -295,7 +295,6 @@ void output_sc_defines(void)
163 + OFFSET(SC_LO2, sigcontext, sc_lo2);
164 + OFFSET(SC_HI3, sigcontext, sc_hi3);
165 + OFFSET(SC_LO3, sigcontext, sc_lo3);
166 +- OFFSET(SC_MSAREGS, sigcontext, sc_msaregs);
167 + BLANK();
168 + }
169 + #endif
170 +@@ -310,7 +309,6 @@ void output_sc_defines(void)
171 + OFFSET(SC_MDLO, sigcontext, sc_mdlo);
172 + OFFSET(SC_PC, sigcontext, sc_pc);
173 + OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr);
174 +- OFFSET(SC_MSAREGS, sigcontext, sc_msaregs);
175 + BLANK();
176 + }
177 + #endif
178 +@@ -322,7 +320,6 @@ void output_sc32_defines(void)
179 + OFFSET(SC32_FPREGS, sigcontext32, sc_fpregs);
180 + OFFSET(SC32_FPC_CSR, sigcontext32, sc_fpc_csr);
181 + OFFSET(SC32_FPC_EIR, sigcontext32, sc_fpc_eir);
182 +- OFFSET(SC32_MSAREGS, sigcontext32, sc_msaregs);
183 + BLANK();
184 + }
185 + #endif
186 +diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
187 +index fab40f7d2e03..ac9facc08694 100644
188 +--- a/arch/mips/kernel/irq-msc01.c
189 ++++ b/arch/mips/kernel/irq-msc01.c
190 +@@ -131,7 +131,7 @@ void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqma
191 +
192 + board_bind_eic_interrupt = &msc_bind_eic_interrupt;
193 +
194 +- for (; nirq >= 0; nirq--, imp++) {
195 ++ for (; nirq > 0; nirq--, imp++) {
196 + int n = imp->im_irq;
197 +
198 + switch (imp->im_type) {
199 +diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
200 +index 71814272d148..8352523568e6 100644
201 +--- a/arch/mips/kernel/r4k_fpu.S
202 ++++ b/arch/mips/kernel/r4k_fpu.S
203 +@@ -13,7 +13,6 @@
204 + * Copyright (C) 1999, 2001 Silicon Graphics, Inc.
205 + */
206 + #include <asm/asm.h>
207 +-#include <asm/asmmacro.h>
208 + #include <asm/errno.h>
209 + #include <asm/fpregdef.h>
210 + #include <asm/mipsregs.h>
211 +@@ -246,218 +245,6 @@ LEAF(_restore_fp_context32)
212 + END(_restore_fp_context32)
213 + #endif
214 +
215 +-#ifdef CONFIG_CPU_HAS_MSA
216 +-
217 +- .macro save_sc_msareg wr, off, sc, tmp
218 +-#ifdef CONFIG_64BIT
219 +- copy_u_d \tmp, \wr, 1
220 +- EX sd \tmp, (\off+(\wr*8))(\sc)
221 +-#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
222 +- copy_u_w \tmp, \wr, 2
223 +- EX sw \tmp, (\off+(\wr*8)+0)(\sc)
224 +- copy_u_w \tmp, \wr, 3
225 +- EX sw \tmp, (\off+(\wr*8)+4)(\sc)
226 +-#else /* CONFIG_CPU_BIG_ENDIAN */
227 +- copy_u_w \tmp, \wr, 2
228 +- EX sw \tmp, (\off+(\wr*8)+4)(\sc)
229 +- copy_u_w \tmp, \wr, 3
230 +- EX sw \tmp, (\off+(\wr*8)+0)(\sc)
231 +-#endif
232 +- .endm
233 +-
234 +-/*
235 +- * int _save_msa_context(struct sigcontext *sc)
236 +- *
237 +- * Save the upper 64 bits of each vector register along with the MSA_CSR
238 +- * register into sc. Returns zero on success, else non-zero.
239 +- */
240 +-LEAF(_save_msa_context)
241 +- save_sc_msareg 0, SC_MSAREGS, a0, t0
242 +- save_sc_msareg 1, SC_MSAREGS, a0, t0
243 +- save_sc_msareg 2, SC_MSAREGS, a0, t0
244 +- save_sc_msareg 3, SC_MSAREGS, a0, t0
245 +- save_sc_msareg 4, SC_MSAREGS, a0, t0
246 +- save_sc_msareg 5, SC_MSAREGS, a0, t0
247 +- save_sc_msareg 6, SC_MSAREGS, a0, t0
248 +- save_sc_msareg 7, SC_MSAREGS, a0, t0
249 +- save_sc_msareg 8, SC_MSAREGS, a0, t0
250 +- save_sc_msareg 9, SC_MSAREGS, a0, t0
251 +- save_sc_msareg 10, SC_MSAREGS, a0, t0
252 +- save_sc_msareg 11, SC_MSAREGS, a0, t0
253 +- save_sc_msareg 12, SC_MSAREGS, a0, t0
254 +- save_sc_msareg 13, SC_MSAREGS, a0, t0
255 +- save_sc_msareg 14, SC_MSAREGS, a0, t0
256 +- save_sc_msareg 15, SC_MSAREGS, a0, t0
257 +- save_sc_msareg 16, SC_MSAREGS, a0, t0
258 +- save_sc_msareg 17, SC_MSAREGS, a0, t0
259 +- save_sc_msareg 18, SC_MSAREGS, a0, t0
260 +- save_sc_msareg 19, SC_MSAREGS, a0, t0
261 +- save_sc_msareg 20, SC_MSAREGS, a0, t0
262 +- save_sc_msareg 21, SC_MSAREGS, a0, t0
263 +- save_sc_msareg 22, SC_MSAREGS, a0, t0
264 +- save_sc_msareg 23, SC_MSAREGS, a0, t0
265 +- save_sc_msareg 24, SC_MSAREGS, a0, t0
266 +- save_sc_msareg 25, SC_MSAREGS, a0, t0
267 +- save_sc_msareg 26, SC_MSAREGS, a0, t0
268 +- save_sc_msareg 27, SC_MSAREGS, a0, t0
269 +- save_sc_msareg 28, SC_MSAREGS, a0, t0
270 +- save_sc_msareg 29, SC_MSAREGS, a0, t0
271 +- save_sc_msareg 30, SC_MSAREGS, a0, t0
272 +- save_sc_msareg 31, SC_MSAREGS, a0, t0
273 +- jr ra
274 +- li v0, 0
275 +- END(_save_msa_context)
276 +-
277 +-#ifdef CONFIG_MIPS32_COMPAT
278 +-
279 +-/*
280 +- * int _save_msa_context32(struct sigcontext32 *sc)
281 +- *
282 +- * Save the upper 64 bits of each vector register along with the MSA_CSR
283 +- * register into sc. Returns zero on success, else non-zero.
284 +- */
285 +-LEAF(_save_msa_context32)
286 +- save_sc_msareg 0, SC32_MSAREGS, a0, t0
287 +- save_sc_msareg 1, SC32_MSAREGS, a0, t0
288 +- save_sc_msareg 2, SC32_MSAREGS, a0, t0
289 +- save_sc_msareg 3, SC32_MSAREGS, a0, t0
290 +- save_sc_msareg 4, SC32_MSAREGS, a0, t0
291 +- save_sc_msareg 5, SC32_MSAREGS, a0, t0
292 +- save_sc_msareg 6, SC32_MSAREGS, a0, t0
293 +- save_sc_msareg 7, SC32_MSAREGS, a0, t0
294 +- save_sc_msareg 8, SC32_MSAREGS, a0, t0
295 +- save_sc_msareg 9, SC32_MSAREGS, a0, t0
296 +- save_sc_msareg 10, SC32_MSAREGS, a0, t0
297 +- save_sc_msareg 11, SC32_MSAREGS, a0, t0
298 +- save_sc_msareg 12, SC32_MSAREGS, a0, t0
299 +- save_sc_msareg 13, SC32_MSAREGS, a0, t0
300 +- save_sc_msareg 14, SC32_MSAREGS, a0, t0
301 +- save_sc_msareg 15, SC32_MSAREGS, a0, t0
302 +- save_sc_msareg 16, SC32_MSAREGS, a0, t0
303 +- save_sc_msareg 17, SC32_MSAREGS, a0, t0
304 +- save_sc_msareg 18, SC32_MSAREGS, a0, t0
305 +- save_sc_msareg 19, SC32_MSAREGS, a0, t0
306 +- save_sc_msareg 20, SC32_MSAREGS, a0, t0
307 +- save_sc_msareg 21, SC32_MSAREGS, a0, t0
308 +- save_sc_msareg 22, SC32_MSAREGS, a0, t0
309 +- save_sc_msareg 23, SC32_MSAREGS, a0, t0
310 +- save_sc_msareg 24, SC32_MSAREGS, a0, t0
311 +- save_sc_msareg 25, SC32_MSAREGS, a0, t0
312 +- save_sc_msareg 26, SC32_MSAREGS, a0, t0
313 +- save_sc_msareg 27, SC32_MSAREGS, a0, t0
314 +- save_sc_msareg 28, SC32_MSAREGS, a0, t0
315 +- save_sc_msareg 29, SC32_MSAREGS, a0, t0
316 +- save_sc_msareg 30, SC32_MSAREGS, a0, t0
317 +- save_sc_msareg 31, SC32_MSAREGS, a0, t0
318 +- jr ra
319 +- li v0, 0
320 +- END(_save_msa_context32)
321 +-
322 +-#endif /* CONFIG_MIPS32_COMPAT */
323 +-
324 +- .macro restore_sc_msareg wr, off, sc, tmp
325 +-#ifdef CONFIG_64BIT
326 +- EX ld \tmp, (\off+(\wr*8))(\sc)
327 +- insert_d \wr, 1, \tmp
328 +-#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
329 +- EX lw \tmp, (\off+(\wr*8)+0)(\sc)
330 +- insert_w \wr, 2, \tmp
331 +- EX lw \tmp, (\off+(\wr*8)+4)(\sc)
332 +- insert_w \wr, 3, \tmp
333 +-#else /* CONFIG_CPU_BIG_ENDIAN */
334 +- EX lw \tmp, (\off+(\wr*8)+4)(\sc)
335 +- insert_w \wr, 2, \tmp
336 +- EX lw \tmp, (\off+(\wr*8)+0)(\sc)
337 +- insert_w \wr, 3, \tmp
338 +-#endif
339 +- .endm
340 +-
341 +-/*
342 +- * int _restore_msa_context(struct sigcontext *sc)
343 +- */
344 +-LEAF(_restore_msa_context)
345 +- restore_sc_msareg 0, SC_MSAREGS, a0, t0
346 +- restore_sc_msareg 1, SC_MSAREGS, a0, t0
347 +- restore_sc_msareg 2, SC_MSAREGS, a0, t0
348 +- restore_sc_msareg 3, SC_MSAREGS, a0, t0
349 +- restore_sc_msareg 4, SC_MSAREGS, a0, t0
350 +- restore_sc_msareg 5, SC_MSAREGS, a0, t0
351 +- restore_sc_msareg 6, SC_MSAREGS, a0, t0
352 +- restore_sc_msareg 7, SC_MSAREGS, a0, t0
353 +- restore_sc_msareg 8, SC_MSAREGS, a0, t0
354 +- restore_sc_msareg 9, SC_MSAREGS, a0, t0
355 +- restore_sc_msareg 10, SC_MSAREGS, a0, t0
356 +- restore_sc_msareg 11, SC_MSAREGS, a0, t0
357 +- restore_sc_msareg 12, SC_MSAREGS, a0, t0
358 +- restore_sc_msareg 13, SC_MSAREGS, a0, t0
359 +- restore_sc_msareg 14, SC_MSAREGS, a0, t0
360 +- restore_sc_msareg 15, SC_MSAREGS, a0, t0
361 +- restore_sc_msareg 16, SC_MSAREGS, a0, t0
362 +- restore_sc_msareg 17, SC_MSAREGS, a0, t0
363 +- restore_sc_msareg 18, SC_MSAREGS, a0, t0
364 +- restore_sc_msareg 19, SC_MSAREGS, a0, t0
365 +- restore_sc_msareg 20, SC_MSAREGS, a0, t0
366 +- restore_sc_msareg 21, SC_MSAREGS, a0, t0
367 +- restore_sc_msareg 22, SC_MSAREGS, a0, t0
368 +- restore_sc_msareg 23, SC_MSAREGS, a0, t0
369 +- restore_sc_msareg 24, SC_MSAREGS, a0, t0
370 +- restore_sc_msareg 25, SC_MSAREGS, a0, t0
371 +- restore_sc_msareg 26, SC_MSAREGS, a0, t0
372 +- restore_sc_msareg 27, SC_MSAREGS, a0, t0
373 +- restore_sc_msareg 28, SC_MSAREGS, a0, t0
374 +- restore_sc_msareg 29, SC_MSAREGS, a0, t0
375 +- restore_sc_msareg 30, SC_MSAREGS, a0, t0
376 +- restore_sc_msareg 31, SC_MSAREGS, a0, t0
377 +- jr ra
378 +- li v0, 0
379 +- END(_restore_msa_context)
380 +-
381 +-#ifdef CONFIG_MIPS32_COMPAT
382 +-
383 +-/*
384 +- * int _restore_msa_context32(struct sigcontext32 *sc)
385 +- */
386 +-LEAF(_restore_msa_context32)
387 +- restore_sc_msareg 0, SC32_MSAREGS, a0, t0
388 +- restore_sc_msareg 1, SC32_MSAREGS, a0, t0
389 +- restore_sc_msareg 2, SC32_MSAREGS, a0, t0
390 +- restore_sc_msareg 3, SC32_MSAREGS, a0, t0
391 +- restore_sc_msareg 4, SC32_MSAREGS, a0, t0
392 +- restore_sc_msareg 5, SC32_MSAREGS, a0, t0
393 +- restore_sc_msareg 6, SC32_MSAREGS, a0, t0
394 +- restore_sc_msareg 7, SC32_MSAREGS, a0, t0
395 +- restore_sc_msareg 8, SC32_MSAREGS, a0, t0
396 +- restore_sc_msareg 9, SC32_MSAREGS, a0, t0
397 +- restore_sc_msareg 10, SC32_MSAREGS, a0, t0
398 +- restore_sc_msareg 11, SC32_MSAREGS, a0, t0
399 +- restore_sc_msareg 12, SC32_MSAREGS, a0, t0
400 +- restore_sc_msareg 13, SC32_MSAREGS, a0, t0
401 +- restore_sc_msareg 14, SC32_MSAREGS, a0, t0
402 +- restore_sc_msareg 15, SC32_MSAREGS, a0, t0
403 +- restore_sc_msareg 16, SC32_MSAREGS, a0, t0
404 +- restore_sc_msareg 17, SC32_MSAREGS, a0, t0
405 +- restore_sc_msareg 18, SC32_MSAREGS, a0, t0
406 +- restore_sc_msareg 19, SC32_MSAREGS, a0, t0
407 +- restore_sc_msareg 20, SC32_MSAREGS, a0, t0
408 +- restore_sc_msareg 21, SC32_MSAREGS, a0, t0
409 +- restore_sc_msareg 22, SC32_MSAREGS, a0, t0
410 +- restore_sc_msareg 23, SC32_MSAREGS, a0, t0
411 +- restore_sc_msareg 24, SC32_MSAREGS, a0, t0
412 +- restore_sc_msareg 25, SC32_MSAREGS, a0, t0
413 +- restore_sc_msareg 26, SC32_MSAREGS, a0, t0
414 +- restore_sc_msareg 27, SC32_MSAREGS, a0, t0
415 +- restore_sc_msareg 28, SC32_MSAREGS, a0, t0
416 +- restore_sc_msareg 29, SC32_MSAREGS, a0, t0
417 +- restore_sc_msareg 30, SC32_MSAREGS, a0, t0
418 +- restore_sc_msareg 31, SC32_MSAREGS, a0, t0
419 +- jr ra
420 +- li v0, 0
421 +- END(_restore_msa_context32)
422 +-
423 +-#endif /* CONFIG_MIPS32_COMPAT */
424 +-
425 +-#endif /* CONFIG_CPU_HAS_MSA */
426 +-
427 + .set reorder
428 +
429 + .type fault@function
430 +diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
431 +index 33133d3df3e5..9e60d117e41e 100644
432 +--- a/arch/mips/kernel/signal.c
433 ++++ b/arch/mips/kernel/signal.c
434 +@@ -31,7 +31,6 @@
435 + #include <linux/bitops.h>
436 + #include <asm/cacheflush.h>
437 + #include <asm/fpu.h>
438 +-#include <asm/msa.h>
439 + #include <asm/sim.h>
440 + #include <asm/ucontext.h>
441 + #include <asm/cpu-features.h>
442 +@@ -48,9 +47,6 @@ static int (*restore_fp_context)(struct sigcontext __user *sc);
443 + extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
444 + extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);
445 +
446 +-extern asmlinkage int _save_msa_context(struct sigcontext __user *sc);
447 +-extern asmlinkage int _restore_msa_context(struct sigcontext __user *sc);
448 +-
449 + struct sigframe {
450 + u32 sf_ass[4]; /* argument save space for o32 */
451 + u32 sf_pad[2]; /* Was: signal trampoline */
452 +@@ -100,60 +96,20 @@ static int copy_fp_from_sigcontext(struct sigcontext __user *sc)
453 + }
454 +
455 + /*
456 +- * These functions will save only the upper 64 bits of the vector registers,
457 +- * since the lower 64 bits have already been saved as the scalar FP context.
458 +- */
459 +-static int copy_msa_to_sigcontext(struct sigcontext __user *sc)
460 +-{
461 +- int i;
462 +- int err = 0;
463 +-
464 +- for (i = 0; i < NUM_FPU_REGS; i++) {
465 +- err |=
466 +- __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
467 +- &sc->sc_msaregs[i]);
468 +- }
469 +- err |= __put_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
470 +-
471 +- return err;
472 +-}
473 +-
474 +-static int copy_msa_from_sigcontext(struct sigcontext __user *sc)
475 +-{
476 +- int i;
477 +- int err = 0;
478 +- u64 val;
479 +-
480 +- for (i = 0; i < NUM_FPU_REGS; i++) {
481 +- err |= __get_user(val, &sc->sc_msaregs[i]);
482 +- set_fpr64(&current->thread.fpu.fpr[i], 1, val);
483 +- }
484 +- err |= __get_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
485 +-
486 +- return err;
487 +-}
488 +-
489 +-/*
490 + * Helper routines
491 + */
492 +-static int protected_save_fp_context(struct sigcontext __user *sc,
493 +- unsigned used_math)
494 ++static int protected_save_fp_context(struct sigcontext __user *sc)
495 + {
496 + int err;
497 +- bool save_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
498 + #ifndef CONFIG_EVA
499 + while (1) {
500 + lock_fpu_owner();
501 + if (is_fpu_owner()) {
502 + err = save_fp_context(sc);
503 +- if (save_msa && !err)
504 +- err = _save_msa_context(sc);
505 + unlock_fpu_owner();
506 + } else {
507 + unlock_fpu_owner();
508 + err = copy_fp_to_sigcontext(sc);
509 +- if (save_msa && !err)
510 +- err = copy_msa_to_sigcontext(sc);
511 + }
512 + if (likely(!err))
513 + break;
514 +@@ -169,38 +125,24 @@ static int protected_save_fp_context(struct sigcontext __user *sc,
515 + * EVA does not have FPU EVA instructions so saving fpu context directly
516 + * does not work.
517 + */
518 +- disable_msa();
519 + lose_fpu(1);
520 + err = save_fp_context(sc); /* this might fail */
521 +- if (save_msa && !err)
522 +- err = copy_msa_to_sigcontext(sc);
523 + #endif
524 + return err;
525 + }
526 +
527 +-static int protected_restore_fp_context(struct sigcontext __user *sc,
528 +- unsigned used_math)
529 ++static int protected_restore_fp_context(struct sigcontext __user *sc)
530 + {
531 + int err, tmp __maybe_unused;
532 +- bool restore_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
533 + #ifndef CONFIG_EVA
534 + while (1) {
535 + lock_fpu_owner();
536 + if (is_fpu_owner()) {
537 + err = restore_fp_context(sc);
538 +- if (restore_msa && !err) {
539 +- enable_msa();
540 +- err = _restore_msa_context(sc);
541 +- } else {
542 +- /* signal handler may have used MSA */
543 +- disable_msa();
544 +- }
545 + unlock_fpu_owner();
546 + } else {
547 + unlock_fpu_owner();
548 + err = copy_fp_from_sigcontext(sc);
549 +- if (!err && (used_math & USEDMATH_MSA))
550 +- err = copy_msa_from_sigcontext(sc);
551 + }
552 + if (likely(!err))
553 + break;
554 +@@ -216,11 +158,8 @@ static int protected_restore_fp_context(struct sigcontext __user *sc,
555 + * EVA does not have FPU EVA instructions so restoring fpu context
556 + * directly does not work.
557 + */
558 +- enable_msa();
559 + lose_fpu(0);
560 + err = restore_fp_context(sc); /* this might fail */
561 +- if (restore_msa && !err)
562 +- err = copy_msa_from_sigcontext(sc);
563 + #endif
564 + return err;
565 + }
566 +@@ -252,8 +191,7 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
567 + err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
568 + }
569 +
570 +- used_math = used_math() ? USEDMATH_FP : 0;
571 +- used_math |= thread_msa_context_live() ? USEDMATH_MSA : 0;
572 ++ used_math = !!used_math();
573 + err |= __put_user(used_math, &sc->sc_used_math);
574 +
575 + if (used_math) {
576 +@@ -261,7 +199,7 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
577 + * Save FPU state to signal context. Signal handler
578 + * will "inherit" current FPU state.
579 + */
580 +- err |= protected_save_fp_context(sc, used_math);
581 ++ err |= protected_save_fp_context(sc);
582 + }
583 + return err;
584 + }
585 +@@ -286,14 +224,14 @@ int fpcsr_pending(unsigned int __user *fpcsr)
586 + }
587 +
588 + static int
589 +-check_and_restore_fp_context(struct sigcontext __user *sc, unsigned used_math)
590 ++check_and_restore_fp_context(struct sigcontext __user *sc)
591 + {
592 + int err, sig;
593 +
594 + err = sig = fpcsr_pending(&sc->sc_fpc_csr);
595 + if (err > 0)
596 + err = 0;
597 +- err |= protected_restore_fp_context(sc, used_math);
598 ++ err |= protected_restore_fp_context(sc);
599 + return err ?: sig;
600 + }
601 +
602 +@@ -333,10 +271,9 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
603 + if (used_math) {
604 + /* restore fpu context if we have used it before */
605 + if (!err)
606 +- err = check_and_restore_fp_context(sc, used_math);
607 ++ err = check_and_restore_fp_context(sc);
608 + } else {
609 +- /* signal handler may have used FPU or MSA. Disable them. */
610 +- disable_msa();
611 ++ /* signal handler may have used FPU. Give it up. */
612 + lose_fpu(0);
613 + }
614 +
615 +diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
616 +index 299f956e4db3..bae2e6ee2109 100644
617 +--- a/arch/mips/kernel/signal32.c
618 ++++ b/arch/mips/kernel/signal32.c
619 +@@ -30,7 +30,6 @@
620 + #include <asm/sim.h>
621 + #include <asm/ucontext.h>
622 + #include <asm/fpu.h>
623 +-#include <asm/msa.h>
624 + #include <asm/war.h>
625 + #include <asm/vdso.h>
626 + #include <asm/dsp.h>
627 +@@ -43,9 +42,6 @@ static int (*restore_fp_context32)(struct sigcontext32 __user *sc);
628 + extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
629 + extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);
630 +
631 +-extern asmlinkage int _save_msa_context32(struct sigcontext32 __user *sc);
632 +-extern asmlinkage int _restore_msa_context32(struct sigcontext32 __user *sc);
633 +-
634 + /*
635 + * Including <asm/unistd.h> would give use the 64-bit syscall numbers ...
636 + */
637 +@@ -115,59 +111,19 @@ static int copy_fp_from_sigcontext32(struct sigcontext32 __user *sc)
638 + }
639 +
640 + /*
641 +- * These functions will save only the upper 64 bits of the vector registers,
642 +- * since the lower 64 bits have already been saved as the scalar FP context.
643 +- */
644 +-static int copy_msa_to_sigcontext32(struct sigcontext32 __user *sc)
645 +-{
646 +- int i;
647 +- int err = 0;
648 +-
649 +- for (i = 0; i < NUM_FPU_REGS; i++) {
650 +- err |=
651 +- __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
652 +- &sc->sc_msaregs[i]);
653 +- }
654 +- err |= __put_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
655 +-
656 +- return err;
657 +-}
658 +-
659 +-static int copy_msa_from_sigcontext32(struct sigcontext32 __user *sc)
660 +-{
661 +- int i;
662 +- int err = 0;
663 +- u64 val;
664 +-
665 +- for (i = 0; i < NUM_FPU_REGS; i++) {
666 +- err |= __get_user(val, &sc->sc_msaregs[i]);
667 +- set_fpr64(&current->thread.fpu.fpr[i], 1, val);
668 +- }
669 +- err |= __get_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
670 +-
671 +- return err;
672 +-}
673 +-
674 +-/*
675 + * sigcontext handlers
676 + */
677 +-static int protected_save_fp_context32(struct sigcontext32 __user *sc,
678 +- unsigned used_math)
679 ++static int protected_save_fp_context32(struct sigcontext32 __user *sc)
680 + {
681 + int err;
682 +- bool save_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
683 + while (1) {
684 + lock_fpu_owner();
685 + if (is_fpu_owner()) {
686 + err = save_fp_context32(sc);
687 +- if (save_msa && !err)
688 +- err = _save_msa_context32(sc);
689 + unlock_fpu_owner();
690 + } else {
691 + unlock_fpu_owner();
692 + err = copy_fp_to_sigcontext32(sc);
693 +- if (save_msa && !err)
694 +- err = copy_msa_to_sigcontext32(sc);
695 + }
696 + if (likely(!err))
697 + break;
698 +@@ -181,28 +137,17 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc,
699 + return err;
700 + }
701 +
702 +-static int protected_restore_fp_context32(struct sigcontext32 __user *sc,
703 +- unsigned used_math)
704 ++static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
705 + {
706 + int err, tmp __maybe_unused;
707 +- bool restore_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
708 + while (1) {
709 + lock_fpu_owner();
710 + if (is_fpu_owner()) {
711 + err = restore_fp_context32(sc);
712 +- if (restore_msa && !err) {
713 +- enable_msa();
714 +- err = _restore_msa_context32(sc);
715 +- } else {
716 +- /* signal handler may have used MSA */
717 +- disable_msa();
718 +- }
719 + unlock_fpu_owner();
720 + } else {
721 + unlock_fpu_owner();
722 + err = copy_fp_from_sigcontext32(sc);
723 +- if (restore_msa && !err)
724 +- err = copy_msa_from_sigcontext32(sc);
725 + }
726 + if (likely(!err))
727 + break;
728 +@@ -241,8 +186,7 @@ static int setup_sigcontext32(struct pt_regs *regs,
729 + err |= __put_user(mflo3(), &sc->sc_lo3);
730 + }
731 +
732 +- used_math = used_math() ? USEDMATH_FP : 0;
733 +- used_math |= thread_msa_context_live() ? USEDMATH_MSA : 0;
734 ++ used_math = !!used_math();
735 + err |= __put_user(used_math, &sc->sc_used_math);
736 +
737 + if (used_math) {
738 +@@ -250,21 +194,20 @@ static int setup_sigcontext32(struct pt_regs *regs,
739 + * Save FPU state to signal context. Signal handler
740 + * will "inherit" current FPU state.
741 + */
742 +- err |= protected_save_fp_context32(sc, used_math);
743 ++ err |= protected_save_fp_context32(sc);
744 + }
745 + return err;
746 + }
747 +
748 + static int
749 +-check_and_restore_fp_context32(struct sigcontext32 __user *sc,
750 +- unsigned used_math)
751 ++check_and_restore_fp_context32(struct sigcontext32 __user *sc)
752 + {
753 + int err, sig;
754 +
755 + err = sig = fpcsr_pending(&sc->sc_fpc_csr);
756 + if (err > 0)
757 + err = 0;
758 +- err |= protected_restore_fp_context32(sc, used_math);
759 ++ err |= protected_restore_fp_context32(sc);
760 + return err ?: sig;
761 + }
762 +
763 +@@ -301,10 +244,9 @@ static int restore_sigcontext32(struct pt_regs *regs,
764 + if (used_math) {
765 + /* restore fpu context if we have used it before */
766 + if (!err)
767 +- err = check_and_restore_fp_context32(sc, used_math);
768 ++ err = check_and_restore_fp_context32(sc);
769 + } else {
770 +- /* signal handler may have used FPU or MSA. Disable them. */
771 +- disable_msa();
772 ++ /* signal handler may have used FPU. Give it up. */
773 + lose_fpu(0);
774 + }
775 +
776 +diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
777 +index 5efce56f0df0..3e0ff8d0fbf9 100644
778 +--- a/arch/mips/kvm/kvm_mips.c
779 ++++ b/arch/mips/kvm/kvm_mips.c
780 +@@ -149,9 +149,7 @@ void kvm_mips_free_vcpus(struct kvm *kvm)
781 + if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
782 + kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
783 + }
784 +-
785 +- if (kvm->arch.guest_pmap)
786 +- kfree(kvm->arch.guest_pmap);
787 ++ kfree(kvm->arch.guest_pmap);
788 +
789 + kvm_for_each_vcpu(i, vcpu, kvm) {
790 + kvm_arch_vcpu_free(vcpu);
791 +@@ -389,12 +387,9 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
792 +
793 + kvm_mips_dump_stats(vcpu);
794 +
795 +- if (vcpu->arch.guest_ebase)
796 +- kfree(vcpu->arch.guest_ebase);
797 +-
798 +- if (vcpu->arch.kseg0_commpage)
799 +- kfree(vcpu->arch.kseg0_commpage);
800 +-
801 ++ kfree(vcpu->arch.guest_ebase);
802 ++ kfree(vcpu->arch.kseg0_commpage);
803 ++ kfree(vcpu);
804 + }
805 +
806 + void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
807 +diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
808 +index 0e83e7d8c73f..b5fad8afe837 100644
809 +--- a/arch/powerpc/include/asm/switch_to.h
810 ++++ b/arch/powerpc/include/asm/switch_to.h
811 +@@ -84,6 +84,8 @@ static inline void clear_task_ebb(struct task_struct *t)
812 + {
813 + #ifdef CONFIG_PPC_BOOK3S_64
814 + /* EBB perf events are not inherited, so clear all EBB state. */
815 ++ t->thread.ebbrr = 0;
816 ++ t->thread.ebbhr = 0;
817 + t->thread.bescr = 0;
818 + t->thread.mmcr2 = 0;
819 + t->thread.mmcr0 = 0;
820 +diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
821 +index ea4dc3a89c1f..14b2862533b5 100644
822 +--- a/arch/powerpc/include/asm/systbl.h
823 ++++ b/arch/powerpc/include/asm/systbl.h
824 +@@ -190,7 +190,7 @@ SYSCALL_SPU(getcwd)
825 + SYSCALL_SPU(capget)
826 + SYSCALL_SPU(capset)
827 + COMPAT_SYS(sigaltstack)
828 +-COMPAT_SYS_SPU(sendfile)
829 ++SYSX_SPU(sys_sendfile64,compat_sys_sendfile,sys_sendfile)
830 + SYSCALL(ni_syscall)
831 + SYSCALL(ni_syscall)
832 + PPC_SYS(vfork)
833 +diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h
834 +index 5b7657959faa..de2c0e4ee1aa 100644
835 +--- a/arch/powerpc/include/uapi/asm/cputable.h
836 ++++ b/arch/powerpc/include/uapi/asm/cputable.h
837 +@@ -41,5 +41,6 @@
838 + #define PPC_FEATURE2_EBB 0x10000000
839 + #define PPC_FEATURE2_ISEL 0x08000000
840 + #define PPC_FEATURE2_TAR 0x04000000
841 ++#define PPC_FEATURE2_VEC_CRYPTO 0x02000000
842 +
843 + #endif /* _UAPI__ASM_POWERPC_CPUTABLE_H */
844 +diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
845 +index c1faade6506d..11da04a4625a 100644
846 +--- a/arch/powerpc/kernel/cputable.c
847 ++++ b/arch/powerpc/kernel/cputable.c
848 +@@ -109,7 +109,8 @@ extern void __restore_cpu_e6500(void);
849 + PPC_FEATURE_PSERIES_PERFMON_COMPAT)
850 + #define COMMON_USER2_POWER8 (PPC_FEATURE2_ARCH_2_07 | \
851 + PPC_FEATURE2_HTM_COMP | PPC_FEATURE2_DSCR | \
852 +- PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR)
853 ++ PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR | \
854 ++ PPC_FEATURE2_VEC_CRYPTO)
855 + #define COMMON_USER_PA6T (COMMON_USER_PPC64 | PPC_FEATURE_PA6T |\
856 + PPC_FEATURE_TRUE_LE | \
857 + PPC_FEATURE_HAS_ALTIVEC_COMP)
858 +diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
859 +index 40bd7bd4e19a..8a8b722870a1 100644
860 +--- a/arch/powerpc/kernel/legacy_serial.c
861 ++++ b/arch/powerpc/kernel/legacy_serial.c
862 +@@ -48,6 +48,9 @@ static struct of_device_id legacy_serial_parents[] __initdata = {
863 + static unsigned int legacy_serial_count;
864 + static int legacy_serial_console = -1;
865 +
866 ++static const upf_t legacy_port_flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
867 ++ UPF_SHARE_IRQ | UPF_FIXED_PORT;
868 ++
869 + static unsigned int tsi_serial_in(struct uart_port *p, int offset)
870 + {
871 + unsigned int tmp;
872 +@@ -153,8 +156,6 @@ static int __init add_legacy_soc_port(struct device_node *np,
873 + {
874 + u64 addr;
875 + const __be32 *addrp;
876 +- upf_t flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ
877 +- | UPF_FIXED_PORT;
878 + struct device_node *tsi = of_get_parent(np);
879 +
880 + /* We only support ports that have a clock frequency properly
881 +@@ -185,9 +186,11 @@ static int __init add_legacy_soc_port(struct device_node *np,
882 + * IO port value. It will be fixed up later along with the irq
883 + */
884 + if (tsi && !strcmp(tsi->type, "tsi-bridge"))
885 +- return add_legacy_port(np, -1, UPIO_TSI, addr, addr, NO_IRQ, flags, 0);
886 ++ return add_legacy_port(np, -1, UPIO_TSI, addr, addr,
887 ++ NO_IRQ, legacy_port_flags, 0);
888 + else
889 +- return add_legacy_port(np, -1, UPIO_MEM, addr, addr, NO_IRQ, flags, 0);
890 ++ return add_legacy_port(np, -1, UPIO_MEM, addr, addr,
891 ++ NO_IRQ, legacy_port_flags, 0);
892 + }
893 +
894 + static int __init add_legacy_isa_port(struct device_node *np,
895 +@@ -233,7 +236,7 @@ static int __init add_legacy_isa_port(struct device_node *np,
896 +
897 + /* Add port, irq will be dealt with later */
898 + return add_legacy_port(np, index, UPIO_PORT, be32_to_cpu(reg[1]),
899 +- taddr, NO_IRQ, UPF_BOOT_AUTOCONF, 0);
900 ++ taddr, NO_IRQ, legacy_port_flags, 0);
901 +
902 + }
903 +
904 +@@ -306,7 +309,7 @@ static int __init add_legacy_pci_port(struct device_node *np,
905 + * IO port value. It will be fixed up later along with the irq
906 + */
907 + return add_legacy_port(np, index, iotype, base, addr, NO_IRQ,
908 +- UPF_BOOT_AUTOCONF, np != pci_dev);
909 ++ legacy_port_flags, np != pci_dev);
910 + }
911 + #endif
912 +
913 +diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
914 +index 79b7612ac6fa..aa3e7c0f60e2 100644
915 +--- a/arch/powerpc/kernel/setup-common.c
916 ++++ b/arch/powerpc/kernel/setup-common.c
917 +@@ -459,9 +459,17 @@ void __init smp_setup_cpu_maps(void)
918 + }
919 +
920 + for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
921 ++ bool avail;
922 ++
923 + DBG(" thread %d -> cpu %d (hard id %d)\n",
924 + j, cpu, be32_to_cpu(intserv[j]));
925 +- set_cpu_present(cpu, true);
926 ++
927 ++ avail = of_device_is_available(dn);
928 ++ if (!avail)
929 ++ avail = !of_property_match_string(dn,
930 ++ "enable-method", "spin-table");
931 ++
932 ++ set_cpu_present(cpu, avail);
933 + set_hard_smp_processor_id(cpu, be32_to_cpu(intserv[j]));
934 + set_cpu_possible(cpu, true);
935 + cpu++;
936 +diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
937 +index 7e711bdcc6da..9fff9cdcc519 100644
938 +--- a/arch/powerpc/kernel/time.c
939 ++++ b/arch/powerpc/kernel/time.c
940 +@@ -551,7 +551,7 @@ void timer_interrupt(struct pt_regs * regs)
941 + may_hard_irq_enable();
942 +
943 +
944 +-#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
945 ++#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
946 + if (atomic_read(&ppc_n_lost_interrupts) != 0)
947 + do_IRQ(regs);
948 + #endif
949 +diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
950 +index c0511c27a733..412dd46dd0b7 100644
951 +--- a/arch/powerpc/lib/sstep.c
952 ++++ b/arch/powerpc/lib/sstep.c
953 +@@ -1470,7 +1470,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
954 + regs->gpr[rd] = byterev_4(val);
955 + goto ldst_done;
956 +
957 +-#ifdef CONFIG_PPC_CPU
958 ++#ifdef CONFIG_PPC_FPU
959 + case 535: /* lfsx */
960 + case 567: /* lfsux */
961 + if (!(regs->msr & MSR_FP))
962 +diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
963 +index 06ba83b036d3..5cbfde131839 100644
964 +--- a/arch/powerpc/mm/hash_utils_64.c
965 ++++ b/arch/powerpc/mm/hash_utils_64.c
966 +@@ -964,6 +964,22 @@ void hash_failure_debug(unsigned long ea, unsigned long access,
967 + trap, vsid, ssize, psize, lpsize, pte);
968 + }
969 +
970 ++static void check_paca_psize(unsigned long ea, struct mm_struct *mm,
971 ++ int psize, bool user_region)
972 ++{
973 ++ if (user_region) {
974 ++ if (psize != get_paca_psize(ea)) {
975 ++ get_paca()->context = mm->context;
976 ++ slb_flush_and_rebolt();
977 ++ }
978 ++ } else if (get_paca()->vmalloc_sllp !=
979 ++ mmu_psize_defs[mmu_vmalloc_psize].sllp) {
980 ++ get_paca()->vmalloc_sllp =
981 ++ mmu_psize_defs[mmu_vmalloc_psize].sllp;
982 ++ slb_vmalloc_update();
983 ++ }
984 ++}
985 ++
986 + /* Result code is:
987 + * 0 - handled
988 + * 1 - normal page fault
989 +@@ -1085,6 +1101,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
990 + WARN_ON(1);
991 + }
992 + #endif
993 ++ check_paca_psize(ea, mm, psize, user_region);
994 ++
995 + goto bail;
996 + }
997 +
998 +@@ -1125,17 +1143,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
999 + #endif
1000 + }
1001 + }
1002 +- if (user_region) {
1003 +- if (psize != get_paca_psize(ea)) {
1004 +- get_paca()->context = mm->context;
1005 +- slb_flush_and_rebolt();
1006 +- }
1007 +- } else if (get_paca()->vmalloc_sllp !=
1008 +- mmu_psize_defs[mmu_vmalloc_psize].sllp) {
1009 +- get_paca()->vmalloc_sllp =
1010 +- mmu_psize_defs[mmu_vmalloc_psize].sllp;
1011 +- slb_vmalloc_update();
1012 +- }
1013 ++
1014 ++ check_paca_psize(ea, mm, psize, user_region);
1015 + #endif /* CONFIG_PPC_64K_PAGES */
1016 +
1017 + #ifdef CONFIG_PPC_HAS_HASH_64K
1018 +diff --git a/arch/powerpc/platforms/powernv/opal-sysparam.c b/arch/powerpc/platforms/powernv/opal-sysparam.c
1019 +index d202f9bc3683..9d1acf22a099 100644
1020 +--- a/arch/powerpc/platforms/powernv/opal-sysparam.c
1021 ++++ b/arch/powerpc/platforms/powernv/opal-sysparam.c
1022 +@@ -260,10 +260,10 @@ void __init opal_sys_param_init(void)
1023 + attr[i].kobj_attr.attr.mode = S_IRUGO;
1024 + break;
1025 + case OPAL_SYSPARAM_WRITE:
1026 +- attr[i].kobj_attr.attr.mode = S_IWUGO;
1027 ++ attr[i].kobj_attr.attr.mode = S_IWUSR;
1028 + break;
1029 + case OPAL_SYSPARAM_RW:
1030 +- attr[i].kobj_attr.attr.mode = S_IRUGO | S_IWUGO;
1031 ++ attr[i].kobj_attr.attr.mode = S_IRUGO | S_IWUSR;
1032 + break;
1033 + default:
1034 + break;
1035 +diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
1036 +index 8a8f0472d98f..83da53fde6b5 100644
1037 +--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
1038 ++++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
1039 +@@ -464,6 +464,7 @@ static int pseries_eeh_get_state(struct eeh_pe *pe, int *state)
1040 + } else {
1041 + result = EEH_STATE_NOT_SUPPORT;
1042 + }
1043 ++ break;
1044 + default:
1045 + result = EEH_STATE_NOT_SUPPORT;
1046 + }
1047 +diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
1048 +index 14fd6fd75a19..6205f0c434db 100644
1049 +--- a/arch/x86/include/asm/ptrace.h
1050 ++++ b/arch/x86/include/asm/ptrace.h
1051 +@@ -231,6 +231,22 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
1052 +
1053 + #define ARCH_HAS_USER_SINGLE_STEP_INFO
1054 +
1055 ++/*
1056 ++ * When hitting ptrace_stop(), we cannot return using SYSRET because
1057 ++ * that does not restore the full CPU state, only a minimal set. The
1058 ++ * ptracer can change arbitrary register values, which is usually okay
1059 ++ * because the usual ptrace stops run off the signal delivery path which
1060 ++ * forces IRET; however, ptrace_event() stops happen in arbitrary places
1061 ++ * in the kernel and don't force IRET path.
1062 ++ *
1063 ++ * So force IRET path after a ptrace stop.
1064 ++ */
1065 ++#define arch_ptrace_stop_needed(code, info) \
1066 ++({ \
1067 ++ set_thread_flag(TIF_NOTIFY_RESUME); \
1068 ++ false; \
1069 ++})
1070 ++
1071 + struct user_desc;
1072 + extern int do_get_thread_area(struct task_struct *p, int idx,
1073 + struct user_desc __user *info);
1074 +diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
1075 +index 59c5abe32f06..fb624469d0ee 100644
1076 +--- a/drivers/block/mtip32xx/mtip32xx.c
1077 ++++ b/drivers/block/mtip32xx/mtip32xx.c
1078 +@@ -1529,6 +1529,37 @@ static inline void ata_swap_string(u16 *buf, unsigned int len)
1079 + be16_to_cpus(&buf[i]);
1080 + }
1081 +
1082 ++static void mtip_set_timeout(struct driver_data *dd,
1083 ++ struct host_to_dev_fis *fis,
1084 ++ unsigned int *timeout, u8 erasemode)
1085 ++{
1086 ++ switch (fis->command) {
1087 ++ case ATA_CMD_DOWNLOAD_MICRO:
1088 ++ *timeout = 120000; /* 2 minutes */
1089 ++ break;
1090 ++ case ATA_CMD_SEC_ERASE_UNIT:
1091 ++ case 0xFC:
1092 ++ if (erasemode)
1093 ++ *timeout = ((*(dd->port->identify + 90) * 2) * 60000);
1094 ++ else
1095 ++ *timeout = ((*(dd->port->identify + 89) * 2) * 60000);
1096 ++ break;
1097 ++ case ATA_CMD_STANDBYNOW1:
1098 ++ *timeout = 120000; /* 2 minutes */
1099 ++ break;
1100 ++ case 0xF7:
1101 ++ case 0xFA:
1102 ++ *timeout = 60000; /* 60 seconds */
1103 ++ break;
1104 ++ case ATA_CMD_SMART:
1105 ++ *timeout = 15000; /* 15 seconds */
1106 ++ break;
1107 ++ default:
1108 ++ *timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
1109 ++ break;
1110 ++ }
1111 ++}
1112 ++
1113 + /*
1114 + * Request the device identity information.
1115 + *
1116 +@@ -1644,6 +1675,7 @@ static int mtip_standby_immediate(struct mtip_port *port)
1117 + int rv;
1118 + struct host_to_dev_fis fis;
1119 + unsigned long start;
1120 ++ unsigned int timeout;
1121 +
1122 + /* Build the FIS. */
1123 + memset(&fis, 0, sizeof(struct host_to_dev_fis));
1124 +@@ -1651,6 +1683,8 @@ static int mtip_standby_immediate(struct mtip_port *port)
1125 + fis.opts = 1 << 7;
1126 + fis.command = ATA_CMD_STANDBYNOW1;
1127 +
1128 ++ mtip_set_timeout(port->dd, &fis, &timeout, 0);
1129 ++
1130 + start = jiffies;
1131 + rv = mtip_exec_internal_command(port,
1132 + &fis,
1133 +@@ -1659,7 +1693,7 @@ static int mtip_standby_immediate(struct mtip_port *port)
1134 + 0,
1135 + 0,
1136 + GFP_ATOMIC,
1137 +- 15000);
1138 ++ timeout);
1139 + dbg_printk(MTIP_DRV_NAME "Time taken to complete standby cmd: %d ms\n",
1140 + jiffies_to_msecs(jiffies - start));
1141 + if (rv)
1142 +@@ -2202,36 +2236,6 @@ static unsigned int implicit_sector(unsigned char command,
1143 + }
1144 + return rv;
1145 + }
1146 +-static void mtip_set_timeout(struct driver_data *dd,
1147 +- struct host_to_dev_fis *fis,
1148 +- unsigned int *timeout, u8 erasemode)
1149 +-{
1150 +- switch (fis->command) {
1151 +- case ATA_CMD_DOWNLOAD_MICRO:
1152 +- *timeout = 120000; /* 2 minutes */
1153 +- break;
1154 +- case ATA_CMD_SEC_ERASE_UNIT:
1155 +- case 0xFC:
1156 +- if (erasemode)
1157 +- *timeout = ((*(dd->port->identify + 90) * 2) * 60000);
1158 +- else
1159 +- *timeout = ((*(dd->port->identify + 89) * 2) * 60000);
1160 +- break;
1161 +- case ATA_CMD_STANDBYNOW1:
1162 +- *timeout = 120000; /* 2 minutes */
1163 +- break;
1164 +- case 0xF7:
1165 +- case 0xFA:
1166 +- *timeout = 60000; /* 60 seconds */
1167 +- break;
1168 +- case ATA_CMD_SMART:
1169 +- *timeout = 15000; /* 15 seconds */
1170 +- break;
1171 +- default:
1172 +- *timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
1173 +- break;
1174 +- }
1175 +-}
1176 +
1177 + /*
1178 + * Executes a taskfile
1179 +@@ -4479,6 +4483,57 @@ static DEFINE_HANDLER(5);
1180 + static DEFINE_HANDLER(6);
1181 + static DEFINE_HANDLER(7);
1182 +
1183 ++static void mtip_disable_link_opts(struct driver_data *dd, struct pci_dev *pdev)
1184 ++{
1185 ++ int pos;
1186 ++ unsigned short pcie_dev_ctrl;
1187 ++
1188 ++ pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
1189 ++ if (pos) {
1190 ++ pci_read_config_word(pdev,
1191 ++ pos + PCI_EXP_DEVCTL,
1192 ++ &pcie_dev_ctrl);
1193 ++ if (pcie_dev_ctrl & (1 << 11) ||
1194 ++ pcie_dev_ctrl & (1 << 4)) {
1195 ++ dev_info(&dd->pdev->dev,
1196 ++ "Disabling ERO/No-Snoop on bridge device %04x:%04x\n",
1197 ++ pdev->vendor, pdev->device);
1198 ++ pcie_dev_ctrl &= ~(PCI_EXP_DEVCTL_NOSNOOP_EN |
1199 ++ PCI_EXP_DEVCTL_RELAX_EN);
1200 ++ pci_write_config_word(pdev,
1201 ++ pos + PCI_EXP_DEVCTL,
1202 ++ pcie_dev_ctrl);
1203 ++ }
1204 ++ }
1205 ++}
1206 ++
1207 ++static void mtip_fix_ero_nosnoop(struct driver_data *dd, struct pci_dev *pdev)
1208 ++{
1209 ++ /*
1210 ++ * This workaround is specific to AMD/ATI chipset with a PCI upstream
1211 ++ * device with device id 0x5aXX
1212 ++ */
1213 ++ if (pdev->bus && pdev->bus->self) {
1214 ++ if (pdev->bus->self->vendor == PCI_VENDOR_ID_ATI &&
1215 ++ ((pdev->bus->self->device & 0xff00) == 0x5a00)) {
1216 ++ mtip_disable_link_opts(dd, pdev->bus->self);
1217 ++ } else {
1218 ++ /* Check further up the topology */
1219 ++ struct pci_dev *parent_dev = pdev->bus->self;
1220 ++ if (parent_dev->bus &&
1221 ++ parent_dev->bus->parent &&
1222 ++ parent_dev->bus->parent->self &&
1223 ++ parent_dev->bus->parent->self->vendor ==
1224 ++ PCI_VENDOR_ID_ATI &&
1225 ++ (parent_dev->bus->parent->self->device &
1226 ++ 0xff00) == 0x5a00) {
1227 ++ mtip_disable_link_opts(dd,
1228 ++ parent_dev->bus->parent->self);
1229 ++ }
1230 ++ }
1231 ++ }
1232 ++}
1233 ++
1234 + /*
1235 + * Called for each supported PCI device detected.
1236 + *
1237 +@@ -4630,6 +4685,8 @@ static int mtip_pci_probe(struct pci_dev *pdev,
1238 + goto msi_initialize_err;
1239 + }
1240 +
1241 ++ mtip_fix_ero_nosnoop(dd, pdev);
1242 ++
1243 + /* Initialize the block layer. */
1244 + rv = mtip_block_initialize(dd);
1245 + if (rv < 0) {
1246 +@@ -4935,13 +4992,13 @@ static int __init mtip_init(void)
1247 + */
1248 + static void __exit mtip_exit(void)
1249 + {
1250 +- debugfs_remove_recursive(dfs_parent);
1251 +-
1252 + /* Release the allocated major block device number. */
1253 + unregister_blkdev(mtip_major, MTIP_DRV_NAME);
1254 +
1255 + /* Unregister the PCI driver. */
1256 + pci_unregister_driver(&mtip_pci_driver);
1257 ++
1258 ++ debugfs_remove_recursive(dfs_parent);
1259 + }
1260 +
1261 + MODULE_AUTHOR("Micron Technology, Inc");
1262 +diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
1263 +index abda6609d3e7..558224cf55bf 100644
1264 +--- a/drivers/cpufreq/cpufreq.c
1265 ++++ b/drivers/cpufreq/cpufreq.c
1266 +@@ -2166,10 +2166,8 @@ int cpufreq_update_policy(unsigned int cpu)
1267 + struct cpufreq_policy new_policy;
1268 + int ret;
1269 +
1270 +- if (!policy) {
1271 +- ret = -ENODEV;
1272 +- goto no_policy;
1273 +- }
1274 ++ if (!policy)
1275 ++ return -ENODEV;
1276 +
1277 + down_write(&policy->rwsem);
1278 +
1279 +@@ -2188,7 +2186,7 @@ int cpufreq_update_policy(unsigned int cpu)
1280 + new_policy.cur = cpufreq_driver->get(cpu);
1281 + if (WARN_ON(!new_policy.cur)) {
1282 + ret = -EIO;
1283 +- goto no_policy;
1284 ++ goto unlock;
1285 + }
1286 +
1287 + if (!policy->cur) {
1288 +@@ -2203,10 +2201,10 @@ int cpufreq_update_policy(unsigned int cpu)
1289 +
1290 + ret = cpufreq_set_policy(policy, &new_policy);
1291 +
1292 ++unlock:
1293 + up_write(&policy->rwsem);
1294 +
1295 + cpufreq_cpu_put(policy);
1296 +-no_policy:
1297 + return ret;
1298 + }
1299 + EXPORT_SYMBOL(cpufreq_update_policy);
1300 +diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c
1301 +index 0af618abebaf..3607070797af 100644
1302 +--- a/drivers/cpufreq/ppc-corenet-cpufreq.c
1303 ++++ b/drivers/cpufreq/ppc-corenet-cpufreq.c
1304 +@@ -138,7 +138,7 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
1305 + struct cpufreq_frequency_table *table;
1306 + struct cpu_data *data;
1307 + unsigned int cpu = policy->cpu;
1308 +- u64 transition_latency_hz;
1309 ++ u64 u64temp;
1310 +
1311 + np = of_get_cpu_node(cpu, NULL);
1312 + if (!np)
1313 +@@ -206,9 +206,10 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
1314 + for_each_cpu(i, per_cpu(cpu_mask, cpu))
1315 + per_cpu(cpu_data, i) = data;
1316 +
1317 +- transition_latency_hz = 12ULL * NSEC_PER_SEC;
1318 +- policy->cpuinfo.transition_latency =
1319 +- do_div(transition_latency_hz, fsl_get_sys_freq());
1320 ++ /* Minimum transition latency is 12 platform clocks */
1321 ++ u64temp = 12ULL * NSEC_PER_SEC;
1322 ++ do_div(u64temp, fsl_get_sys_freq());
1323 ++ policy->cpuinfo.transition_latency = u64temp + 1;
1324 +
1325 + of_node_put(np);
1326 +
1327 +diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
1328 +index f0d588f8859e..1acb99100556 100644
1329 +--- a/drivers/infiniband/core/user_mad.c
1330 ++++ b/drivers/infiniband/core/user_mad.c
1331 +@@ -98,7 +98,7 @@ struct ib_umad_port {
1332 +
1333 + struct ib_umad_device {
1334 + int start_port, end_port;
1335 +- struct kref ref;
1336 ++ struct kobject kobj;
1337 + struct ib_umad_port port[0];
1338 + };
1339 +
1340 +@@ -134,14 +134,18 @@ static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS);
1341 + static void ib_umad_add_one(struct ib_device *device);
1342 + static void ib_umad_remove_one(struct ib_device *device);
1343 +
1344 +-static void ib_umad_release_dev(struct kref *ref)
1345 ++static void ib_umad_release_dev(struct kobject *kobj)
1346 + {
1347 + struct ib_umad_device *dev =
1348 +- container_of(ref, struct ib_umad_device, ref);
1349 ++ container_of(kobj, struct ib_umad_device, kobj);
1350 +
1351 + kfree(dev);
1352 + }
1353 +
1354 ++static struct kobj_type ib_umad_dev_ktype = {
1355 ++ .release = ib_umad_release_dev,
1356 ++};
1357 ++
1358 + static int hdr_size(struct ib_umad_file *file)
1359 + {
1360 + return file->use_pkey_index ? sizeof (struct ib_user_mad_hdr) :
1361 +@@ -780,27 +784,19 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
1362 + {
1363 + struct ib_umad_port *port;
1364 + struct ib_umad_file *file;
1365 +- int ret;
1366 ++ int ret = -ENXIO;
1367 +
1368 + port = container_of(inode->i_cdev, struct ib_umad_port, cdev);
1369 +- if (port)
1370 +- kref_get(&port->umad_dev->ref);
1371 +- else
1372 +- return -ENXIO;
1373 +
1374 + mutex_lock(&port->file_mutex);
1375 +
1376 +- if (!port->ib_dev) {
1377 +- ret = -ENXIO;
1378 ++ if (!port->ib_dev)
1379 + goto out;
1380 +- }
1381 +
1382 ++ ret = -ENOMEM;
1383 + file = kzalloc(sizeof *file, GFP_KERNEL);
1384 +- if (!file) {
1385 +- kref_put(&port->umad_dev->ref, ib_umad_release_dev);
1386 +- ret = -ENOMEM;
1387 ++ if (!file)
1388 + goto out;
1389 +- }
1390 +
1391 + mutex_init(&file->mutex);
1392 + spin_lock_init(&file->send_lock);
1393 +@@ -814,6 +810,13 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
1394 + list_add_tail(&file->port_list, &port->file_list);
1395 +
1396 + ret = nonseekable_open(inode, filp);
1397 ++ if (ret) {
1398 ++ list_del(&file->port_list);
1399 ++ kfree(file);
1400 ++ goto out;
1401 ++ }
1402 ++
1403 ++ kobject_get(&port->umad_dev->kobj);
1404 +
1405 + out:
1406 + mutex_unlock(&port->file_mutex);
1407 +@@ -852,7 +855,7 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
1408 + mutex_unlock(&file->port->file_mutex);
1409 +
1410 + kfree(file);
1411 +- kref_put(&dev->ref, ib_umad_release_dev);
1412 ++ kobject_put(&dev->kobj);
1413 +
1414 + return 0;
1415 + }
1416 +@@ -880,10 +883,6 @@ static int ib_umad_sm_open(struct inode *inode, struct file *filp)
1417 + int ret;
1418 +
1419 + port = container_of(inode->i_cdev, struct ib_umad_port, sm_cdev);
1420 +- if (port)
1421 +- kref_get(&port->umad_dev->ref);
1422 +- else
1423 +- return -ENXIO;
1424 +
1425 + if (filp->f_flags & O_NONBLOCK) {
1426 + if (down_trylock(&port->sm_sem)) {
1427 +@@ -898,17 +897,27 @@ static int ib_umad_sm_open(struct inode *inode, struct file *filp)
1428 + }
1429 +
1430 + ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
1431 +- if (ret) {
1432 +- up(&port->sm_sem);
1433 +- goto fail;
1434 +- }
1435 ++ if (ret)
1436 ++ goto err_up_sem;
1437 +
1438 + filp->private_data = port;
1439 +
1440 +- return nonseekable_open(inode, filp);
1441 ++ ret = nonseekable_open(inode, filp);
1442 ++ if (ret)
1443 ++ goto err_clr_sm_cap;
1444 ++
1445 ++ kobject_get(&port->umad_dev->kobj);
1446 ++
1447 ++ return 0;
1448 ++
1449 ++err_clr_sm_cap:
1450 ++ swap(props.set_port_cap_mask, props.clr_port_cap_mask);
1451 ++ ib_modify_port(port->ib_dev, port->port_num, 0, &props);
1452 ++
1453 ++err_up_sem:
1454 ++ up(&port->sm_sem);
1455 +
1456 + fail:
1457 +- kref_put(&port->umad_dev->ref, ib_umad_release_dev);
1458 + return ret;
1459 + }
1460 +
1461 +@@ -927,7 +936,7 @@ static int ib_umad_sm_close(struct inode *inode, struct file *filp)
1462 +
1463 + up(&port->sm_sem);
1464 +
1465 +- kref_put(&port->umad_dev->ref, ib_umad_release_dev);
1466 ++ kobject_put(&port->umad_dev->kobj);
1467 +
1468 + return ret;
1469 + }
1470 +@@ -995,6 +1004,7 @@ static int find_overflow_devnum(void)
1471 + }
1472 +
1473 + static int ib_umad_init_port(struct ib_device *device, int port_num,
1474 ++ struct ib_umad_device *umad_dev,
1475 + struct ib_umad_port *port)
1476 + {
1477 + int devnum;
1478 +@@ -1027,6 +1037,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
1479 +
1480 + cdev_init(&port->cdev, &umad_fops);
1481 + port->cdev.owner = THIS_MODULE;
1482 ++ port->cdev.kobj.parent = &umad_dev->kobj;
1483 + kobject_set_name(&port->cdev.kobj, "umad%d", port->dev_num);
1484 + if (cdev_add(&port->cdev, base, 1))
1485 + goto err_cdev;
1486 +@@ -1045,6 +1056,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
1487 + base += IB_UMAD_MAX_PORTS;
1488 + cdev_init(&port->sm_cdev, &umad_sm_fops);
1489 + port->sm_cdev.owner = THIS_MODULE;
1490 ++ port->sm_cdev.kobj.parent = &umad_dev->kobj;
1491 + kobject_set_name(&port->sm_cdev.kobj, "issm%d", port->dev_num);
1492 + if (cdev_add(&port->sm_cdev, base, 1))
1493 + goto err_sm_cdev;
1494 +@@ -1138,7 +1150,7 @@ static void ib_umad_add_one(struct ib_device *device)
1495 + if (!umad_dev)
1496 + return;
1497 +
1498 +- kref_init(&umad_dev->ref);
1499 ++ kobject_init(&umad_dev->kobj, &ib_umad_dev_ktype);
1500 +
1501 + umad_dev->start_port = s;
1502 + umad_dev->end_port = e;
1503 +@@ -1146,7 +1158,8 @@ static void ib_umad_add_one(struct ib_device *device)
1504 + for (i = s; i <= e; ++i) {
1505 + umad_dev->port[i - s].umad_dev = umad_dev;
1506 +
1507 +- if (ib_umad_init_port(device, i, &umad_dev->port[i - s]))
1508 ++ if (ib_umad_init_port(device, i, umad_dev,
1509 ++ &umad_dev->port[i - s]))
1510 + goto err;
1511 + }
1512 +
1513 +@@ -1158,7 +1171,7 @@ err:
1514 + while (--i >= s)
1515 + ib_umad_kill_port(&umad_dev->port[i - s]);
1516 +
1517 +- kref_put(&umad_dev->ref, ib_umad_release_dev);
1518 ++ kobject_put(&umad_dev->kobj);
1519 + }
1520 +
1521 + static void ib_umad_remove_one(struct ib_device *device)
1522 +@@ -1172,7 +1185,7 @@ static void ib_umad_remove_one(struct ib_device *device)
1523 + for (i = 0; i <= umad_dev->end_port - umad_dev->start_port; ++i)
1524 + ib_umad_kill_port(&umad_dev->port[i]);
1525 +
1526 +- kref_put(&umad_dev->ref, ib_umad_release_dev);
1527 ++ kobject_put(&umad_dev->kobj);
1528 + }
1529 +
1530 + static char *umad_devnode(struct device *dev, umode_t *mode)
1531 +diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
1532 +index cfaa56ada189..7151a02b4ebb 100644
1533 +--- a/drivers/infiniband/hw/cxgb4/cq.c
1534 ++++ b/drivers/infiniband/hw/cxgb4/cq.c
1535 +@@ -940,7 +940,6 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
1536 + if (!mm2)
1537 + goto err4;
1538 +
1539 +- memset(&uresp, 0, sizeof(uresp));
1540 + uresp.qid_mask = rhp->rdev.cqmask;
1541 + uresp.cqid = chp->cq.cqid;
1542 + uresp.size = chp->cq.size;
1543 +@@ -951,7 +950,8 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
1544 + uresp.gts_key = ucontext->key;
1545 + ucontext->key += PAGE_SIZE;
1546 + spin_unlock(&ucontext->mmap_lock);
1547 +- ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
1548 ++ ret = ib_copy_to_udata(udata, &uresp,
1549 ++ sizeof(uresp) - sizeof(uresp.reserved));
1550 + if (ret)
1551 + goto err5;
1552 +
1553 +diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
1554 +index f4fa50a609e2..8914ea90ddd9 100644
1555 +--- a/drivers/infiniband/hw/cxgb4/device.c
1556 ++++ b/drivers/infiniband/hw/cxgb4/device.c
1557 +@@ -736,6 +736,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
1558 + pci_resource_len(devp->rdev.lldi.pdev, 2));
1559 + if (!devp->rdev.bar2_kva) {
1560 + pr_err(MOD "Unable to ioremap BAR2\n");
1561 ++ ib_dealloc_device(&devp->ibdev);
1562 + return ERR_PTR(-EINVAL);
1563 + }
1564 + } else if (ocqp_supported(infop)) {
1565 +@@ -747,6 +748,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
1566 + devp->rdev.lldi.vr->ocq.size);
1567 + if (!devp->rdev.oc_mw_kva) {
1568 + pr_err(MOD "Unable to ioremap onchip mem\n");
1569 ++ ib_dealloc_device(&devp->ibdev);
1570 + return ERR_PTR(-EINVAL);
1571 + }
1572 + }
1573 +diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
1574 +index a94a3e12c349..c777e22bd8d5 100644
1575 +--- a/drivers/infiniband/hw/cxgb4/provider.c
1576 ++++ b/drivers/infiniband/hw/cxgb4/provider.c
1577 +@@ -122,7 +122,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
1578 + INIT_LIST_HEAD(&context->mmaps);
1579 + spin_lock_init(&context->mmap_lock);
1580 +
1581 +- if (udata->outlen < sizeof(uresp)) {
1582 ++ if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
1583 + if (!warned++)
1584 + pr_err(MOD "Warning - downlevel libcxgb4 (non-fatal), device status page disabled.");
1585 + rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED;
1586 +@@ -140,7 +140,8 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
1587 + context->key += PAGE_SIZE;
1588 + spin_unlock(&context->mmap_lock);
1589 +
1590 +- ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1591 ++ ret = ib_copy_to_udata(udata, &uresp,
1592 ++ sizeof(uresp) - sizeof(uresp.reserved));
1593 + if (ret)
1594 + goto err_mm;
1595 +
1596 +diff --git a/drivers/infiniband/hw/cxgb4/user.h b/drivers/infiniband/hw/cxgb4/user.h
1597 +index 11ccd276e5d9..cbd0ce170728 100644
1598 +--- a/drivers/infiniband/hw/cxgb4/user.h
1599 ++++ b/drivers/infiniband/hw/cxgb4/user.h
1600 +@@ -48,6 +48,7 @@ struct c4iw_create_cq_resp {
1601 + __u32 cqid;
1602 + __u32 size;
1603 + __u32 qid_mask;
1604 ++ __u32 reserved; /* explicit padding (optional for i386) */
1605 + };
1606 +
1607 +
1608 +@@ -74,5 +75,6 @@ struct c4iw_create_qp_resp {
1609 + struct c4iw_alloc_ucontext_resp {
1610 + __u64 status_page_key;
1611 + __u32 status_page_size;
1612 ++ __u32 reserved; /* explicit padding (optional for i386) */
1613 + };
1614 + #endif
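
The new reserved members above keep struct c4iw_create_cq_resp and struct c4iw_alloc_ucontext_resp the same size on 32-bit and 64-bit ABIs, and the ib_copy_to_udata() calls copy sizeof(uresp) - sizeof(uresp.reserved) bytes so an older libcxgb4, which supplies a buffer sized for the shorter layout, is not overrun. A standalone sketch of why the explicit tail padding matters (hypothetical struct names; the sizes in the comments assume a typical x86_64 ABI):

    #include <stdint.h>
    #include <stdio.h>

    struct resp_implicit {          /* old layout: tail padding is implicit */
        uint64_t key;
        uint32_t qid_mask;          /* 4 hidden padding bytes follow on x86_64 */
    };

    struct resp_explicit {          /* new layout: the padding is spelled out */
        uint64_t key;
        uint32_t qid_mask;
        uint32_t reserved;          /* same size on i386 and x86_64 */
    };

    int main(void)
    {
        /* Prints 16 and 16 on x86_64; an i386 build would print 12 and 16. */
        printf("implicit: %zu explicit: %zu\n",
               sizeof(struct resp_implicit), sizeof(struct resp_explicit));
        return 0;
    }
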
1615 +diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c
1616 +index e2f9a51f4a38..45802e97332e 100644
1617 +--- a/drivers/infiniband/hw/ipath/ipath_diag.c
1618 ++++ b/drivers/infiniband/hw/ipath/ipath_diag.c
1619 +@@ -346,6 +346,10 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
1620 + ret = -EFAULT;
1621 + goto bail;
1622 + }
1623 ++ dp.len = odp.len;
1624 ++ dp.unit = odp.unit;
1625 ++ dp.data = odp.data;
1626 ++ dp.pbc_wd = 0;
1627 + } else {
1628 + ret = -EINVAL;
1629 + goto bail;
1630 +diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
1631 +index 62bb6b49dc1d..8ae4f896cb41 100644
1632 +--- a/drivers/infiniband/hw/mlx5/cq.c
1633 ++++ b/drivers/infiniband/hw/mlx5/cq.c
1634 +@@ -32,6 +32,7 @@
1635 +
1636 + #include <linux/kref.h>
1637 + #include <rdma/ib_umem.h>
1638 ++#include <rdma/ib_user_verbs.h>
1639 + #include "mlx5_ib.h"
1640 + #include "user.h"
1641 +
1642 +@@ -602,14 +603,24 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
1643 + int *cqe_size, int *index, int *inlen)
1644 + {
1645 + struct mlx5_ib_create_cq ucmd;
1646 ++ size_t ucmdlen;
1647 + int page_shift;
1648 + int npages;
1649 + int ncont;
1650 + int err;
1651 +
1652 +- if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
1653 ++ ucmdlen =
1654 ++ (udata->inlen - sizeof(struct ib_uverbs_cmd_hdr) <
1655 ++ sizeof(ucmd)) ? (sizeof(ucmd) -
1656 ++ sizeof(ucmd.reserved)) : sizeof(ucmd);
1657 ++
1658 ++ if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
1659 + return -EFAULT;
1660 +
1661 ++ if (ucmdlen == sizeof(ucmd) &&
1662 ++ ucmd.reserved != 0)
1663 ++ return -EINVAL;
1664 ++
1665 + if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128)
1666 + return -EINVAL;
1667 +
1668 +diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
1669 +index 210b3eaf188a..384af6dec5eb 100644
1670 +--- a/drivers/infiniband/hw/mlx5/srq.c
1671 ++++ b/drivers/infiniband/hw/mlx5/srq.c
1672 +@@ -35,6 +35,7 @@
1673 + #include <linux/mlx5/srq.h>
1674 + #include <linux/slab.h>
1675 + #include <rdma/ib_umem.h>
1676 ++#include <rdma/ib_user_verbs.h>
1677 +
1678 + #include "mlx5_ib.h"
1679 + #include "user.h"
1680 +@@ -78,16 +79,27 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
1681 + {
1682 + struct mlx5_ib_dev *dev = to_mdev(pd->device);
1683 + struct mlx5_ib_create_srq ucmd;
1684 ++ size_t ucmdlen;
1685 + int err;
1686 + int npages;
1687 + int page_shift;
1688 + int ncont;
1689 + u32 offset;
1690 +
1691 +- if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
1692 ++ ucmdlen =
1693 ++ (udata->inlen - sizeof(struct ib_uverbs_cmd_hdr) <
1694 ++ sizeof(ucmd)) ? (sizeof(ucmd) -
1695 ++ sizeof(ucmd.reserved)) : sizeof(ucmd);
1696 ++
1697 ++ if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
1698 + mlx5_ib_dbg(dev, "failed copy udata\n");
1699 + return -EFAULT;
1700 + }
1701 ++
1702 ++ if (ucmdlen == sizeof(ucmd) &&
1703 ++ ucmd.reserved != 0)
1704 ++ return -EINVAL;
1705 ++
1706 + srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
1707 +
1708 + srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size,
1709 +diff --git a/drivers/infiniband/hw/mlx5/user.h b/drivers/infiniband/hw/mlx5/user.h
1710 +index 0f4f8e42a17f..d0ba264ac1ed 100644
1711 +--- a/drivers/infiniband/hw/mlx5/user.h
1712 ++++ b/drivers/infiniband/hw/mlx5/user.h
1713 +@@ -91,6 +91,7 @@ struct mlx5_ib_create_cq {
1714 + __u64 buf_addr;
1715 + __u64 db_addr;
1716 + __u32 cqe_size;
1717 ++ __u32 reserved; /* explicit padding (optional on i386) */
1718 + };
1719 +
1720 + struct mlx5_ib_create_cq_resp {
1721 +@@ -109,6 +110,7 @@ struct mlx5_ib_create_srq {
1722 + __u64 buf_addr;
1723 + __u64 db_addr;
1724 + __u32 flags;
1725 ++ __u32 reserved; /* explicit padding (optional on i386) */
1726 + };
1727 +
1728 + struct mlx5_ib_create_srq_resp {
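
For commands coming from userspace, the mlx5 hunks accept either the old, shorter layout or the new one carrying a reserved field: the copy length is derived from udata->inlen, and a new-layout command with a non-zero reserved field is rejected. A standalone sketch of that "copy what fits, then validate reserved" pattern (all names below are invented):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct cmd_v2 {
        uint64_t buf_addr;
        uint32_t cqe_size;
        uint32_t reserved;          /* new field, must stay zero for now */
    };

    static int parse_cmd(const void *user_buf, size_t user_len, struct cmd_v2 *out)
    {
        size_t copy_len;

        if (user_len < sizeof(*out) - sizeof(out->reserved))
            return -1;              /* shorter than even the old layout */

        copy_len = user_len < sizeof(*out)
                 ? sizeof(*out) - sizeof(out->reserved)     /* old caller */
                 : sizeof(*out);                            /* new caller */

        memset(out, 0, sizeof(*out));
        memcpy(out, user_buf, copy_len);

        /* A new-format caller must leave the reserved field zeroed. */
        if (copy_len == sizeof(*out) && out->reserved != 0)
            return -1;
        return 0;
    }

    int main(void)
    {
        struct cmd_v2 in = { .buf_addr = 0x1000, .cqe_size = 64, .reserved = 0 };
        struct cmd_v2 parsed;

        printf("new-format parse: %d\n", parse_cmd(&in, sizeof(in), &parsed));
        printf("old-format parse: %d\n",
               parse_cmd(&in, sizeof(in) - sizeof(in.reserved), &parsed));
        return 0;
    }
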
1729 +diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
1730 +index edad991d60ed..22c720e5740d 100644
1731 +--- a/drivers/infiniband/hw/qib/qib_mad.c
1732 ++++ b/drivers/infiniband/hw/qib/qib_mad.c
1733 +@@ -1028,7 +1028,7 @@ static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
1734 +
1735 + event.event = IB_EVENT_PKEY_CHANGE;
1736 + event.device = &dd->verbs_dev.ibdev;
1737 +- event.element.port_num = 1;
1738 ++ event.element.port_num = port;
1739 + ib_dispatch_event(&event);
1740 + }
1741 + return 0;
1742 +diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
1743 +index 66a908bf3fb9..5b2bed8fc493 100644
1744 +--- a/drivers/infiniband/ulp/srp/ib_srp.c
1745 ++++ b/drivers/infiniband/ulp/srp/ib_srp.c
1746 +@@ -1594,6 +1594,12 @@ err_unmap:
1747 + err_iu:
1748 + srp_put_tx_iu(target, iu, SRP_IU_CMD);
1749 +
1750 ++ /*
1751 ++ * Avoid that the loops that iterate over the request ring can
1752 ++ * encounter a dangling SCSI command pointer.
1753 ++ */
1754 ++ req->scmnd = NULL;
1755 ++
1756 + spin_lock_irqsave(&target->lock, flags);
1757 + list_add(&req->list, &target->free_reqs);
1758 +
1759 +diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
1760 +index b96e978a37b7..ee2a04d90d20 100644
1761 +--- a/drivers/input/mouse/elantech.c
1762 ++++ b/drivers/input/mouse/elantech.c
1763 +@@ -473,8 +473,15 @@ static void elantech_report_absolute_v3(struct psmouse *psmouse,
1764 + input_report_key(dev, BTN_TOOL_FINGER, fingers == 1);
1765 + input_report_key(dev, BTN_TOOL_DOUBLETAP, fingers == 2);
1766 + input_report_key(dev, BTN_TOOL_TRIPLETAP, fingers == 3);
1767 +- input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
1768 +- input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
1769 ++
1770 ++ /* For clickpads map both buttons to BTN_LEFT */
1771 ++ if (etd->fw_version & 0x001000) {
1772 ++ input_report_key(dev, BTN_LEFT, packet[0] & 0x03);
1773 ++ } else {
1774 ++ input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
1775 ++ input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
1776 ++ }
1777 ++
1778 + input_report_abs(dev, ABS_PRESSURE, pres);
1779 + input_report_abs(dev, ABS_TOOL_WIDTH, width);
1780 +
1781 +@@ -484,10 +491,17 @@ static void elantech_report_absolute_v3(struct psmouse *psmouse,
1782 + static void elantech_input_sync_v4(struct psmouse *psmouse)
1783 + {
1784 + struct input_dev *dev = psmouse->dev;
1785 ++ struct elantech_data *etd = psmouse->private;
1786 + unsigned char *packet = psmouse->packet;
1787 +
1788 +- input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
1789 +- input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
1790 ++ /* For clickpads map both buttons to BTN_LEFT */
1791 ++ if (etd->fw_version & 0x001000) {
1792 ++ input_report_key(dev, BTN_LEFT, packet[0] & 0x03);
1793 ++ } else {
1794 ++ input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
1795 ++ input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
1796 ++ }
1797 ++
1798 + input_mt_report_pointer_emulation(dev, true);
1799 + input_sync(dev);
1800 + }
1801 +@@ -835,7 +849,7 @@ static int elantech_set_absolute_mode(struct psmouse *psmouse)
1802 + if (etd->set_hw_resolution)
1803 + etd->reg_10 = 0x0b;
1804 + else
1805 +- etd->reg_10 = 0x03;
1806 ++ etd->reg_10 = 0x01;
1807 +
1808 + if (elantech_write_reg(psmouse, 0x10, etd->reg_10))
1809 + rc = -1;
1810 +@@ -1336,7 +1350,8 @@ static int elantech_reconnect(struct psmouse *psmouse)
1811 + }
1812 +
1813 + /*
1814 +- * Some hw_version 3 models go into error state when we try to set bit 3 of r10
1815 ++ * Some hw_version 3 models go into error state when we try to set
1816 ++ * bit 3 and/or bit 1 of r10.
1817 + */
1818 + static const struct dmi_system_id no_hw_res_dmi_table[] = {
1819 + #if defined(CONFIG_DMI) && defined(CONFIG_X86)
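
The elantech hunks treat bit 0x001000 of fw_version as "this is a clickpad" and, when it is set, fold both hardware button bits into BTN_LEFT. A minimal decode of that mapping (CLICKPAD_BIT and report_buttons() are invented names, not the driver's):

    #include <stdint.h>
    #include <stdio.h>

    #define CLICKPAD_BIT 0x001000   /* fw_version bit tested by the hunk above */

    static void report_buttons(uint32_t fw_version, uint8_t packet0)
    {
        if (fw_version & CLICKPAD_BIT) {
            /* Clickpad: either hardware bit counts as a left click. */
            printf("BTN_LEFT=%d\n", (packet0 & 0x03) != 0);
        } else {
            printf("BTN_LEFT=%d BTN_RIGHT=%d\n",
                   (packet0 & 0x01) != 0, (packet0 & 0x02) != 0);
        }
    }

    int main(void)
    {
        report_buttons(0x381000, 0x02);  /* clickpad: right bit reported as left */
        report_buttons(0x380000, 0x02);  /* regular pad: right stays right */
        return 0;
    }
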
1820 +diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
1821 +index c5ec703c727e..ec772d962f06 100644
1822 +--- a/drivers/input/mouse/synaptics.c
1823 ++++ b/drivers/input/mouse/synaptics.c
1824 +@@ -347,15 +347,6 @@ static int synaptics_resolution(struct psmouse *psmouse)
1825 + unsigned char resp[3];
1826 + int i;
1827 +
1828 +- for (i = 0; min_max_pnpid_table[i].pnp_ids; i++)
1829 +- if (matches_pnp_id(psmouse, min_max_pnpid_table[i].pnp_ids)) {
1830 +- priv->x_min = min_max_pnpid_table[i].x_min;
1831 +- priv->x_max = min_max_pnpid_table[i].x_max;
1832 +- priv->y_min = min_max_pnpid_table[i].y_min;
1833 +- priv->y_max = min_max_pnpid_table[i].y_max;
1834 +- return 0;
1835 +- }
1836 +-
1837 + if (SYN_ID_MAJOR(priv->identity) < 4)
1838 + return 0;
1839 +
1840 +@@ -366,6 +357,16 @@ static int synaptics_resolution(struct psmouse *psmouse)
1841 + }
1842 + }
1843 +
1844 ++ for (i = 0; min_max_pnpid_table[i].pnp_ids; i++) {
1845 ++ if (matches_pnp_id(psmouse, min_max_pnpid_table[i].pnp_ids)) {
1846 ++ priv->x_min = min_max_pnpid_table[i].x_min;
1847 ++ priv->x_max = min_max_pnpid_table[i].x_max;
1848 ++ priv->y_min = min_max_pnpid_table[i].y_min;
1849 ++ priv->y_max = min_max_pnpid_table[i].y_max;
1850 ++ return 0;
1851 ++ }
1852 ++ }
1853 ++
1854 + if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 5 &&
1855 + SYN_CAP_MAX_DIMENSIONS(priv->ext_cap_0c)) {
1856 + if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MAX_COORDS, resp)) {
1857 +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
1858 +index 759475ef6ff3..83b01fa02400 100644
1859 +--- a/drivers/pci/pci.c
1860 ++++ b/drivers/pci/pci.c
1861 +@@ -4126,7 +4126,7 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
1862 + u16 cmd;
1863 + int rc;
1864 +
1865 +- WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) & (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
1866 ++ WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
1867 +
1868 + /* ARCH specific VGA enables */
1869 + rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
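
The one-character pci_set_vga_state() change above matters because a bitwise AND only yields non-zero when its two operands share a set bit, which the masking here makes effectively impossible, so the old WARN_ON() never fired; the logical AND warns whenever decode changes are requested together with command bits other than IO/MEMORY, which is the intended check. A standalone illustration (the constants mirror PCI_COMMAND_IO/PCI_COMMAND_MEMORY, but the program is self-contained):

    #include <stdio.h>

    #define FLAG_CHANGE_DECODES 0x1
    #define CMD_IO              0x1
    #define CMD_MEMORY          0x2

    int main(void)
    {
        unsigned int flags = FLAG_CHANGE_DECODES;
        unsigned int command_bits = 0x4;    /* a bit that is neither IO nor MEMORY */

        /* Bitwise: (1 & 4) == 0, so a check built this way never triggers. */
        printf("bitwise: %u\n",
               (flags & FLAG_CHANGE_DECODES) &
               (command_bits & ~(CMD_IO | CMD_MEMORY)));

        /* Logical: both conditions hold, so the fixed check triggers. */
        printf("logical: %d\n",
               (flags & FLAG_CHANGE_DECODES) &&
               (command_bits & ~(CMD_IO | CMD_MEMORY)));
        return 0;
    }
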
1870 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
1871 +index e7292065a1b1..0feb4a32a941 100644
1872 +--- a/drivers/pci/quirks.c
1873 ++++ b/drivers/pci/quirks.c
1874 +@@ -2954,6 +2954,7 @@ static void disable_igfx_irq(struct pci_dev *dev)
1875 + }
1876 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
1877 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
1878 ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq);
1879 +
1880 + /*
1881 + * PCI devices which are on Intel chips can skip the 10ms delay
1882 +diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
1883 +index 9a6e4a2cd072..fda6cf19fafe 100644
1884 +--- a/drivers/scsi/hpsa.c
1885 ++++ b/drivers/scsi/hpsa.c
1886 +@@ -115,9 +115,15 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
1887 + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
1888 + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
1889 + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
1890 ++ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
1891 + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
1892 + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
1893 + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
1894 ++ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
1895 ++ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
1896 ++ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
1897 ++ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
1898 ++ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
1899 + {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
1900 + {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
1901 + {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
1902 +@@ -165,9 +171,15 @@ static struct board_type products[] = {
1903 + {0x21C3103C, "Smart Array", &SA5_access},
1904 + {0x21C4103C, "Smart Array", &SA5_access},
1905 + {0x21C5103C, "Smart Array", &SA5_access},
1906 ++ {0x21C6103C, "Smart Array", &SA5_access},
1907 + {0x21C7103C, "Smart Array", &SA5_access},
1908 + {0x21C8103C, "Smart Array", &SA5_access},
1909 + {0x21C9103C, "Smart Array", &SA5_access},
1910 ++ {0x21CA103C, "Smart Array", &SA5_access},
1911 ++ {0x21CB103C, "Smart Array", &SA5_access},
1912 ++ {0x21CC103C, "Smart Array", &SA5_access},
1913 ++ {0x21CD103C, "Smart Array", &SA5_access},
1914 ++ {0x21CE103C, "Smart Array", &SA5_access},
1915 + {0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
1916 + {0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
1917 + {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
1918 +diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
1919 +index ca2bc348ef5b..e71e1840ac02 100644
1920 +--- a/drivers/target/iscsi/iscsi_target.c
1921 ++++ b/drivers/target/iscsi/iscsi_target.c
1922 +@@ -1309,7 +1309,7 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
1923 + if (cmd->data_direction != DMA_TO_DEVICE) {
1924 + pr_err("Command ITT: 0x%08x received DataOUT for a"
1925 + " NON-WRITE command.\n", cmd->init_task_tag);
1926 +- return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
1927 ++ return iscsit_dump_data_payload(conn, payload_length, 1);
1928 + }
1929 + se_cmd = &cmd->se_cmd;
1930 + iscsit_mod_dataout_timer(cmd);
1931 +diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
1932 +index d9b1d88e1ad3..621b56fcb877 100644
1933 +--- a/drivers/target/iscsi/iscsi_target_login.c
1934 ++++ b/drivers/target/iscsi/iscsi_target_login.c
1935 +@@ -1216,7 +1216,7 @@ old_sess_out:
1936 + static int __iscsi_target_login_thread(struct iscsi_np *np)
1937 + {
1938 + u8 *buffer, zero_tsih = 0;
1939 +- int ret = 0, rc, stop;
1940 ++ int ret = 0, rc;
1941 + struct iscsi_conn *conn = NULL;
1942 + struct iscsi_login *login;
1943 + struct iscsi_portal_group *tpg = NULL;
1944 +@@ -1230,6 +1230,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
1945 + if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
1946 + np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
1947 + complete(&np->np_restart_comp);
1948 ++ } else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) {
1949 ++ spin_unlock_bh(&np->np_thread_lock);
1950 ++ goto exit;
1951 + } else {
1952 + np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
1953 + }
1954 +@@ -1422,10 +1425,8 @@ old_sess_out:
1955 + }
1956 +
1957 + out:
1958 +- stop = kthread_should_stop();
1959 +- /* Wait for another socket.. */
1960 +- if (!stop)
1961 +- return 1;
1962 ++ return 1;
1963 ++
1964 + exit:
1965 + iscsi_stop_login_thread_timer(np);
1966 + spin_lock_bh(&np->np_thread_lock);
1967 +@@ -1442,7 +1443,7 @@ int iscsi_target_login_thread(void *arg)
1968 +
1969 + allow_signal(SIGINT);
1970 +
1971 +- while (!kthread_should_stop()) {
1972 ++ while (1) {
1973 + ret = __iscsi_target_login_thread(np);
1974 + /*
1975 + * We break and exit here unless another sock_accept() call
1976 +diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
1977 +index 53e157cb8c54..fd90b28f1d94 100644
1978 +--- a/drivers/target/iscsi/iscsi_target_util.c
1979 ++++ b/drivers/target/iscsi/iscsi_target_util.c
1980 +@@ -1295,6 +1295,8 @@ int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_deta
1981 + login->login_failed = 1;
1982 + iscsit_collect_login_stats(conn, status_class, status_detail);
1983 +
1984 ++ memset(&login->rsp[0], 0, ISCSI_HDR_LEN);
1985 ++
1986 + hdr = (struct iscsi_login_rsp *)&login->rsp[0];
1987 + hdr->opcode = ISCSI_OP_LOGIN_RSP;
1988 + hdr->status_class = status_class;
1989 +diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
1990 +index 26416c15d65c..6ea95d216eb8 100644
1991 +--- a/drivers/target/target_core_device.c
1992 ++++ b/drivers/target/target_core_device.c
1993 +@@ -616,6 +616,7 @@ void core_dev_unexport(
1994 + dev->export_count--;
1995 + spin_unlock(&hba->device_lock);
1996 +
1997 ++ lun->lun_sep = NULL;
1998 + lun->lun_se_dev = NULL;
1999 + }
2000 +
2001 +diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c
2002 +index 399c3fddecf6..0e67d96b3ebd 100644
2003 +--- a/drivers/watchdog/ath79_wdt.c
2004 ++++ b/drivers/watchdog/ath79_wdt.c
2005 +@@ -20,6 +20,7 @@
2006 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2007 +
2008 + #include <linux/bitops.h>
2009 ++#include <linux/delay.h>
2010 + #include <linux/errno.h>
2011 + #include <linux/fs.h>
2012 + #include <linux/io.h>
2013 +@@ -90,6 +91,15 @@ static inline void ath79_wdt_keepalive(void)
2014 + static inline void ath79_wdt_enable(void)
2015 + {
2016 + ath79_wdt_keepalive();
2017 ++
2018 ++ /*
2019 ++ * Updating the TIMER register requires a few microseconds
2020 ++ * on the AR934x SoCs at least. Use a small delay to ensure
2021 ++ * that the TIMER register is updated within the hardware
2022 ++ * before enabling the watchdog.
2023 ++ */
2024 ++ udelay(2);
2025 ++
2026 + ath79_wdt_wr(WDOG_REG_CTRL, WDOG_CTRL_ACTION_FCR);
2027 + /* flush write */
2028 + ath79_wdt_rr(WDOG_REG_CTRL);
2029 +diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c
2030 +index 20dc73844737..d9c1a1601926 100644
2031 +--- a/drivers/watchdog/kempld_wdt.c
2032 ++++ b/drivers/watchdog/kempld_wdt.c
2033 +@@ -162,7 +162,7 @@ static int kempld_wdt_set_stage_timeout(struct kempld_wdt_data *wdt_data,
2034 + kempld_get_mutex(pld);
2035 + stage_cfg = kempld_read8(pld, KEMPLD_WDT_STAGE_CFG(stage->id));
2036 + stage_cfg &= ~STAGE_CFG_PRESCALER_MASK;
2037 +- stage_cfg |= STAGE_CFG_SET_PRESCALER(prescaler);
2038 ++ stage_cfg |= STAGE_CFG_SET_PRESCALER(PRESCALER_21);
2039 + kempld_write8(pld, KEMPLD_WDT_STAGE_CFG(stage->id), stage_cfg);
2040 + kempld_write32(pld, KEMPLD_WDT_STAGE_TIMEOUT(stage->id),
2041 + stage_timeout);
2042 +diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
2043 +index 47629d268e0a..c1b03f4235b9 100644
2044 +--- a/drivers/watchdog/sp805_wdt.c
2045 ++++ b/drivers/watchdog/sp805_wdt.c
2046 +@@ -59,7 +59,6 @@
2047 + * @adev: amba device structure of wdt
2048 + * @status: current status of wdt
2049 + * @load_val: load value to be set for current timeout
2050 +- * @timeout: current programmed timeout
2051 + */
2052 + struct sp805_wdt {
2053 + struct watchdog_device wdd;
2054 +@@ -68,7 +67,6 @@ struct sp805_wdt {
2055 + struct clk *clk;
2056 + struct amba_device *adev;
2057 + unsigned int load_val;
2058 +- unsigned int timeout;
2059 + };
2060 +
2061 + static bool nowayout = WATCHDOG_NOWAYOUT;
2062 +@@ -98,7 +96,7 @@ static int wdt_setload(struct watchdog_device *wdd, unsigned int timeout)
2063 + spin_lock(&wdt->lock);
2064 + wdt->load_val = load;
2065 + /* roundup timeout to closest positive integer value */
2066 +- wdt->timeout = div_u64((load + 1) * 2 + (rate / 2), rate);
2067 ++ wdd->timeout = div_u64((load + 1) * 2 + (rate / 2), rate);
2068 + spin_unlock(&wdt->lock);
2069 +
2070 + return 0;
2071 +diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
2072 +index 0c438973f3c8..c79f3e767c8c 100644
2073 +--- a/fs/nfs/inode.c
2074 ++++ b/fs/nfs/inode.c
2075 +@@ -1575,18 +1575,20 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
2076 + inode->i_version = fattr->change_attr;
2077 + }
2078 + } else if (server->caps & NFS_CAP_CHANGE_ATTR)
2079 +- invalid |= save_cache_validity;
2080 ++ nfsi->cache_validity |= save_cache_validity;
2081 +
2082 + if (fattr->valid & NFS_ATTR_FATTR_MTIME) {
2083 + memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
2084 + } else if (server->caps & NFS_CAP_MTIME)
2085 +- invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
2086 ++ nfsi->cache_validity |= save_cache_validity &
2087 ++ (NFS_INO_INVALID_ATTR
2088 + | NFS_INO_REVAL_FORCED);
2089 +
2090 + if (fattr->valid & NFS_ATTR_FATTR_CTIME) {
2091 + memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
2092 + } else if (server->caps & NFS_CAP_CTIME)
2093 +- invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
2094 ++ nfsi->cache_validity |= save_cache_validity &
2095 ++ (NFS_INO_INVALID_ATTR
2096 + | NFS_INO_REVAL_FORCED);
2097 +
2098 + /* Check if our cached file size is stale */
2099 +@@ -1608,7 +1610,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
2100 + (long long)new_isize);
2101 + }
2102 + } else
2103 +- invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
2104 ++ nfsi->cache_validity |= save_cache_validity &
2105 ++ (NFS_INO_INVALID_ATTR
2106 + | NFS_INO_REVAL_PAGECACHE
2107 + | NFS_INO_REVAL_FORCED);
2108 +
2109 +@@ -1616,7 +1619,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
2110 + if (fattr->valid & NFS_ATTR_FATTR_ATIME)
2111 + memcpy(&inode->i_atime, &fattr->atime, sizeof(inode->i_atime));
2112 + else if (server->caps & NFS_CAP_ATIME)
2113 +- invalid |= save_cache_validity & (NFS_INO_INVALID_ATIME
2114 ++ nfsi->cache_validity |= save_cache_validity &
2115 ++ (NFS_INO_INVALID_ATIME
2116 + | NFS_INO_REVAL_FORCED);
2117 +
2118 + if (fattr->valid & NFS_ATTR_FATTR_MODE) {
2119 +@@ -1627,7 +1631,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
2120 + invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
2121 + }
2122 + } else if (server->caps & NFS_CAP_MODE)
2123 +- invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
2124 ++ nfsi->cache_validity |= save_cache_validity &
2125 ++ (NFS_INO_INVALID_ATTR
2126 + | NFS_INO_INVALID_ACCESS
2127 + | NFS_INO_INVALID_ACL
2128 + | NFS_INO_REVAL_FORCED);
2129 +@@ -1638,7 +1643,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
2130 + inode->i_uid = fattr->uid;
2131 + }
2132 + } else if (server->caps & NFS_CAP_OWNER)
2133 +- invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
2134 ++ nfsi->cache_validity |= save_cache_validity &
2135 ++ (NFS_INO_INVALID_ATTR
2136 + | NFS_INO_INVALID_ACCESS
2137 + | NFS_INO_INVALID_ACL
2138 + | NFS_INO_REVAL_FORCED);
2139 +@@ -1649,7 +1655,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
2140 + inode->i_gid = fattr->gid;
2141 + }
2142 + } else if (server->caps & NFS_CAP_OWNER_GROUP)
2143 +- invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
2144 ++ nfsi->cache_validity |= save_cache_validity &
2145 ++ (NFS_INO_INVALID_ATTR
2146 + | NFS_INO_INVALID_ACCESS
2147 + | NFS_INO_INVALID_ACL
2148 + | NFS_INO_REVAL_FORCED);
2149 +@@ -1662,7 +1669,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
2150 + set_nlink(inode, fattr->nlink);
2151 + }
2152 + } else if (server->caps & NFS_CAP_NLINK)
2153 +- invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
2154 ++ nfsi->cache_validity |= save_cache_validity &
2155 ++ (NFS_INO_INVALID_ATTR
2156 + | NFS_INO_REVAL_FORCED);
2157 +
2158 + if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
2159 +diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
2160 +index b9a35c05b60f..5e992fc51e61 100644
2161 +--- a/fs/nfs/nfs4filelayout.c
2162 ++++ b/fs/nfs/nfs4filelayout.c
2163 +@@ -1330,7 +1330,7 @@ filelayout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
2164 + struct nfs4_filelayout *flo;
2165 +
2166 + flo = kzalloc(sizeof(*flo), gfp_flags);
2167 +- return &flo->generic_hdr;
2168 ++ return flo != NULL ? &flo->generic_hdr : NULL;
2169 + }
2170 +
2171 + static void
2172 +diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
2173 +index 2349518eef2c..21275148fc13 100644
2174 +--- a/fs/nfs/nfs4state.c
2175 ++++ b/fs/nfs/nfs4state.c
2176 +@@ -1456,7 +1456,7 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs
2177 + * server that doesn't support a grace period.
2178 + */
2179 + spin_lock(&sp->so_lock);
2180 +- write_seqcount_begin(&sp->so_reclaim_seqcount);
2181 ++ raw_write_seqcount_begin(&sp->so_reclaim_seqcount);
2182 + restart:
2183 + list_for_each_entry(state, &sp->so_states, open_states) {
2184 + if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
2185 +@@ -1519,13 +1519,13 @@ restart:
2186 + spin_lock(&sp->so_lock);
2187 + goto restart;
2188 + }
2189 +- write_seqcount_end(&sp->so_reclaim_seqcount);
2190 ++ raw_write_seqcount_end(&sp->so_reclaim_seqcount);
2191 + spin_unlock(&sp->so_lock);
2192 + return 0;
2193 + out_err:
2194 + nfs4_put_open_state(state);
2195 + spin_lock(&sp->so_lock);
2196 +- write_seqcount_end(&sp->so_reclaim_seqcount);
2197 ++ raw_write_seqcount_end(&sp->so_reclaim_seqcount);
2198 + spin_unlock(&sp->so_lock);
2199 + return status;
2200 + }
2201 +diff --git a/fs/nfs/super.c b/fs/nfs/super.c
2202 +index 2cb56943e232..104ef01d694d 100644
2203 +--- a/fs/nfs/super.c
2204 ++++ b/fs/nfs/super.c
2205 +@@ -2248,6 +2248,7 @@ nfs_remount(struct super_block *sb, int *flags, char *raw_data)
2206 + data->nfs_server.addrlen = nfss->nfs_client->cl_addrlen;
2207 + data->version = nfsvers;
2208 + data->minorversion = nfss->nfs_client->cl_minorversion;
2209 ++ data->net = current->nsproxy->net_ns;
2210 + memcpy(&data->nfs_server.address, &nfss->nfs_client->cl_addr,
2211 + data->nfs_server.addrlen);
2212 +
2213 +diff --git a/fs/nfs/write.c b/fs/nfs/write.c
2214 +index 9a3b6a4cd6b9..aaa16b31e21e 100644
2215 +--- a/fs/nfs/write.c
2216 ++++ b/fs/nfs/write.c
2217 +@@ -913,12 +913,14 @@ static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
2218 +
2219 + if (nfs_have_delegated_attributes(inode))
2220 + goto out;
2221 +- if (nfsi->cache_validity & (NFS_INO_INVALID_DATA|NFS_INO_REVAL_PAGECACHE))
2222 ++ if (nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
2223 + return false;
2224 + smp_rmb();
2225 + if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags))
2226 + return false;
2227 + out:
2228 ++ if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
2229 ++ return false;
2230 + return PageUptodate(page) != 0;
2231 + }
2232 +
2233 +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
2234 +index 9a77a5a21557..6134ee283798 100644
2235 +--- a/fs/nfsd/nfs4state.c
2236 ++++ b/fs/nfsd/nfs4state.c
2237 +@@ -3726,7 +3726,7 @@ nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
2238 + * correspondance, and we have to delete the lockowner when we
2239 + * delete the lock stateid:
2240 + */
2241 +- unhash_lockowner(lo);
2242 ++ release_lockowner(lo);
2243 + return nfs_ok;
2244 + }
2245 +
2246 +diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
2247 +index 18881f34737a..b4c49588eada 100644
2248 +--- a/fs/nfsd/nfs4xdr.c
2249 ++++ b/fs/nfsd/nfs4xdr.c
2250 +@@ -2095,8 +2095,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
2251 + err = vfs_getattr(&path, &stat);
2252 + if (err)
2253 + goto out_nfserr;
2254 +- if ((bmval0 & (FATTR4_WORD0_FILES_FREE | FATTR4_WORD0_FILES_TOTAL |
2255 +- FATTR4_WORD0_MAXNAME)) ||
2256 ++ if ((bmval0 & (FATTR4_WORD0_FILES_AVAIL | FATTR4_WORD0_FILES_FREE |
2257 ++ FATTR4_WORD0_FILES_TOTAL | FATTR4_WORD0_MAXNAME)) ||
2258 + (bmval1 & (FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE |
2259 + FATTR4_WORD1_SPACE_TOTAL))) {
2260 + err = vfs_statfs(&path, &statfs);
2261 +diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
2262 +index bc8b8009897d..6e48eb0ff61d 100644
2263 +--- a/fs/reiserfs/inode.c
2264 ++++ b/fs/reiserfs/inode.c
2265 +@@ -3220,8 +3220,14 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
2266 + attr->ia_size != i_size_read(inode)) {
2267 + error = inode_newsize_ok(inode, attr->ia_size);
2268 + if (!error) {
2269 ++ /*
2270 ++ * Could race against reiserfs_file_release
2271 ++ * if called from NFS, so take tailpack mutex.
2272 ++ */
2273 ++ mutex_lock(&REISERFS_I(inode)->tailpack);
2274 + truncate_setsize(inode, attr->ia_size);
2275 +- reiserfs_vfs_truncate_file(inode);
2276 ++ reiserfs_truncate_file(inode, 1);
2277 ++ mutex_unlock(&REISERFS_I(inode)->tailpack);
2278 + }
2279 + }
2280 +
2281 +diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
2282 +index 4f34dbae823d..f7d48a08f443 100644
2283 +--- a/fs/ubifs/file.c
2284 ++++ b/fs/ubifs/file.c
2285 +@@ -1525,8 +1525,7 @@ static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma,
2286 + }
2287 +
2288 + wait_for_stable_page(page);
2289 +- unlock_page(page);
2290 +- return 0;
2291 ++ return VM_FAULT_LOCKED;
2292 +
2293 + out_unlock:
2294 + unlock_page(page);
2295 +diff --git a/fs/ubifs/shrinker.c b/fs/ubifs/shrinker.c
2296 +index f35135e28e96..9a9fb94a41c6 100644
2297 +--- a/fs/ubifs/shrinker.c
2298 ++++ b/fs/ubifs/shrinker.c
2299 +@@ -128,7 +128,6 @@ static int shrink_tnc(struct ubifs_info *c, int nr, int age, int *contention)
2300 + freed = ubifs_destroy_tnc_subtree(znode);
2301 + atomic_long_sub(freed, &ubifs_clean_zn_cnt);
2302 + atomic_long_sub(freed, &c->clean_zn_cnt);
2303 +- ubifs_assert(atomic_long_read(&c->clean_zn_cnt) >= 0);
2304 + total_freed += freed;
2305 + znode = zprev;
2306 + }
2307 +diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
2308 +index 944f3d9456a8..a9e29ea37620 100644
2309 +--- a/fs/xfs/xfs_mount.c
2310 ++++ b/fs/xfs/xfs_mount.c
2311 +@@ -323,8 +323,19 @@ reread:
2312 + /*
2313 + * Initialize the mount structure from the superblock.
2314 + */
2315 +- xfs_sb_from_disk(&mp->m_sb, XFS_BUF_TO_SBP(bp));
2316 +- xfs_sb_quota_from_disk(&mp->m_sb);
2317 ++ xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
2318 ++ xfs_sb_quota_from_disk(sbp);
2319 ++
2320 ++ /*
2321 ++ * If we haven't validated the superblock, do so now before we try
2322 ++ * to check the sector size and reread the superblock appropriately.
2323 ++ */
2324 ++ if (sbp->sb_magicnum != XFS_SB_MAGIC) {
2325 ++ if (loud)
2326 ++ xfs_warn(mp, "Invalid superblock magic number");
2327 ++ error = EINVAL;
2328 ++ goto release_buf;
2329 ++ }
2330 +
2331 + /*
2332 + * We must be able to do sector-sized and sector-aligned IO.
2333 +@@ -337,11 +348,11 @@ reread:
2334 + goto release_buf;
2335 + }
2336 +
2337 +- /*
2338 +- * Re-read the superblock so the buffer is correctly sized,
2339 +- * and properly verified.
2340 +- */
2341 + if (buf_ops == NULL) {
2342 ++ /*
2343 ++ * Re-read the superblock so the buffer is correctly sized,
2344 ++ * and properly verified.
2345 ++ */
2346 + xfs_buf_relse(bp);
2347 + sector_size = sbp->sb_sectsize;
2348 + buf_ops = loud ? &xfs_sb_buf_ops : &xfs_sb_quiet_buf_ops;
2349 +diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
2350 +index 077904c8b70d..cc79eff4a1ad 100644
2351 +--- a/include/linux/ptrace.h
2352 ++++ b/include/linux/ptrace.h
2353 +@@ -334,6 +334,9 @@ static inline void user_single_step_siginfo(struct task_struct *tsk,
2354 + * calling arch_ptrace_stop() when it would be superfluous. For example,
2355 + * if the thread has not been back to user mode since the last stop, the
2356 + * thread state might indicate that nothing needs to be done.
2357 ++ *
2358 ++ * This is guaranteed to be invoked once before a task stops for ptrace and
2359 ++ * may include arch-specific operations necessary prior to a ptrace stop.
2360 + */
2361 + #define arch_ptrace_stop_needed(code, info) (0)
2362 + #endif
2363 +diff --git a/include/trace/syscall.h b/include/trace/syscall.h
2364 +index fed853f3d7aa..9674145e2f6a 100644
2365 +--- a/include/trace/syscall.h
2366 ++++ b/include/trace/syscall.h
2367 +@@ -4,6 +4,7 @@
2368 + #include <linux/tracepoint.h>
2369 + #include <linux/unistd.h>
2370 + #include <linux/ftrace_event.h>
2371 ++#include <linux/thread_info.h>
2372 +
2373 + #include <asm/ptrace.h>
2374 +
2375 +@@ -32,4 +33,18 @@ struct syscall_metadata {
2376 + struct ftrace_event_call *exit_event;
2377 + };
2378 +
2379 ++#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_HAVE_SYSCALL_TRACEPOINTS)
2380 ++static inline void syscall_tracepoint_update(struct task_struct *p)
2381 ++{
2382 ++ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
2383 ++ set_tsk_thread_flag(p, TIF_SYSCALL_TRACEPOINT);
2384 ++ else
2385 ++ clear_tsk_thread_flag(p, TIF_SYSCALL_TRACEPOINT);
2386 ++}
2387 ++#else
2388 ++static inline void syscall_tracepoint_update(struct task_struct *p)
2389 ++{
2390 ++}
2391 ++#endif
2392 ++
2393 + #endif /* _TRACE_SYSCALL_H */
2394 +diff --git a/kernel/fork.c b/kernel/fork.c
2395 +index 142904349fb5..68b92262dc45 100644
2396 +--- a/kernel/fork.c
2397 ++++ b/kernel/fork.c
2398 +@@ -1487,7 +1487,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
2399 +
2400 + total_forks++;
2401 + spin_unlock(&current->sighand->siglock);
2402 ++ syscall_tracepoint_update(p);
2403 + write_unlock_irq(&tasklist_lock);
2404 ++
2405 + proc_fork_connector(p);
2406 + cgroup_post_fork(p);
2407 + if (clone_flags & CLONE_THREAD)
2408 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
2409 +index 737b0efa1a62..e916972c6d87 100644
2410 +--- a/kernel/trace/trace.c
2411 ++++ b/kernel/trace/trace.c
2412 +@@ -1461,12 +1461,12 @@ static void tracing_stop_tr(struct trace_array *tr)
2413 +
2414 + void trace_stop_cmdline_recording(void);
2415 +
2416 +-static void trace_save_cmdline(struct task_struct *tsk)
2417 ++static int trace_save_cmdline(struct task_struct *tsk)
2418 + {
2419 + unsigned pid, idx;
2420 +
2421 + if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
2422 +- return;
2423 ++ return 0;
2424 +
2425 + /*
2426 + * It's not the end of the world if we don't get
2427 +@@ -1475,7 +1475,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
2428 + * so if we miss here, then better luck next time.
2429 + */
2430 + if (!arch_spin_trylock(&trace_cmdline_lock))
2431 +- return;
2432 ++ return 0;
2433 +
2434 + idx = map_pid_to_cmdline[tsk->pid];
2435 + if (idx == NO_CMDLINE_MAP) {
2436 +@@ -1500,6 +1500,8 @@ static void trace_save_cmdline(struct task_struct *tsk)
2437 + memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
2438 +
2439 + arch_spin_unlock(&trace_cmdline_lock);
2440 ++
2441 ++ return 1;
2442 + }
2443 +
2444 + void trace_find_cmdline(int pid, char comm[])
2445 +@@ -1541,9 +1543,8 @@ void tracing_record_cmdline(struct task_struct *tsk)
2446 + if (!__this_cpu_read(trace_cmdline_save))
2447 + return;
2448 +
2449 +- __this_cpu_write(trace_cmdline_save, false);
2450 +-
2451 +- trace_save_cmdline(tsk);
2452 ++ if (trace_save_cmdline(tsk))
2453 ++ __this_cpu_write(trace_cmdline_save, false);
2454 + }
2455 +
2456 + void
2457 +diff --git a/kernel/watchdog.c b/kernel/watchdog.c
2458 +index 516203e665fc..30e482240dae 100644
2459 +--- a/kernel/watchdog.c
2460 ++++ b/kernel/watchdog.c
2461 +@@ -527,10 +527,8 @@ static void update_timers_all_cpus(void)
2462 + int cpu;
2463 +
2464 + get_online_cpus();
2465 +- preempt_disable();
2466 + for_each_online_cpu(cpu)
2467 + update_timers(cpu);
2468 +- preempt_enable();
2469 + put_online_cpus();
2470 + }
2471 +
2472 +diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
2473 +index b74da447e81e..7a85967060a5 100644
2474 +--- a/lib/lz4/lz4_decompress.c
2475 ++++ b/lib/lz4/lz4_decompress.c
2476 +@@ -192,6 +192,8 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
2477 + int s = 255;
2478 + while ((ip < iend) && (s == 255)) {
2479 + s = *ip++;
2480 ++ if (unlikely(length > (size_t)(length + s)))
2481 ++ goto _output_error;
2482 + length += s;
2483 + }
2484 + }
2485 +@@ -232,6 +234,8 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
2486 + if (length == ML_MASK) {
2487 + while (ip < iend) {
2488 + int s = *ip++;
2489 ++ if (unlikely(length > (size_t)(length + s)))
2490 ++ goto _output_error;
2491 + length += s;
2492 + if (s == 255)
2493 + continue;
2494 +@@ -284,7 +288,7 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
2495 +
2496 + /* write overflow error detected */
2497 + _output_error:
2498 +- return (int) (-(((char *) ip) - source));
2499 ++ return -1;
2500 + }
2501 +
2502 + int lz4_decompress(const unsigned char *src, size_t *src_len,
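
The two lz4 checks above reject a compressed stream whose accumulated run length would wrap around, detected by length + s coming out smaller than length. A standalone version of that wrap-around guard, using size_t arithmetic (where the wrap is well defined):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Returns 0 on success, -1 if adding s would wrap the accumulator. */
    static int add_len(size_t *length, unsigned char s)
    {
        if (*length > (size_t)(*length + s))    /* unsigned wrap detected */
            return -1;
        *length += s;
        return 0;
    }

    int main(void)
    {
        size_t len = 10;

        printf("normal add: %d (len=%zu)\n", add_len(&len, 255), len);

        len = SIZE_MAX - 100;                   /* crafted stream near the limit */
        printf("overflow  : %d\n", add_len(&len, 255));
        return 0;
    }
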
2503 +diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
2504 +index 06c6ff0cb911..a4acaf2bcf18 100644
2505 +--- a/net/sunrpc/svc_xprt.c
2506 ++++ b/net/sunrpc/svc_xprt.c
2507 +@@ -730,6 +730,8 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
2508 + newxpt = xprt->xpt_ops->xpo_accept(xprt);
2509 + if (newxpt)
2510 + svc_add_new_temp_xprt(serv, newxpt);
2511 ++ else
2512 ++ module_put(xprt->xpt_class->xcl_owner);
2513 + } else if (xprt->xpt_ops->xpo_has_wspace(xprt)) {
2514 + /* XPT_DATA|XPT_DEFERRED case: */
2515 + dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
2516 +diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
2517 +index 9d1421e63ff8..49b582a225b0 100644
2518 +--- a/scripts/recordmcount.h
2519 ++++ b/scripts/recordmcount.h
2520 +@@ -163,11 +163,11 @@ static int mcount_adjust = 0;
2521 +
2522 + static int MIPS_is_fake_mcount(Elf_Rel const *rp)
2523 + {
2524 +- static Elf_Addr old_r_offset;
2525 ++ static Elf_Addr old_r_offset = ~(Elf_Addr)0;
2526 + Elf_Addr current_r_offset = _w(rp->r_offset);
2527 + int is_fake;
2528 +
2529 +- is_fake = old_r_offset &&
2530 ++ is_fake = (old_r_offset != ~(Elf_Addr)0) &&
2531 + (current_r_offset - old_r_offset == MIPS_FAKEMCOUNT_OFFSET);
2532 + old_r_offset = current_r_offset;
2533 +
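
The recordmcount change stops treating offset 0 as "no previous relocation seen": 0 is itself a legal r_offset, so ~0 is used as an explicit sentinel instead. A minimal sketch of the same sentinel idea (the names and the delta of 8 are illustrative, not the MIPS-specific constants):

    #include <stdint.h>
    #include <stdio.h>

    #define UNSET (~(uint64_t)0)

    static uint64_t old_offset = UNSET;

    /* Returns 1 when the current offset follows the previous one by the delta. */
    static int is_fake(uint64_t cur)
    {
        int fake = (old_offset != UNSET) && (cur - old_offset == 8);

        old_offset = cur;
        return fake;
    }

    int main(void)
    {
        /* With "0 means unset", an entry at offset 0 followed by one at
         * offset 8 would have been missed; the sentinel catches it. */
        int a = is_fake(0);
        int b = is_fake(8);
        int c = is_fake(100);

        printf("%d %d %d\n", a, b, c);
        return 0;
    }
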
2534 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
2535 +index 6cc3cf285558..0176cf0e01d9 100644
2536 +--- a/sound/pci/hda/hda_intel.c
2537 ++++ b/sound/pci/hda/hda_intel.c
2538 +@@ -282,6 +282,24 @@ static char *driver_short_names[] = {
2539 + [AZX_DRIVER_GENERIC] = "HD-Audio Generic",
2540 + };
2541 +
2542 ++
2543 ++/* Intel HSW/BDW display HDA controller Extended Mode registers.
2544 ++ * EM4 (M value) and EM5 (N Value) are used to convert CDClk (Core Display
2545 ++ * Clock) to 24MHz BCLK: BCLK = CDCLK * M / N
2546 ++ * The values will be lost when the display power well is disabled.
2547 ++ */
2548 ++#define ICH6_REG_EM4 0x100c
2549 ++#define ICH6_REG_EM5 0x1010
2550 ++
2551 ++struct hda_intel {
2552 ++ struct azx chip;
2553 ++
2554 ++ /* HSW/BDW display HDA controller to restore BCLK from CDCLK */
2555 ++ unsigned int bclk_m;
2556 ++ unsigned int bclk_n;
2557 ++};
2558 ++
2559 ++
2560 + #ifdef CONFIG_X86
2561 + static void __mark_pages_wc(struct azx *chip, struct snd_dma_buffer *dmab, bool on)
2562 + {
2563 +@@ -574,6 +592,22 @@ static int param_set_xint(const char *val, const struct kernel_param *kp)
2564 + #define azx_del_card_list(chip) /* NOP */
2565 + #endif /* CONFIG_PM */
2566 +
2567 ++static void haswell_save_bclk(struct azx *chip)
2568 ++{
2569 ++ struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
2570 ++
2571 ++ hda->bclk_m = azx_readw(chip, EM4);
2572 ++ hda->bclk_n = azx_readw(chip, EM5);
2573 ++}
2574 ++
2575 ++static void haswell_restore_bclk(struct azx *chip)
2576 ++{
2577 ++ struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
2578 ++
2579 ++ azx_writew(chip, EM4, hda->bclk_m);
2580 ++ azx_writew(chip, EM5, hda->bclk_n);
2581 ++}
2582 ++
2583 + #if defined(CONFIG_PM_SLEEP) || defined(SUPPORT_VGA_SWITCHEROO)
2584 + /*
2585 + * power management
2586 +@@ -600,6 +634,13 @@ static int azx_suspend(struct device *dev)
2587 + free_irq(chip->irq, chip);
2588 + chip->irq = -1;
2589 + }
2590 ++
2591 ++ /* Save BCLK M/N values before they become invalid in D3.
2592 ++ * Will test if display power well can be released now.
2593 ++ */
2594 ++ if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
2595 ++ haswell_save_bclk(chip);
2596 ++
2597 + if (chip->msi)
2598 + pci_disable_msi(chip->pci);
2599 + pci_disable_device(pci);
2600 +@@ -619,8 +660,10 @@ static int azx_resume(struct device *dev)
2601 + if (chip->disabled)
2602 + return 0;
2603 +
2604 +- if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
2605 ++ if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
2606 + hda_display_power(true);
2607 ++ haswell_restore_bclk(chip);
2608 ++ }
2609 + pci_set_power_state(pci, PCI_D0);
2610 + pci_restore_state(pci);
2611 + if (pci_enable_device(pci) < 0) {
2612 +@@ -664,8 +707,10 @@ static int azx_runtime_suspend(struct device *dev)
2613 + azx_stop_chip(chip);
2614 + azx_enter_link_reset(chip);
2615 + azx_clear_irq_pending(chip);
2616 +- if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
2617 ++ if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
2618 ++ haswell_save_bclk(chip);
2619 + hda_display_power(false);
2620 ++ }
2621 + return 0;
2622 + }
2623 +
2624 +@@ -683,8 +728,10 @@ static int azx_runtime_resume(struct device *dev)
2625 + if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
2626 + return 0;
2627 +
2628 +- if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
2629 ++ if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
2630 + hda_display_power(true);
2631 ++ haswell_restore_bclk(chip);
2632 ++ }
2633 +
2634 + /* Read STATESTS before controller reset */
2635 + status = azx_readw(chip, STATESTS);
2636 +@@ -877,6 +924,8 @@ static int register_vga_switcheroo(struct azx *chip)
2637 + static int azx_free(struct azx *chip)
2638 + {
2639 + struct pci_dev *pci = chip->pci;
2640 ++ struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
2641 ++
2642 + int i;
2643 +
2644 + if ((chip->driver_caps & AZX_DCAPS_PM_RUNTIME)
2645 +@@ -924,7 +973,7 @@ static int azx_free(struct azx *chip)
2646 + hda_display_power(false);
2647 + hda_i915_exit();
2648 + }
2649 +- kfree(chip);
2650 ++ kfree(hda);
2651 +
2652 + return 0;
2653 + }
2654 +@@ -1168,6 +1217,7 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
2655 + static struct snd_device_ops ops = {
2656 + .dev_free = azx_dev_free,
2657 + };
2658 ++ struct hda_intel *hda;
2659 + struct azx *chip;
2660 + int err;
2661 +
2662 +@@ -1177,13 +1227,14 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
2663 + if (err < 0)
2664 + return err;
2665 +
2666 +- chip = kzalloc(sizeof(*chip), GFP_KERNEL);
2667 +- if (!chip) {
2668 +- dev_err(card->dev, "Cannot allocate chip\n");
2669 ++ hda = kzalloc(sizeof(*hda), GFP_KERNEL);
2670 ++ if (!hda) {
2671 ++ dev_err(card->dev, "Cannot allocate hda\n");
2672 + pci_disable_device(pci);
2673 + return -ENOMEM;
2674 + }
2675 +
2676 ++ chip = &hda->chip;
2677 + spin_lock_init(&chip->reg_lock);
2678 + mutex_init(&chip->open_mutex);
2679 + chip->card = card;
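
The hda_intel hunks wrap struct azx in a new struct hda_intel so the driver can stash the HSW/BDW BCLK M/N values next to the chip, recover the outer object with container_of(), and therefore must kfree() the outer allocation in azx_free(). A userspace sketch of that embedded-struct pattern (my_container_of mirrors the kernel macro; the struct names are invented):

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define my_container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct chip {                   /* the embedded, widely shared structure */
        int irq;
    };

    struct hda {                    /* the outer, bus-specific structure */
        struct chip chip;
        unsigned int bclk_m;
        unsigned int bclk_n;
    };

    static void chip_free(struct chip *chip)
    {
        /* Free the outer allocation, not just the embedded member. */
        struct hda *hda = my_container_of(chip, struct hda, chip);

        free(hda);
    }

    int main(void)
    {
        struct hda *hda = calloc(1, sizeof(*hda));

        if (!hda)
            return 1;
        hda->chip.irq = 42;
        printf("irq=%d\n", hda->chip.irq);
        chip_free(&hda->chip);      /* callers only ever see the inner pointer */
        return 0;
    }
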
2680 +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
2681 +index 8867ab3a71d4..bce551293e2a 100644
2682 +--- a/sound/pci/hda/patch_hdmi.c
2683 ++++ b/sound/pci/hda/patch_hdmi.c
2684 +@@ -2208,7 +2208,7 @@ static int generic_hdmi_resume(struct hda_codec *codec)
2685 + struct hdmi_spec *spec = codec->spec;
2686 + int pin_idx;
2687 +
2688 +- generic_hdmi_init(codec);
2689 ++ codec->patch_ops.init(codec);
2690 + snd_hda_codec_resume_amp(codec);
2691 + snd_hda_codec_resume_cache(codec);
2692 +
2693 +diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
2694 +index 75515b494034..37710495fa0a 100644
2695 +--- a/sound/pci/hda/patch_sigmatel.c
2696 ++++ b/sound/pci/hda/patch_sigmatel.c
2697 +@@ -122,6 +122,12 @@ enum {
2698 + };
2699 +
2700 + enum {
2701 ++ STAC_92HD95_HP_LED,
2702 ++ STAC_92HD95_HP_BASS,
2703 ++ STAC_92HD95_MODELS
2704 ++};
2705 ++
2706 ++enum {
2707 + STAC_925x_REF,
2708 + STAC_M1,
2709 + STAC_M1_2,
2710 +@@ -4128,6 +4134,48 @@ static const struct snd_pci_quirk stac9205_fixup_tbl[] = {
2711 + {} /* terminator */
2712 + };
2713 +
2714 ++static void stac92hd95_fixup_hp_led(struct hda_codec *codec,
2715 ++ const struct hda_fixup *fix, int action)
2716 ++{
2717 ++ struct sigmatel_spec *spec = codec->spec;
2718 ++
2719 ++ if (action != HDA_FIXUP_ACT_PRE_PROBE)
2720 ++ return;
2721 ++
2722 ++ if (find_mute_led_cfg(codec, spec->default_polarity))
2723 ++ codec_dbg(codec, "mute LED gpio %d polarity %d\n",
2724 ++ spec->gpio_led,
2725 ++ spec->gpio_led_polarity);
2726 ++}
2727 ++
2728 ++static const struct hda_fixup stac92hd95_fixups[] = {
2729 ++ [STAC_92HD95_HP_LED] = {
2730 ++ .type = HDA_FIXUP_FUNC,
2731 ++ .v.func = stac92hd95_fixup_hp_led,
2732 ++ },
2733 ++ [STAC_92HD95_HP_BASS] = {
2734 ++ .type = HDA_FIXUP_VERBS,
2735 ++ .v.verbs = (const struct hda_verb[]) {
2736 ++ {0x1a, 0x795, 0x00}, /* HPF to 100Hz */
2737 ++ {}
2738 ++ },
2739 ++ .chained = true,
2740 ++ .chain_id = STAC_92HD95_HP_LED,
2741 ++ },
2742 ++};
2743 ++
2744 ++static const struct snd_pci_quirk stac92hd95_fixup_tbl[] = {
2745 ++ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1911, "HP Spectre 13", STAC_92HD95_HP_BASS),
2746 ++ {} /* terminator */
2747 ++};
2748 ++
2749 ++static const struct hda_model_fixup stac92hd95_models[] = {
2750 ++ { .id = STAC_92HD95_HP_LED, .name = "hp-led" },
2751 ++ { .id = STAC_92HD95_HP_BASS, .name = "hp-bass" },
2752 ++ {}
2753 ++};
2754 ++
2755 ++
2756 + static int stac_parse_auto_config(struct hda_codec *codec)
2757 + {
2758 + struct sigmatel_spec *spec = codec->spec;
2759 +@@ -4580,10 +4628,16 @@ static int patch_stac92hd95(struct hda_codec *codec)
2760 + spec->gen.beep_nid = 0x19; /* digital beep */
2761 + spec->pwr_nids = stac92hd95_pwr_nids;
2762 + spec->num_pwrs = ARRAY_SIZE(stac92hd95_pwr_nids);
2763 +- spec->default_polarity = -1; /* no default cfg */
2764 ++ spec->default_polarity = 0;
2765 +
2766 + codec->patch_ops = stac_patch_ops;
2767 +
2768 ++ snd_hda_pick_fixup(codec, stac92hd95_models, stac92hd95_fixup_tbl,
2769 ++ stac92hd95_fixups);
2770 ++ snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
2771 ++
2772 ++ stac_setup_gpio(codec);
2773 ++
2774 + err = stac_parse_auto_config(codec);
2775 + if (err < 0) {
2776 + stac_free(codec);
2777 +@@ -4592,6 +4646,8 @@ static int patch_stac92hd95(struct hda_codec *codec)
2778 +
2779 + codec->proc_widget_hook = stac92hd_proc_hook;
2780 +
2781 ++ snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PROBE);
2782 ++
2783 + return 0;
2784 + }
2785 +
2786 +diff --git a/sound/usb/card.c b/sound/usb/card.c
2787 +index c3b5b7dca1c3..a09e5f3519e3 100644
2788 +--- a/sound/usb/card.c
2789 ++++ b/sound/usb/card.c
2790 +@@ -307,6 +307,11 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
2791 +
2792 + static int snd_usb_audio_free(struct snd_usb_audio *chip)
2793 + {
2794 ++ struct list_head *p, *n;
2795 ++
2796 ++ list_for_each_safe(p, n, &chip->ep_list)
2797 ++ snd_usb_endpoint_free(p);
2798 ++
2799 + mutex_destroy(&chip->mutex);
2800 + kfree(chip);
2801 + return 0;
2802 +@@ -585,7 +590,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
2803 + struct snd_usb_audio *chip)
2804 + {
2805 + struct snd_card *card;
2806 +- struct list_head *p, *n;
2807 ++ struct list_head *p;
2808 +
2809 + if (chip == (void *)-1L)
2810 + return;
2811 +@@ -598,14 +603,16 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
2812 + mutex_lock(&register_mutex);
2813 + chip->num_interfaces--;
2814 + if (chip->num_interfaces <= 0) {
2815 ++ struct snd_usb_endpoint *ep;
2816 ++
2817 + snd_card_disconnect(card);
2818 + /* release the pcm resources */
2819 + list_for_each(p, &chip->pcm_list) {
2820 + snd_usb_stream_disconnect(p);
2821 + }
2822 + /* release the endpoint resources */
2823 +- list_for_each_safe(p, n, &chip->ep_list) {
2824 +- snd_usb_endpoint_free(p);
2825 ++ list_for_each_entry(ep, &chip->ep_list, list) {
2826 ++ snd_usb_endpoint_release(ep);
2827 + }
2828 + /* release the midi resources */
2829 + list_for_each(p, &chip->midi_list) {
2830 +diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
2831 +index 289f582c9130..114e3e7ff511 100644
2832 +--- a/sound/usb/endpoint.c
2833 ++++ b/sound/usb/endpoint.c
2834 +@@ -987,19 +987,30 @@ void snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep)
2835 + }
2836 +
2837 + /**
2838 ++ * snd_usb_endpoint_release: Tear down an snd_usb_endpoint
2839 ++ *
2840 ++ * @ep: the endpoint to release
2841 ++ *
2842 ++ * This function does not care for the endpoint's use count but will tear
2843 ++ * down all the streaming URBs immediately.
2844 ++ */
2845 ++void snd_usb_endpoint_release(struct snd_usb_endpoint *ep)
2846 ++{
2847 ++ release_urbs(ep, 1);
2848 ++}
2849 ++
2850 ++/**
2851 + * snd_usb_endpoint_free: Free the resources of an snd_usb_endpoint
2852 + *
2853 + * @ep: the list header of the endpoint to free
2854 + *
2855 +- * This function does not care for the endpoint's use count but will tear
2856 +- * down all the streaming URBs immediately and free all resources.
2857 ++ * This frees all resources of the given ep.
2858 + */
2859 + void snd_usb_endpoint_free(struct list_head *head)
2860 + {
2861 + struct snd_usb_endpoint *ep;
2862 +
2863 + ep = list_entry(head, struct snd_usb_endpoint, list);
2864 +- release_urbs(ep, 1);
2865 + kfree(ep);
2866 + }
2867 +
2868 +diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h
2869 +index 1c7e8ee48abc..e61ee5c356a3 100644
2870 +--- a/sound/usb/endpoint.h
2871 ++++ b/sound/usb/endpoint.h
2872 +@@ -23,6 +23,7 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep);
2873 + void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep);
2874 + int snd_usb_endpoint_activate(struct snd_usb_endpoint *ep);
2875 + void snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep);
2876 ++void snd_usb_endpoint_release(struct snd_usb_endpoint *ep);
2877 + void snd_usb_endpoint_free(struct list_head *head);
2878 +
2879 + int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep);
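
Taken together, the sound/usb hunks split endpoint teardown in two: at disconnect the streaming URBs are torn down via the new snd_usb_endpoint_release(), while the memory is only freed from snd_usb_audio_free() once the card itself goes away. A userspace-only sketch of that two-phase teardown (all names are invented):

    #include <stdio.h>
    #include <stdlib.h>

    struct ep_stub {
        int urbs_active;
    };

    static void ep_release(struct ep_stub *ep)
    {
        /* Phase 1, at disconnect: stop hardware activity, keep the memory. */
        ep->urbs_active = 0;
        printf("urbs stopped\n");
    }

    static void ep_free(struct ep_stub *ep)
    {
        /* Phase 2, at final card free: the memory may now go away safely. */
        free(ep);
        printf("endpoint freed\n");
    }

    int main(void)
    {
        struct ep_stub *ep = calloc(1, sizeof(*ep));

        if (!ep)
            return 1;
        ep->urbs_active = 1;
        ep_release(ep);   /* device unplugged while the PCM device is still open */
        ep_free(ep);      /* last file handle closed, card refcount hits zero */
        return 0;
    }
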