Gentoo Archives: gentoo-commits

From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Fri, 28 Oct 2016 18:27:28
Message-Id: 1477679184.ae1e8223edd642dd90a57647ff6ad861df23e5fc.alicef@gentoo
1 commit: ae1e8223edd642dd90a57647ff6ad861df23e5fc
2 Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
3 AuthorDate: Fri Oct 28 18:26:24 2016 +0000
4 Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
5 CommitDate: Fri Oct 28 18:26:24 2016 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ae1e8223
7
8 Linux patch 4.4.28
9
10 0000_README | 4 +
11 1027_linux-4.4.28.patch | 4828 +++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 4832 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index 148210e..356c33e 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -151,6 +151,10 @@ Patch: 1026_linux-4.4.27.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.4.27
21
22 +Patch: 1027_linux-4.4.28.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.4.28
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1027_linux-4.4.28.patch b/1027_linux-4.4.28.patch
31 new file mode 100644
32 index 0000000..a653d5d
33 --- /dev/null
34 +++ b/1027_linux-4.4.28.patch
35 @@ -0,0 +1,4828 @@
36 +diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
37 +index 0e4102ae1a61..c360f80c3473 100644
38 +--- a/Documentation/kernel-parameters.txt
39 ++++ b/Documentation/kernel-parameters.txt
40 +@@ -1371,7 +1371,14 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
41 + i8042.nopnp [HW] Don't use ACPIPnP / PnPBIOS to discover KBD/AUX
42 + controllers
43 + i8042.notimeout [HW] Ignore timeout condition signalled by controller
44 +- i8042.reset [HW] Reset the controller during init and cleanup
45 ++ i8042.reset [HW] Reset the controller during init, cleanup and
46 ++ suspend-to-ram transitions, only during s2r
47 ++ transitions, or never reset
48 ++ Format: { 1 | Y | y | 0 | N | n }
49 ++ 1, Y, y: always reset controller
50 ++ 0, N, n: don't ever reset controller
51 ++ Default: only on s2r transitions on x86; most other
52 ++ architectures force reset to be always executed
53 + i8042.unlock [HW] Unlock (ignore) the keylock
54 + i8042.kbdreset [HW] Reset device connected to KBD port
55 +
56 +diff --git a/Makefile b/Makefile
57 +index b6ee4ce561f8..391294301aaf 100644
58 +--- a/Makefile
59 ++++ b/Makefile
60 +@@ -1,6 +1,6 @@
61 + VERSION = 4
62 + PATCHLEVEL = 4
63 +-SUBLEVEL = 27
64 ++SUBLEVEL = 28
65 + EXTRAVERSION =
66 + NAME = Blurry Fish Butt
67 +
68 +diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
69 +index 004b7f0bc76c..257b8699efde 100644
70 +--- a/arch/arc/kernel/signal.c
71 ++++ b/arch/arc/kernel/signal.c
72 +@@ -107,13 +107,13 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
73 + struct user_regs_struct uregs;
74 +
75 + err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
76 +- if (!err)
77 +- set_current_blocked(&set);
78 +-
79 + err |= __copy_from_user(&uregs.scratch,
80 + &(sf->uc.uc_mcontext.regs.scratch),
81 + sizeof(sf->uc.uc_mcontext.regs.scratch));
82 ++ if (err)
83 ++ return err;
84 +
85 ++ set_current_blocked(&set);
86 + regs->bta = uregs.scratch.bta;
87 + regs->lp_start = uregs.scratch.lp_start;
88 + regs->lp_end = uregs.scratch.lp_end;
89 +@@ -138,7 +138,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
90 + regs->r0 = uregs.scratch.r0;
91 + regs->sp = uregs.scratch.sp;
92 +
93 +- return err;
94 ++ return 0;
95 + }
96 +
97 + static inline int is_do_ss_needed(unsigned int magic)
98 +diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
99 +index 0a456bef8c79..8a336852eeba 100644
100 +--- a/arch/arm64/include/asm/percpu.h
101 ++++ b/arch/arm64/include/asm/percpu.h
102 +@@ -44,48 +44,44 @@ static inline unsigned long __percpu_##op(void *ptr, \
103 + \
104 + switch (size) { \
105 + case 1: \
106 +- do { \
107 +- asm ("//__per_cpu_" #op "_1\n" \
108 +- "ldxrb %w[ret], %[ptr]\n" \
109 ++ asm ("//__per_cpu_" #op "_1\n" \
110 ++ "1: ldxrb %w[ret], %[ptr]\n" \
111 + #asm_op " %w[ret], %w[ret], %w[val]\n" \
112 +- "stxrb %w[loop], %w[ret], %[ptr]\n" \
113 +- : [loop] "=&r" (loop), [ret] "=&r" (ret), \
114 +- [ptr] "+Q"(*(u8 *)ptr) \
115 +- : [val] "Ir" (val)); \
116 +- } while (loop); \
117 ++ " stxrb %w[loop], %w[ret], %[ptr]\n" \
118 ++ " cbnz %w[loop], 1b" \
119 ++ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
120 ++ [ptr] "+Q"(*(u8 *)ptr) \
121 ++ : [val] "Ir" (val)); \
122 + break; \
123 + case 2: \
124 +- do { \
125 +- asm ("//__per_cpu_" #op "_2\n" \
126 +- "ldxrh %w[ret], %[ptr]\n" \
127 ++ asm ("//__per_cpu_" #op "_2\n" \
128 ++ "1: ldxrh %w[ret], %[ptr]\n" \
129 + #asm_op " %w[ret], %w[ret], %w[val]\n" \
130 +- "stxrh %w[loop], %w[ret], %[ptr]\n" \
131 +- : [loop] "=&r" (loop), [ret] "=&r" (ret), \
132 +- [ptr] "+Q"(*(u16 *)ptr) \
133 +- : [val] "Ir" (val)); \
134 +- } while (loop); \
135 ++ " stxrh %w[loop], %w[ret], %[ptr]\n" \
136 ++ " cbnz %w[loop], 1b" \
137 ++ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
138 ++ [ptr] "+Q"(*(u16 *)ptr) \
139 ++ : [val] "Ir" (val)); \
140 + break; \
141 + case 4: \
142 +- do { \
143 +- asm ("//__per_cpu_" #op "_4\n" \
144 +- "ldxr %w[ret], %[ptr]\n" \
145 ++ asm ("//__per_cpu_" #op "_4\n" \
146 ++ "1: ldxr %w[ret], %[ptr]\n" \
147 + #asm_op " %w[ret], %w[ret], %w[val]\n" \
148 +- "stxr %w[loop], %w[ret], %[ptr]\n" \
149 +- : [loop] "=&r" (loop), [ret] "=&r" (ret), \
150 +- [ptr] "+Q"(*(u32 *)ptr) \
151 +- : [val] "Ir" (val)); \
152 +- } while (loop); \
153 ++ " stxr %w[loop], %w[ret], %[ptr]\n" \
154 ++ " cbnz %w[loop], 1b" \
155 ++ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
156 ++ [ptr] "+Q"(*(u32 *)ptr) \
157 ++ : [val] "Ir" (val)); \
158 + break; \
159 + case 8: \
160 +- do { \
161 +- asm ("//__per_cpu_" #op "_8\n" \
162 +- "ldxr %[ret], %[ptr]\n" \
163 ++ asm ("//__per_cpu_" #op "_8\n" \
164 ++ "1: ldxr %[ret], %[ptr]\n" \
165 + #asm_op " %[ret], %[ret], %[val]\n" \
166 +- "stxr %w[loop], %[ret], %[ptr]\n" \
167 +- : [loop] "=&r" (loop), [ret] "=&r" (ret), \
168 +- [ptr] "+Q"(*(u64 *)ptr) \
169 +- : [val] "Ir" (val)); \
170 +- } while (loop); \
171 ++ " stxr %w[loop], %[ret], %[ptr]\n" \
172 ++ " cbnz %w[loop], 1b" \
173 ++ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
174 ++ [ptr] "+Q"(*(u64 *)ptr) \
175 ++ : [val] "Ir" (val)); \
176 + break; \
177 + default: \
178 + BUILD_BUG(); \
179 +@@ -150,44 +146,40 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
180 +
181 + switch (size) {
182 + case 1:
183 +- do {
184 +- asm ("//__percpu_xchg_1\n"
185 +- "ldxrb %w[ret], %[ptr]\n"
186 +- "stxrb %w[loop], %w[val], %[ptr]\n"
187 +- : [loop] "=&r"(loop), [ret] "=&r"(ret),
188 +- [ptr] "+Q"(*(u8 *)ptr)
189 +- : [val] "r" (val));
190 +- } while (loop);
191 ++ asm ("//__percpu_xchg_1\n"
192 ++ "1: ldxrb %w[ret], %[ptr]\n"
193 ++ " stxrb %w[loop], %w[val], %[ptr]\n"
194 ++ " cbnz %w[loop], 1b"
195 ++ : [loop] "=&r"(loop), [ret] "=&r"(ret),
196 ++ [ptr] "+Q"(*(u8 *)ptr)
197 ++ : [val] "r" (val));
198 + break;
199 + case 2:
200 +- do {
201 +- asm ("//__percpu_xchg_2\n"
202 +- "ldxrh %w[ret], %[ptr]\n"
203 +- "stxrh %w[loop], %w[val], %[ptr]\n"
204 +- : [loop] "=&r"(loop), [ret] "=&r"(ret),
205 +- [ptr] "+Q"(*(u16 *)ptr)
206 +- : [val] "r" (val));
207 +- } while (loop);
208 ++ asm ("//__percpu_xchg_2\n"
209 ++ "1: ldxrh %w[ret], %[ptr]\n"
210 ++ " stxrh %w[loop], %w[val], %[ptr]\n"
211 ++ " cbnz %w[loop], 1b"
212 ++ : [loop] "=&r"(loop), [ret] "=&r"(ret),
213 ++ [ptr] "+Q"(*(u16 *)ptr)
214 ++ : [val] "r" (val));
215 + break;
216 + case 4:
217 +- do {
218 +- asm ("//__percpu_xchg_4\n"
219 +- "ldxr %w[ret], %[ptr]\n"
220 +- "stxr %w[loop], %w[val], %[ptr]\n"
221 +- : [loop] "=&r"(loop), [ret] "=&r"(ret),
222 +- [ptr] "+Q"(*(u32 *)ptr)
223 +- : [val] "r" (val));
224 +- } while (loop);
225 ++ asm ("//__percpu_xchg_4\n"
226 ++ "1: ldxr %w[ret], %[ptr]\n"
227 ++ " stxr %w[loop], %w[val], %[ptr]\n"
228 ++ " cbnz %w[loop], 1b"
229 ++ : [loop] "=&r"(loop), [ret] "=&r"(ret),
230 ++ [ptr] "+Q"(*(u32 *)ptr)
231 ++ : [val] "r" (val));
232 + break;
233 + case 8:
234 +- do {
235 +- asm ("//__percpu_xchg_8\n"
236 +- "ldxr %[ret], %[ptr]\n"
237 +- "stxr %w[loop], %[val], %[ptr]\n"
238 +- : [loop] "=&r"(loop), [ret] "=&r"(ret),
239 +- [ptr] "+Q"(*(u64 *)ptr)
240 +- : [val] "r" (val));
241 +- } while (loop);
242 ++ asm ("//__percpu_xchg_8\n"
243 ++ "1: ldxr %[ret], %[ptr]\n"
244 ++ " stxr %w[loop], %[val], %[ptr]\n"
245 ++ " cbnz %w[loop], 1b"
246 ++ : [loop] "=&r"(loop), [ret] "=&r"(ret),
247 ++ [ptr] "+Q"(*(u64 *)ptr)
248 ++ : [val] "r" (val));
249 + break;
250 + default:
251 + BUILD_BUG();
252 +diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
253 +index b685257926f0..20ceb5edf7b8 100644
254 +--- a/arch/arm64/kernel/head.S
255 ++++ b/arch/arm64/kernel/head.S
256 +@@ -518,8 +518,9 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
257 + b.lt 4f // Skip if no PMU present
258 + mrs x0, pmcr_el0 // Disable debug access traps
259 + ubfx x0, x0, #11, #5 // to EL2 and allow access to
260 +- msr mdcr_el2, x0 // all PMU counters from EL1
261 + 4:
262 ++ csel x0, xzr, x0, lt // all PMU counters from EL1
263 ++ msr mdcr_el2, x0 // (if they exist)
264 +
265 + /* Stage-2 translation */
266 + msr vttbr_el2, xzr
267 +diff --git a/arch/metag/include/asm/atomic.h b/arch/metag/include/asm/atomic.h
268 +index 470e365f04ea..8ff0a70865f6 100644
269 +--- a/arch/metag/include/asm/atomic.h
270 ++++ b/arch/metag/include/asm/atomic.h
271 +@@ -39,11 +39,10 @@
272 + #define atomic_dec(v) atomic_sub(1, (v))
273 +
274 + #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
275 ++#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
276 +
277 + #endif
278 +
279 +-#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
280 +-
281 + #include <asm-generic/atomic64.h>
282 +
283 + #endif /* __ASM_METAG_ATOMIC_H */
284 +diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
285 +index f6fc6aac5496..b6578611dddb 100644
286 +--- a/arch/mips/include/asm/ptrace.h
287 ++++ b/arch/mips/include/asm/ptrace.h
288 +@@ -152,7 +152,7 @@ static inline int is_syscall_success(struct pt_regs *regs)
289 +
290 + static inline long regs_return_value(struct pt_regs *regs)
291 + {
292 +- if (is_syscall_success(regs))
293 ++ if (is_syscall_success(regs) || !user_mode(regs))
294 + return regs->regs[2];
295 + else
296 + return -regs->regs[2];
297 +diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
298 +index 090393aa0f20..6c7d78546eee 100644
299 +--- a/arch/mips/vdso/Makefile
300 ++++ b/arch/mips/vdso/Makefile
301 +@@ -75,7 +75,7 @@ obj-vdso := $(obj-vdso-y:%.o=$(obj)/%.o)
302 + $(obj-vdso): KBUILD_CFLAGS := $(cflags-vdso) $(native-abi)
303 + $(obj-vdso): KBUILD_AFLAGS := $(aflags-vdso) $(native-abi)
304 +
305 +-$(obj)/vdso.lds: KBUILD_CPPFLAGS := $(native-abi)
306 ++$(obj)/vdso.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) $(native-abi)
307 +
308 + $(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
309 + $(call if_changed,vdsold)
310 +diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
311 +index 291cee28ccb6..c2c43f714684 100644
312 +--- a/arch/parisc/include/asm/pgtable.h
313 ++++ b/arch/parisc/include/asm/pgtable.h
314 +@@ -83,10 +83,10 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
315 + printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))
316 +
317 + /* This is the size of the initially mapped kernel memory */
318 +-#ifdef CONFIG_64BIT
319 +-#define KERNEL_INITIAL_ORDER 25 /* 1<<25 = 32MB */
320 ++#if defined(CONFIG_64BIT)
321 ++#define KERNEL_INITIAL_ORDER 26 /* 1<<26 = 64MB */
322 + #else
323 +-#define KERNEL_INITIAL_ORDER 24 /* 1<<24 = 16MB */
324 ++#define KERNEL_INITIAL_ORDER 25 /* 1<<25 = 32MB */
325 + #endif
326 + #define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER)
327 +
328 +diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
329 +index f7ea626e29c9..81d6f6391944 100644
330 +--- a/arch/parisc/kernel/setup.c
331 ++++ b/arch/parisc/kernel/setup.c
332 +@@ -38,6 +38,7 @@
333 + #include <linux/export.h>
334 +
335 + #include <asm/processor.h>
336 ++#include <asm/sections.h>
337 + #include <asm/pdc.h>
338 + #include <asm/led.h>
339 + #include <asm/machdep.h> /* for pa7300lc_init() proto */
340 +@@ -140,6 +141,13 @@ void __init setup_arch(char **cmdline_p)
341 + #endif
342 + printk(KERN_CONT ".\n");
343 +
344 ++ /*
345 ++ * Check if initial kernel page mappings are sufficient.
346 ++ * panic early if not, else we may access kernel functions
347 ++ * and variables which can't be reached.
348 ++ */
349 ++ if (__pa((unsigned long) &_end) >= KERNEL_INITIAL_SIZE)
350 ++ panic("KERNEL_INITIAL_ORDER too small!");
351 +
352 + pdc_console_init();
353 +
354 +diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
355 +index 308f29081d46..60771df10fde 100644
356 +--- a/arch/parisc/kernel/vmlinux.lds.S
357 ++++ b/arch/parisc/kernel/vmlinux.lds.S
358 +@@ -88,8 +88,9 @@ SECTIONS
359 + /* Start of data section */
360 + _sdata = .;
361 +
362 +- RO_DATA_SECTION(8)
363 +-
364 ++ /* Architecturally we need to keep __gp below 0x1000000 and thus
365 ++ * in front of RO_DATA_SECTION() which stores lots of tracepoint
366 ++ * and ftrace symbols. */
367 + #ifdef CONFIG_64BIT
368 + . = ALIGN(16);
369 + /* Linkage tables */
370 +@@ -104,6 +105,8 @@ SECTIONS
371 + }
372 + #endif
373 +
374 ++ RO_DATA_SECTION(8)
375 ++
376 + /* unwind info */
377 + .PARISC.unwind : {
378 + __start___unwind = .;
379 +diff --git a/arch/powerpc/kernel/vdso64/datapage.S b/arch/powerpc/kernel/vdso64/datapage.S
380 +index 2f01c4a0d8a0..7612eeb31da1 100644
381 +--- a/arch/powerpc/kernel/vdso64/datapage.S
382 ++++ b/arch/powerpc/kernel/vdso64/datapage.S
383 +@@ -59,7 +59,7 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map)
384 + bl V_LOCAL_FUNC(__get_datapage)
385 + mtlr r12
386 + addi r3,r3,CFG_SYSCALL_MAP64
387 +- cmpli cr0,r4,0
388 ++ cmpldi cr0,r4,0
389 + crclr cr0*4+so
390 + beqlr
391 + li r0,__NR_syscalls
392 +diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S
393 +index a76b4af37ef2..382021324883 100644
394 +--- a/arch/powerpc/kernel/vdso64/gettimeofday.S
395 ++++ b/arch/powerpc/kernel/vdso64/gettimeofday.S
396 +@@ -145,7 +145,7 @@ V_FUNCTION_BEGIN(__kernel_clock_getres)
397 + bne cr0,99f
398 +
399 + li r3,0
400 +- cmpli cr0,r4,0
401 ++ cmpldi cr0,r4,0
402 + crclr cr0*4+so
403 + beqlr
404 + lis r5,CLOCK_REALTIME_RES@h
405 +diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
406 +index f09899e35991..7b22624f332c 100644
407 +--- a/arch/powerpc/lib/copyuser_64.S
408 ++++ b/arch/powerpc/lib/copyuser_64.S
409 +@@ -359,6 +359,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
410 + addi r3,r3,8
411 + 171:
412 + 177:
413 ++179:
414 + addi r3,r3,8
415 + 370:
416 + 372:
417 +@@ -373,7 +374,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
418 + 173:
419 + 174:
420 + 175:
421 +-179:
422 + 181:
423 + 184:
424 + 186:
425 +diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
426 +index 6527882ce05e..ddfd2740a1b5 100644
427 +--- a/arch/powerpc/mm/copro_fault.c
428 ++++ b/arch/powerpc/mm/copro_fault.c
429 +@@ -106,6 +106,8 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
430 + switch (REGION_ID(ea)) {
431 + case USER_REGION_ID:
432 + pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
433 ++ if (mm == NULL)
434 ++ return 1;
435 + psize = get_slice_psize(mm, ea);
436 + ssize = user_segment_size(ea);
437 + vsid = get_vsid(mm->context.id, ea, ssize);
438 +diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
439 +index 2ba602591a20..ba0cae69a396 100644
440 +--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
441 ++++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
442 +@@ -1163,7 +1163,7 @@ static void pnv_eeh_get_and_dump_hub_diag(struct pci_controller *hose)
443 + return;
444 + }
445 +
446 +- switch (data->type) {
447 ++ switch (be16_to_cpu(data->type)) {
448 + case OPAL_P7IOC_DIAG_TYPE_RGC:
449 + pr_info("P7IOC diag-data for RGC\n\n");
450 + pnv_eeh_dump_hub_diag_common(data);
451 +@@ -1395,7 +1395,7 @@ static int pnv_eeh_next_error(struct eeh_pe **pe)
452 +
453 + /* Try best to clear it */
454 + opal_pci_eeh_freeze_clear(phb->opal_id,
455 +- frozen_pe_no,
456 ++ be64_to_cpu(frozen_pe_no),
457 + OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
458 + ret = EEH_NEXT_ERR_NONE;
459 + } else if ((*pe)->state & EEH_PE_ISOLATED ||
460 +diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
461 +index ad8c3f4a5e0b..dd5e0f3b1b5d 100644
462 +--- a/arch/powerpc/platforms/powernv/pci.c
463 ++++ b/arch/powerpc/platforms/powernv/pci.c
464 +@@ -197,8 +197,8 @@ static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
465 + be64_to_cpu(data->dma1ErrorLog1));
466 +
467 + for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
468 +- if ((data->pestA[i] >> 63) == 0 &&
469 +- (data->pestB[i] >> 63) == 0)
470 ++ if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 &&
471 ++ (be64_to_cpu(data->pestB[i]) >> 63) == 0)
472 + continue;
473 +
474 + pr_info("PE[%3d] A/B: %016llx %016llx\n",
475 +diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
476 +index b7a67e3d2201..3ae43282460e 100644
477 +--- a/arch/powerpc/platforms/pseries/lpar.c
478 ++++ b/arch/powerpc/platforms/pseries/lpar.c
479 +@@ -406,7 +406,7 @@ static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
480 + unsigned long *vpn, int count,
481 + int psize, int ssize)
482 + {
483 +- unsigned long param[8];
484 ++ unsigned long param[PLPAR_HCALL9_BUFSIZE];
485 + int i = 0, pix = 0, rc;
486 + unsigned long flags = 0;
487 + int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
488 +@@ -523,7 +523,7 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
489 + unsigned long flags = 0;
490 + struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
491 + int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
492 +- unsigned long param[9];
493 ++ unsigned long param[PLPAR_HCALL9_BUFSIZE];
494 + unsigned long hash, index, shift, hidx, slot;
495 + real_pte_t pte;
496 + int psize, ssize;
497 +diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
498 +index a2e6ef32e054..0a2031618f7f 100644
499 +--- a/arch/s390/include/asm/tlbflush.h
500 ++++ b/arch/s390/include/asm/tlbflush.h
501 +@@ -81,7 +81,8 @@ static inline void __tlb_flush_full(struct mm_struct *mm)
502 + }
503 +
504 + /*
505 +- * Flush TLB entries for a specific ASCE on all CPUs.
506 ++ * Flush TLB entries for a specific ASCE on all CPUs. Should never be used
507 ++ * when more than one asce (e.g. gmap) ran on this mm.
508 + */
509 + static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
510 + {
511 +diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
512 +index 471a370a527b..8345ae1f117d 100644
513 +--- a/arch/s390/mm/pgtable.c
514 ++++ b/arch/s390/mm/pgtable.c
515 +@@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(gmap_alloc);
516 + static void gmap_flush_tlb(struct gmap *gmap)
517 + {
518 + if (MACHINE_HAS_IDTE)
519 +- __tlb_flush_asce(gmap->mm, gmap->asce);
520 ++ __tlb_flush_idte(gmap->asce);
521 + else
522 + __tlb_flush_global();
523 + }
524 +@@ -205,7 +205,7 @@ void gmap_free(struct gmap *gmap)
525 +
526 + /* Flush tlb. */
527 + if (MACHINE_HAS_IDTE)
528 +- __tlb_flush_asce(gmap->mm, gmap->asce);
529 ++ __tlb_flush_idte(gmap->asce);
530 + else
531 + __tlb_flush_global();
532 +
533 +diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
534 +index 38b3ead7222d..52a2526c3fbe 100644
535 +--- a/arch/x86/kernel/e820.c
536 ++++ b/arch/x86/kernel/e820.c
537 +@@ -347,7 +347,7 @@ int __init sanitize_e820_map(struct e820entry *biosmap, int max_nr_map,
538 + * continue building up new bios map based on this
539 + * information
540 + */
541 +- if (current_type != last_type) {
542 ++ if (current_type != last_type || current_type == E820_PRAM) {
543 + if (last_type != 0) {
544 + new_bios[new_bios_entry].size =
545 + change_point[chgidx]->addr - last_addr;
546 +diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
547 +index 88d0a92d3f94..3aab53f8cad2 100644
548 +--- a/arch/x86/kvm/ioapic.c
549 ++++ b/arch/x86/kvm/ioapic.c
550 +@@ -580,7 +580,7 @@ static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
551 + ioapic->irr = 0;
552 + ioapic->irr_delivered = 0;
553 + ioapic->id = 0;
554 +- memset(ioapic->irq_eoi, 0x00, IOAPIC_NUM_PINS);
555 ++ memset(ioapic->irq_eoi, 0x00, sizeof(ioapic->irq_eoi));
556 + rtc_irq_eoi_tracking_reset(ioapic);
557 + }
558 +
559 +diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
560 +index 5a37188b559f..9d359e05fad7 100644
561 +--- a/block/blk-cgroup.c
562 ++++ b/block/blk-cgroup.c
563 +@@ -1331,10 +1331,8 @@ int blkcg_policy_register(struct blkcg_policy *pol)
564 + struct blkcg_policy_data *cpd;
565 +
566 + cpd = pol->cpd_alloc_fn(GFP_KERNEL);
567 +- if (!cpd) {
568 +- mutex_unlock(&blkcg_pol_mutex);
569 ++ if (!cpd)
570 + goto err_free_cpds;
571 +- }
572 +
573 + blkcg->cpd[pol->plid] = cpd;
574 + cpd->blkcg = blkcg;
575 +diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c
576 +index 758acabf2d81..8f3056cd0399 100644
577 +--- a/crypto/asymmetric_keys/pkcs7_parser.c
578 ++++ b/crypto/asymmetric_keys/pkcs7_parser.c
579 +@@ -547,9 +547,7 @@ int pkcs7_sig_note_set_of_authattrs(void *context, size_t hdrlen,
580 + struct pkcs7_signed_info *sinfo = ctx->sinfo;
581 +
582 + if (!test_bit(sinfo_has_content_type, &sinfo->aa_set) ||
583 +- !test_bit(sinfo_has_message_digest, &sinfo->aa_set) ||
584 +- (ctx->msg->data_type == OID_msIndirectData &&
585 +- !test_bit(sinfo_has_ms_opus_info, &sinfo->aa_set))) {
586 ++ !test_bit(sinfo_has_message_digest, &sinfo->aa_set)) {
587 + pr_warn("Missing required AuthAttr\n");
588 + return -EBADMSG;
589 + }
590 +diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
591 +index 5230e8449d30..c097f477c74c 100644
592 +--- a/drivers/acpi/nfit.c
593 ++++ b/drivers/acpi/nfit.c
594 +@@ -1806,6 +1806,9 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
595 +
596 + dev_dbg(dev, "%s: event: %d\n", __func__, event);
597 +
598 ++ if (event != NFIT_NOTIFY_UPDATE)
599 ++ return;
600 ++
601 + device_lock(dev);
602 + if (!dev->driver) {
603 + /* dev->driver may be null if we're being removed */
604 +diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h
605 +index 3d549a383659..13d6ec1ff055 100644
606 +--- a/drivers/acpi/nfit.h
607 ++++ b/drivers/acpi/nfit.h
608 +@@ -45,6 +45,10 @@ enum {
609 + ND_BLK_DCR_LATCH = 2,
610 + };
611 +
612 ++enum nfit_root_notifiers {
613 ++ NFIT_NOTIFY_UPDATE = 0x80,
614 ++};
615 ++
616 + struct nfit_spa {
617 + struct acpi_nfit_system_address *spa;
618 + struct list_head list;
619 +diff --git a/drivers/base/platform.c b/drivers/base/platform.c
620 +index 176b59f5bc47..ba66330cea67 100644
621 +--- a/drivers/base/platform.c
622 ++++ b/drivers/base/platform.c
623 +@@ -96,7 +96,7 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
624 + int ret;
625 +
626 + ret = of_irq_get(dev->dev.of_node, num);
627 +- if (ret >= 0 || ret == -EPROBE_DEFER)
628 ++ if (ret > 0 || ret == -EPROBE_DEFER)
629 + return ret;
630 + }
631 +
632 +@@ -154,7 +154,7 @@ int platform_get_irq_byname(struct platform_device *dev, const char *name)
633 + int ret;
634 +
635 + ret = of_irq_get_byname(dev->dev.of_node, name);
636 +- if (ret >= 0 || ret == -EPROBE_DEFER)
637 ++ if (ret > 0 || ret == -EPROBE_DEFER)
638 + return ret;
639 + }
640 +
641 +diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
642 +index c1935081d34a..aab64205d866 100644
643 +--- a/drivers/clk/imx/clk-imx6q.c
644 ++++ b/drivers/clk/imx/clk-imx6q.c
645 +@@ -550,6 +550,24 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
646 + if (IS_ENABLED(CONFIG_PCI_IMX6))
647 + clk_set_parent(clk[IMX6QDL_CLK_LVDS1_SEL], clk[IMX6QDL_CLK_SATA_REF_100M]);
648 +
649 ++ /*
650 ++ * Initialize the GPU clock muxes, so that the maximum specified clock
651 ++ * rates for the respective SoC are not exceeded.
652 ++ */
653 ++ if (clk_on_imx6dl()) {
654 ++ clk_set_parent(clk[IMX6QDL_CLK_GPU3D_CORE_SEL],
655 ++ clk[IMX6QDL_CLK_PLL2_PFD1_594M]);
656 ++ clk_set_parent(clk[IMX6QDL_CLK_GPU2D_CORE_SEL],
657 ++ clk[IMX6QDL_CLK_PLL2_PFD1_594M]);
658 ++ } else if (clk_on_imx6q()) {
659 ++ clk_set_parent(clk[IMX6QDL_CLK_GPU3D_CORE_SEL],
660 ++ clk[IMX6QDL_CLK_MMDC_CH0_AXI]);
661 ++ clk_set_parent(clk[IMX6QDL_CLK_GPU3D_SHADER_SEL],
662 ++ clk[IMX6QDL_CLK_PLL2_PFD1_594M]);
663 ++ clk_set_parent(clk[IMX6QDL_CLK_GPU2D_CORE_SEL],
664 ++ clk[IMX6QDL_CLK_PLL3_USB_OTG]);
665 ++ }
666 ++
667 + imx_register_uart_clocks(uart_clks);
668 + }
669 + CLK_OF_DECLARE(imx6q, "fsl,imx6q-ccm", imx6q_clocks_init);
670 +diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
671 +index 6e80e4298274..7ff8b15a3422 100644
672 +--- a/drivers/cpufreq/intel_pstate.c
673 ++++ b/drivers/cpufreq/intel_pstate.c
674 +@@ -285,14 +285,14 @@ static void intel_pstate_hwp_set(void)
675 + int min, hw_min, max, hw_max, cpu, range, adj_range;
676 + u64 value, cap;
677 +
678 +- rdmsrl(MSR_HWP_CAPABILITIES, cap);
679 +- hw_min = HWP_LOWEST_PERF(cap);
680 +- hw_max = HWP_HIGHEST_PERF(cap);
681 +- range = hw_max - hw_min;
682 +-
683 + get_online_cpus();
684 +
685 + for_each_online_cpu(cpu) {
686 ++ rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
687 ++ hw_min = HWP_LOWEST_PERF(cap);
688 ++ hw_max = HWP_HIGHEST_PERF(cap);
689 ++ range = hw_max - hw_min;
690 ++
691 + rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
692 + adj_range = limits->min_perf_pct * range / 100;
693 + min = hw_min + adj_range;
694 +diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
695 +index 48ef368347ab..9e02cb6afb0b 100644
696 +--- a/drivers/gpio/gpio-mpc8xxx.c
697 ++++ b/drivers/gpio/gpio-mpc8xxx.c
698 +@@ -329,7 +329,7 @@ static int mpc8xxx_gpio_irq_map(struct irq_domain *h, unsigned int irq,
699 + irq_hw_number_t hwirq)
700 + {
701 + irq_set_chip_data(irq, h->host_data);
702 +- irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_level_irq);
703 ++ irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_edge_irq);
704 +
705 + return 0;
706 + }
707 +diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
708 +index be5b399da5d3..43482ae1e049 100644
709 +--- a/drivers/input/mouse/elantech.c
710 ++++ b/drivers/input/mouse/elantech.c
711 +@@ -1163,6 +1163,13 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
712 + DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"),
713 + },
714 + },
715 ++ {
716 ++ /* Fujitsu H760 also has a middle button */
717 ++ .matches = {
718 ++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
719 ++ DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"),
720 ++ },
721 ++ },
722 + #endif
723 + { }
724 + };
725 +@@ -1507,10 +1514,10 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
726 + },
727 + },
728 + {
729 +- /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */
730 ++ /* Fujitsu H760 does not work with crc_enabled == 0 */
731 + .matches = {
732 + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
733 +- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"),
734 ++ DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"),
735 + },
736 + },
737 + {
738 +@@ -1521,6 +1528,20 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
739 + },
740 + },
741 + {
742 ++ /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */
743 ++ .matches = {
744 ++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
745 ++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"),
746 ++ },
747 ++ },
748 ++ {
749 ++ /* Fujitsu LIFEBOOK E556 does not work with crc_enabled == 0 */
750 ++ .matches = {
751 ++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
752 ++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E556"),
753 ++ },
754 ++ },
755 ++ {
756 + /* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */
757 + .matches = {
758 + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
759 +diff --git a/drivers/input/serio/i8042-io.h b/drivers/input/serio/i8042-io.h
760 +index a5eed2ade53d..34da81c006b6 100644
761 +--- a/drivers/input/serio/i8042-io.h
762 ++++ b/drivers/input/serio/i8042-io.h
763 +@@ -81,7 +81,7 @@ static inline int i8042_platform_init(void)
764 + return -EBUSY;
765 + #endif
766 +
767 +- i8042_reset = 1;
768 ++ i8042_reset = I8042_RESET_ALWAYS;
769 + return 0;
770 + }
771 +
772 +diff --git a/drivers/input/serio/i8042-ip22io.h b/drivers/input/serio/i8042-ip22io.h
773 +index ee1ad27d6ed0..08a1c10a1448 100644
774 +--- a/drivers/input/serio/i8042-ip22io.h
775 ++++ b/drivers/input/serio/i8042-ip22io.h
776 +@@ -61,7 +61,7 @@ static inline int i8042_platform_init(void)
777 + return -EBUSY;
778 + #endif
779 +
780 +- i8042_reset = 1;
781 ++ i8042_reset = I8042_RESET_ALWAYS;
782 +
783 + return 0;
784 + }
785 +diff --git a/drivers/input/serio/i8042-ppcio.h b/drivers/input/serio/i8042-ppcio.h
786 +index f708c75d16f1..1aabea43329e 100644
787 +--- a/drivers/input/serio/i8042-ppcio.h
788 ++++ b/drivers/input/serio/i8042-ppcio.h
789 +@@ -44,7 +44,7 @@ static inline void i8042_write_command(int val)
790 +
791 + static inline int i8042_platform_init(void)
792 + {
793 +- i8042_reset = 1;
794 ++ i8042_reset = I8042_RESET_ALWAYS;
795 + return 0;
796 + }
797 +
798 +diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h
799 +index afcd1c1a05b2..6231d63860ee 100644
800 +--- a/drivers/input/serio/i8042-sparcio.h
801 ++++ b/drivers/input/serio/i8042-sparcio.h
802 +@@ -130,7 +130,7 @@ static int __init i8042_platform_init(void)
803 + }
804 + }
805 +
806 +- i8042_reset = 1;
807 ++ i8042_reset = I8042_RESET_ALWAYS;
808 +
809 + return 0;
810 + }
811 +diff --git a/drivers/input/serio/i8042-unicore32io.h b/drivers/input/serio/i8042-unicore32io.h
812 +index 73f5cc124a36..455747552f85 100644
813 +--- a/drivers/input/serio/i8042-unicore32io.h
814 ++++ b/drivers/input/serio/i8042-unicore32io.h
815 +@@ -61,7 +61,7 @@ static inline int i8042_platform_init(void)
816 + if (!request_mem_region(I8042_REGION_START, I8042_REGION_SIZE, "i8042"))
817 + return -EBUSY;
818 +
819 +- i8042_reset = 1;
820 ++ i8042_reset = I8042_RESET_ALWAYS;
821 + return 0;
822 + }
823 +
824 +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
825 +index 68f5f4a0f1e7..f4bfb4b2d50a 100644
826 +--- a/drivers/input/serio/i8042-x86ia64io.h
827 ++++ b/drivers/input/serio/i8042-x86ia64io.h
828 +@@ -510,6 +510,90 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
829 + { }
830 + };
831 +
832 ++/*
833 ++ * On some Asus laptops, just running self tests cause problems.
834 ++ */
835 ++static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
836 ++ {
837 ++ .matches = {
838 ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
839 ++ DMI_MATCH(DMI_PRODUCT_NAME, "A455LD"),
840 ++ },
841 ++ },
842 ++ {
843 ++ .matches = {
844 ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
845 ++ DMI_MATCH(DMI_PRODUCT_NAME, "K401LB"),
846 ++ },
847 ++ },
848 ++ {
849 ++ .matches = {
850 ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
851 ++ DMI_MATCH(DMI_PRODUCT_NAME, "K501LB"),
852 ++ },
853 ++ },
854 ++ {
855 ++ .matches = {
856 ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
857 ++ DMI_MATCH(DMI_PRODUCT_NAME, "K501LX"),
858 ++ },
859 ++ },
860 ++ {
861 ++ .matches = {
862 ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
863 ++ DMI_MATCH(DMI_PRODUCT_NAME, "R409L"),
864 ++ },
865 ++ },
866 ++ {
867 ++ .matches = {
868 ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
869 ++ DMI_MATCH(DMI_PRODUCT_NAME, "V502LX"),
870 ++ },
871 ++ },
872 ++ {
873 ++ .matches = {
874 ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
875 ++ DMI_MATCH(DMI_PRODUCT_NAME, "X302LA"),
876 ++ },
877 ++ },
878 ++ {
879 ++ .matches = {
880 ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
881 ++ DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"),
882 ++ },
883 ++ },
884 ++ {
885 ++ .matches = {
886 ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
887 ++ DMI_MATCH(DMI_PRODUCT_NAME, "X450LD"),
888 ++ },
889 ++ },
890 ++ {
891 ++ .matches = {
892 ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
893 ++ DMI_MATCH(DMI_PRODUCT_NAME, "X455LAB"),
894 ++ },
895 ++ },
896 ++ {
897 ++ .matches = {
898 ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
899 ++ DMI_MATCH(DMI_PRODUCT_NAME, "X455LDB"),
900 ++ },
901 ++ },
902 ++ {
903 ++ .matches = {
904 ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
905 ++ DMI_MATCH(DMI_PRODUCT_NAME, "X455LF"),
906 ++ },
907 ++ },
908 ++ {
909 ++ .matches = {
910 ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
911 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Z450LA"),
912 ++ },
913 ++ },
914 ++ { }
915 ++};
916 + static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
917 + {
918 + /* MSI Wind U-100 */
919 +@@ -1072,12 +1156,18 @@ static int __init i8042_platform_init(void)
920 + return retval;
921 +
922 + #if defined(__ia64__)
923 +- i8042_reset = true;
924 ++ i8042_reset = I8042_RESET_ALWAYS;
925 + #endif
926 +
927 + #ifdef CONFIG_X86
928 +- if (dmi_check_system(i8042_dmi_reset_table))
929 +- i8042_reset = true;
930 ++ /* Honor module parameter when value is not default */
931 ++ if (i8042_reset == I8042_RESET_DEFAULT) {
932 ++ if (dmi_check_system(i8042_dmi_reset_table))
933 ++ i8042_reset = I8042_RESET_ALWAYS;
934 ++
935 ++ if (dmi_check_system(i8042_dmi_noselftest_table))
936 ++ i8042_reset = I8042_RESET_NEVER;
937 ++ }
938 +
939 + if (dmi_check_system(i8042_dmi_noloop_table))
940 + i8042_noloop = true;
941 +diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
942 +index 405252a884dd..89abfdb539ac 100644
943 +--- a/drivers/input/serio/i8042.c
944 ++++ b/drivers/input/serio/i8042.c
945 +@@ -48,9 +48,39 @@ static bool i8042_unlock;
946 + module_param_named(unlock, i8042_unlock, bool, 0);
947 + MODULE_PARM_DESC(unlock, "Ignore keyboard lock.");
948 +
949 +-static bool i8042_reset;
950 +-module_param_named(reset, i8042_reset, bool, 0);
951 +-MODULE_PARM_DESC(reset, "Reset controller during init and cleanup.");
952 ++enum i8042_controller_reset_mode {
953 ++ I8042_RESET_NEVER,
954 ++ I8042_RESET_ALWAYS,
955 ++ I8042_RESET_ON_S2RAM,
956 ++#define I8042_RESET_DEFAULT I8042_RESET_ON_S2RAM
957 ++};
958 ++static enum i8042_controller_reset_mode i8042_reset = I8042_RESET_DEFAULT;
959 ++static int i8042_set_reset(const char *val, const struct kernel_param *kp)
960 ++{
961 ++ enum i8042_controller_reset_mode *arg = kp->arg;
962 ++ int error;
963 ++ bool reset;
964 ++
965 ++ if (val) {
966 ++ error = kstrtobool(val, &reset);
967 ++ if (error)
968 ++ return error;
969 ++ } else {
970 ++ reset = true;
971 ++ }
972 ++
973 ++ *arg = reset ? I8042_RESET_ALWAYS : I8042_RESET_NEVER;
974 ++ return 0;
975 ++}
976 ++
977 ++static const struct kernel_param_ops param_ops_reset_param = {
978 ++ .flags = KERNEL_PARAM_OPS_FL_NOARG,
979 ++ .set = i8042_set_reset,
980 ++};
981 ++#define param_check_reset_param(name, p) \
982 ++ __param_check(name, p, enum i8042_controller_reset_mode)
983 ++module_param_named(reset, i8042_reset, reset_param, 0);
984 ++MODULE_PARM_DESC(reset, "Reset controller on resume, cleanup or both");
985 +
986 + static bool i8042_direct;
987 + module_param_named(direct, i8042_direct, bool, 0);
988 +@@ -1019,7 +1049,7 @@ static int i8042_controller_init(void)
989 + * Reset the controller and reset CRT to the original value set by BIOS.
990 + */
991 +
992 +-static void i8042_controller_reset(bool force_reset)
993 ++static void i8042_controller_reset(bool s2r_wants_reset)
994 + {
995 + i8042_flush();
996 +
997 +@@ -1044,8 +1074,10 @@ static void i8042_controller_reset(bool force_reset)
998 + * Reset the controller if requested.
999 + */
1000 +
1001 +- if (i8042_reset || force_reset)
1002 ++ if (i8042_reset == I8042_RESET_ALWAYS ||
1003 ++ (i8042_reset == I8042_RESET_ON_S2RAM && s2r_wants_reset)) {
1004 + i8042_controller_selftest();
1005 ++ }
1006 +
1007 + /*
1008 + * Restore the original control register setting.
1009 +@@ -1110,7 +1142,7 @@ static void i8042_dritek_enable(void)
1010 + * before suspending.
1011 + */
1012 +
1013 +-static int i8042_controller_resume(bool force_reset)
1014 ++static int i8042_controller_resume(bool s2r_wants_reset)
1015 + {
1016 + int error;
1017 +
1018 +@@ -1118,7 +1150,8 @@ static int i8042_controller_resume(bool force_reset)
1019 + if (error)
1020 + return error;
1021 +
1022 +- if (i8042_reset || force_reset) {
1023 ++ if (i8042_reset == I8042_RESET_ALWAYS ||
1024 ++ (i8042_reset == I8042_RESET_ON_S2RAM && s2r_wants_reset)) {
1025 + error = i8042_controller_selftest();
1026 + if (error)
1027 + return error;
1028 +@@ -1195,7 +1228,7 @@ static int i8042_pm_resume_noirq(struct device *dev)
1029 +
1030 + static int i8042_pm_resume(struct device *dev)
1031 + {
1032 +- bool force_reset;
1033 ++ bool want_reset;
1034 + int i;
1035 +
1036 + for (i = 0; i < I8042_NUM_PORTS; i++) {
1037 +@@ -1218,9 +1251,9 @@ static int i8042_pm_resume(struct device *dev)
1038 + * off control to the platform firmware, otherwise we can simply restore
1039 + * the mode.
1040 + */
1041 +- force_reset = pm_resume_via_firmware();
1042 ++ want_reset = pm_resume_via_firmware();
1043 +
1044 +- return i8042_controller_resume(force_reset);
1045 ++ return i8042_controller_resume(want_reset);
1046 + }
1047 +
1048 + static int i8042_pm_thaw(struct device *dev)
1049 +@@ -1482,7 +1515,7 @@ static int __init i8042_probe(struct platform_device *dev)
1050 +
1051 + i8042_platform_device = dev;
1052 +
1053 +- if (i8042_reset) {
1054 ++ if (i8042_reset == I8042_RESET_ALWAYS) {
1055 + error = i8042_controller_selftest();
1056 + if (error)
1057 + return error;
1058 +diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
1059 +index 44aa57edf207..e33c729b9f48 100644
1060 +--- a/drivers/irqchip/irq-gic-v3.c
1061 ++++ b/drivers/irqchip/irq-gic-v3.c
1062 +@@ -142,7 +142,7 @@ static void gic_enable_redist(bool enable)
1063 + return; /* No PM support in this redistributor */
1064 + }
1065 +
1066 +- while (count--) {
1067 ++ while (--count) {
1068 + val = readl_relaxed(rbase + GICR_WAKER);
1069 + if (enable ^ (val & GICR_WAKER_ChildrenAsleep))
1070 + break;
1071 +diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
1072 +index 51eda7235e32..5cac11d7a876 100644
1073 +--- a/drivers/md/dm-crypt.c
1074 ++++ b/drivers/md/dm-crypt.c
1075 +@@ -112,8 +112,7 @@ struct iv_tcw_private {
1076 + * and encrypts / decrypts at the same time.
1077 + */
1078 + enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
1079 +- DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
1080 +- DM_CRYPT_EXIT_THREAD};
1081 ++ DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };
1082 +
1083 + /*
1084 + * The fields in here must be read only after initialization.
1085 +@@ -1204,18 +1203,20 @@ continue_locked:
1086 + if (!RB_EMPTY_ROOT(&cc->write_tree))
1087 + goto pop_from_list;
1088 +
1089 +- if (unlikely(test_bit(DM_CRYPT_EXIT_THREAD, &cc->flags))) {
1090 +- spin_unlock_irq(&cc->write_thread_wait.lock);
1091 +- break;
1092 +- }
1093 +-
1094 +- __set_current_state(TASK_INTERRUPTIBLE);
1095 ++ set_current_state(TASK_INTERRUPTIBLE);
1096 + __add_wait_queue(&cc->write_thread_wait, &wait);
1097 +
1098 + spin_unlock_irq(&cc->write_thread_wait.lock);
1099 +
1100 ++ if (unlikely(kthread_should_stop())) {
1101 ++ set_task_state(current, TASK_RUNNING);
1102 ++ remove_wait_queue(&cc->write_thread_wait, &wait);
1103 ++ break;
1104 ++ }
1105 ++
1106 + schedule();
1107 +
1108 ++ set_task_state(current, TASK_RUNNING);
1109 + spin_lock_irq(&cc->write_thread_wait.lock);
1110 + __remove_wait_queue(&cc->write_thread_wait, &wait);
1111 + goto continue_locked;
1112 +@@ -1530,13 +1531,8 @@ static void crypt_dtr(struct dm_target *ti)
1113 + if (!cc)
1114 + return;
1115 +
1116 +- if (cc->write_thread) {
1117 +- spin_lock_irq(&cc->write_thread_wait.lock);
1118 +- set_bit(DM_CRYPT_EXIT_THREAD, &cc->flags);
1119 +- wake_up_locked(&cc->write_thread_wait);
1120 +- spin_unlock_irq(&cc->write_thread_wait.lock);
1121 ++ if (cc->write_thread)
1122 + kthread_stop(cc->write_thread);
1123 +- }
1124 +
1125 + if (cc->io_queue)
1126 + destroy_workqueue(cc->io_queue);
1127 +diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
1128 +index cfa29f574c2a..5b2ef966012b 100644
1129 +--- a/drivers/md/dm-mpath.c
1130 ++++ b/drivers/md/dm-mpath.c
1131 +@@ -1220,10 +1220,10 @@ static void activate_path(struct work_struct *work)
1132 + {
1133 + struct pgpath *pgpath =
1134 + container_of(work, struct pgpath, activate_path.work);
1135 ++ struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
1136 +
1137 +- if (pgpath->is_active)
1138 +- scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
1139 +- pg_init_done, pgpath);
1140 ++ if (pgpath->is_active && !blk_queue_dying(q))
1141 ++ scsi_dh_activate(q, pg_init_done, pgpath);
1142 + else
1143 + pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
1144 + }
1145 +diff --git a/drivers/md/dm.c b/drivers/md/dm.c
1146 +index a42729ebf272..84aa8b1d0480 100644
1147 +--- a/drivers/md/dm.c
1148 ++++ b/drivers/md/dm.c
1149 +@@ -2869,6 +2869,7 @@ EXPORT_SYMBOL_GPL(dm_device_name);
1150 +
1151 + static void __dm_destroy(struct mapped_device *md, bool wait)
1152 + {
1153 ++ struct request_queue *q = dm_get_md_queue(md);
1154 + struct dm_table *map;
1155 + int srcu_idx;
1156 +
1157 +@@ -2879,6 +2880,10 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
1158 + set_bit(DMF_FREEING, &md->flags);
1159 + spin_unlock(&_minor_lock);
1160 +
1161 ++ spin_lock_irq(q->queue_lock);
1162 ++ queue_flag_set(QUEUE_FLAG_DYING, q);
1163 ++ spin_unlock_irq(q->queue_lock);
1164 ++
1165 + if (dm_request_based(md) && md->kworker_task)
1166 + flush_kthread_worker(&md->kworker);
1167 +
1168 +@@ -3245,10 +3250,11 @@ static int __dm_resume(struct mapped_device *md, struct dm_table *map)
1169 +
1170 + int dm_resume(struct mapped_device *md)
1171 + {
1172 +- int r = -EINVAL;
1173 ++ int r;
1174 + struct dm_table *map = NULL;
1175 +
1176 + retry:
1177 ++ r = -EINVAL;
1178 + mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
1179 +
1180 + if (!dm_suspended_md(md))
1181 +@@ -3272,8 +3278,6 @@ retry:
1182 + goto out;
1183 +
1184 + clear_bit(DMF_SUSPENDED, &md->flags);
1185 +-
1186 +- r = 0;
1187 + out:
1188 + mutex_unlock(&md->suspend_lock);
1189 +
1190 +diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
1191 +index cfc005ee11d8..7fc72de2434c 100644
1192 +--- a/drivers/media/dvb-frontends/mb86a20s.c
1193 ++++ b/drivers/media/dvb-frontends/mb86a20s.c
1194 +@@ -71,25 +71,27 @@ static struct regdata mb86a20s_init1[] = {
1195 + };
1196 +
1197 + static struct regdata mb86a20s_init2[] = {
1198 +- { 0x28, 0x22 }, { 0x29, 0x00 }, { 0x2a, 0x1f }, { 0x2b, 0xf0 },
1199 ++ { 0x50, 0xd1 }, { 0x51, 0x22 },
1200 ++ { 0x39, 0x01 },
1201 ++ { 0x71, 0x00 },
1202 + { 0x3b, 0x21 },
1203 +- { 0x3c, 0x38 },
1204 ++ { 0x3c, 0x3a },
1205 + { 0x01, 0x0d },
1206 +- { 0x04, 0x08 }, { 0x05, 0x03 },
1207 ++ { 0x04, 0x08 }, { 0x05, 0x05 },
1208 + { 0x04, 0x0e }, { 0x05, 0x00 },
1209 +- { 0x04, 0x0f }, { 0x05, 0x37 },
1210 +- { 0x04, 0x0b }, { 0x05, 0x78 },
1211 ++ { 0x04, 0x0f }, { 0x05, 0x14 },
1212 ++ { 0x04, 0x0b }, { 0x05, 0x8c },
1213 + { 0x04, 0x00 }, { 0x05, 0x00 },
1214 +- { 0x04, 0x01 }, { 0x05, 0x1e },
1215 +- { 0x04, 0x02 }, { 0x05, 0x07 },
1216 +- { 0x04, 0x03 }, { 0x05, 0xd0 },
1217 ++ { 0x04, 0x01 }, { 0x05, 0x07 },
1218 ++ { 0x04, 0x02 }, { 0x05, 0x0f },
1219 ++ { 0x04, 0x03 }, { 0x05, 0xa0 },
1220 + { 0x04, 0x09 }, { 0x05, 0x00 },
1221 + { 0x04, 0x0a }, { 0x05, 0xff },
1222 +- { 0x04, 0x27 }, { 0x05, 0x00 },
1223 ++ { 0x04, 0x27 }, { 0x05, 0x64 },
1224 + { 0x04, 0x28 }, { 0x05, 0x00 },
1225 +- { 0x04, 0x1e }, { 0x05, 0x00 },
1226 +- { 0x04, 0x29 }, { 0x05, 0x64 },
1227 +- { 0x04, 0x32 }, { 0x05, 0x02 },
1228 ++ { 0x04, 0x1e }, { 0x05, 0xff },
1229 ++ { 0x04, 0x29 }, { 0x05, 0x0a },
1230 ++ { 0x04, 0x32 }, { 0x05, 0x0a },
1231 + { 0x04, 0x14 }, { 0x05, 0x02 },
1232 + { 0x04, 0x04 }, { 0x05, 0x00 },
1233 + { 0x04, 0x05 }, { 0x05, 0x22 },
1234 +@@ -97,8 +99,6 @@ static struct regdata mb86a20s_init2[] = {
1235 + { 0x04, 0x07 }, { 0x05, 0xd8 },
1236 + { 0x04, 0x12 }, { 0x05, 0x00 },
1237 + { 0x04, 0x13 }, { 0x05, 0xff },
1238 +- { 0x04, 0x15 }, { 0x05, 0x4e },
1239 +- { 0x04, 0x16 }, { 0x05, 0x20 },
1240 +
1241 + /*
1242 + * On this demod, when the bit count reaches the count below,
1243 +@@ -152,42 +152,36 @@ static struct regdata mb86a20s_init2[] = {
1244 + { 0x50, 0x51 }, { 0x51, 0x04 }, /* MER symbol 4 */
1245 + { 0x45, 0x04 }, /* CN symbol 4 */
1246 + { 0x48, 0x04 }, /* CN manual mode */
1247 +-
1248 ++ { 0x50, 0xd5 }, { 0x51, 0x01 },
1249 + { 0x50, 0xd6 }, { 0x51, 0x1f },
1250 + { 0x50, 0xd2 }, { 0x51, 0x03 },
1251 +- { 0x50, 0xd7 }, { 0x51, 0xbf },
1252 +- { 0x28, 0x74 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0xff },
1253 +- { 0x28, 0x46 }, { 0x29, 0x00 }, { 0x2a, 0x1a }, { 0x2b, 0x0c },
1254 +-
1255 +- { 0x04, 0x40 }, { 0x05, 0x00 },
1256 +- { 0x28, 0x00 }, { 0x2b, 0x08 },
1257 +- { 0x28, 0x05 }, { 0x2b, 0x00 },
1258 ++ { 0x50, 0xd7 }, { 0x51, 0x3f },
1259 + { 0x1c, 0x01 },
1260 +- { 0x28, 0x06 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x1f },
1261 +- { 0x28, 0x07 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x18 },
1262 +- { 0x28, 0x08 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x12 },
1263 +- { 0x28, 0x09 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x30 },
1264 +- { 0x28, 0x0a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x37 },
1265 +- { 0x28, 0x0b }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x02 },
1266 +- { 0x28, 0x0c }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x09 },
1267 +- { 0x28, 0x0d }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x06 },
1268 +- { 0x28, 0x0e }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x7b },
1269 +- { 0x28, 0x0f }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x76 },
1270 +- { 0x28, 0x10 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x7d },
1271 +- { 0x28, 0x11 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x08 },
1272 +- { 0x28, 0x12 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0b },
1273 +- { 0x28, 0x13 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x00 },
1274 +- { 0x28, 0x14 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xf2 },
1275 +- { 0x28, 0x15 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xf3 },
1276 +- { 0x28, 0x16 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x05 },
1277 +- { 0x28, 0x17 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x16 },
1278 +- { 0x28, 0x18 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0f },
1279 +- { 0x28, 0x19 }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xef },
1280 +- { 0x28, 0x1a }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xd8 },
1281 +- { 0x28, 0x1b }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xf1 },
1282 +- { 0x28, 0x1c }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x3d },
1283 +- { 0x28, 0x1d }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x94 },
1284 +- { 0x28, 0x1e }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0xba },
1285 ++ { 0x28, 0x06 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x03 },
1286 ++ { 0x28, 0x07 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0d },
1287 ++ { 0x28, 0x08 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x02 },
1288 ++ { 0x28, 0x09 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x01 },
1289 ++ { 0x28, 0x0a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x21 },
1290 ++ { 0x28, 0x0b }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x29 },
1291 ++ { 0x28, 0x0c }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x16 },
1292 ++ { 0x28, 0x0d }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x31 },
1293 ++ { 0x28, 0x0e }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0e },
1294 ++ { 0x28, 0x0f }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x4e },
1295 ++ { 0x28, 0x10 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x46 },
1296 ++ { 0x28, 0x11 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0f },
1297 ++ { 0x28, 0x12 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x56 },
1298 ++ { 0x28, 0x13 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x35 },
1299 ++ { 0x28, 0x14 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xbe },
1300 ++ { 0x28, 0x15 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0x84 },
1301 ++ { 0x28, 0x16 }, { 0x29, 0x00 }, { 0x2a, 0x03 }, { 0x2b, 0xee },
1302 ++ { 0x28, 0x17 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x98 },
1303 ++ { 0x28, 0x18 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x9f },
1304 ++ { 0x28, 0x19 }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xb2 },
1305 ++ { 0x28, 0x1a }, { 0x29, 0x00 }, { 0x2a, 0x06 }, { 0x2b, 0xc2 },
1306 ++ { 0x28, 0x1b }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0x4a },
1307 ++ { 0x28, 0x1c }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xbc },
1308 ++ { 0x28, 0x1d }, { 0x29, 0x00 }, { 0x2a, 0x04 }, { 0x2b, 0xba },
1309 ++ { 0x28, 0x1e }, { 0x29, 0x00 }, { 0x2a, 0x06 }, { 0x2b, 0x14 },
1310 + { 0x50, 0x1e }, { 0x51, 0x5d },
1311 + { 0x50, 0x22 }, { 0x51, 0x00 },
1312 + { 0x50, 0x23 }, { 0x51, 0xc8 },
1313 +@@ -196,9 +190,7 @@ static struct regdata mb86a20s_init2[] = {
1314 + { 0x50, 0x26 }, { 0x51, 0x00 },
1315 + { 0x50, 0x27 }, { 0x51, 0xc3 },
1316 + { 0x50, 0x39 }, { 0x51, 0x02 },
1317 +- { 0xec, 0x0f },
1318 +- { 0xeb, 0x1f },
1319 +- { 0x28, 0x6a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x00 },
1320 ++ { 0x50, 0xd5 }, { 0x51, 0x01 },
1321 + { 0xd0, 0x00 },
1322 + };
1323 +
1324 +@@ -317,7 +309,11 @@ static int mb86a20s_read_status(struct dvb_frontend *fe, enum fe_status *status)
1325 + if (val >= 7)
1326 + *status |= FE_HAS_SYNC;
1327 +
1328 +- if (val >= 8) /* Maybe 9? */
1329 ++ /*
1330 ++ * Actually, on state S8, it starts receiving TS, but the TS
1331 ++ * output is only on normal state after the transition to S9.
1332 ++ */
1333 ++ if (val >= 9)
1334 + *status |= FE_HAS_LOCK;
1335 +
1336 + dev_dbg(&state->i2c->dev, "%s: Status = 0x%02x (state = %d)\n",
1337 +@@ -2067,6 +2063,11 @@ static void mb86a20s_release(struct dvb_frontend *fe)
1338 + kfree(state);
1339 + }
1340 +
1341 ++static int mb86a20s_get_frontend_algo(struct dvb_frontend *fe)
1342 ++{
1343 ++ return DVBFE_ALGO_HW;
1344 ++}
1345 ++
1346 + static struct dvb_frontend_ops mb86a20s_ops;
1347 +
1348 + struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config,
1349 +@@ -2140,6 +2141,7 @@ static struct dvb_frontend_ops mb86a20s_ops = {
1350 + .read_status = mb86a20s_read_status_and_stats,
1351 + .read_signal_strength = mb86a20s_read_signal_strength_from_cache,
1352 + .tune = mb86a20s_tune,
1353 ++ .get_frontend_algo = mb86a20s_get_frontend_algo,
1354 + };
1355 +
1356 + MODULE_DESCRIPTION("DVB Frontend module for Fujitsu mb86A20s hardware");
1357 +diff --git a/drivers/media/usb/cx231xx/cx231xx-avcore.c b/drivers/media/usb/cx231xx/cx231xx-avcore.c
1358 +index 491913778bcc..2f52d66b4dae 100644
1359 +--- a/drivers/media/usb/cx231xx/cx231xx-avcore.c
1360 ++++ b/drivers/media/usb/cx231xx/cx231xx-avcore.c
1361 +@@ -1264,7 +1264,10 @@ int cx231xx_set_agc_analog_digital_mux_select(struct cx231xx *dev,
1362 + dev->board.agc_analog_digital_select_gpio,
1363 + analog_or_digital);
1364 +
1365 +- return status;
1366 ++ if (status < 0)
1367 ++ return status;
1368 ++
1369 ++ return 0;
1370 + }
1371 +
1372 + int cx231xx_enable_i2c_port_3(struct cx231xx *dev, bool is_port_3)
1373 +diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
1374 +index 4a117a58c39a..8389c162bc89 100644
1375 +--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
1376 ++++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
1377 +@@ -486,7 +486,7 @@ struct cx231xx_board cx231xx_boards[] = {
1378 + .output_mode = OUT_MODE_VIP11,
1379 + .demod_xfer_mode = 0,
1380 + .ctl_pin_status_mask = 0xFFFFFFC4,
1381 +- .agc_analog_digital_select_gpio = 0x00, /* According with PV cxPolaris.inf file */
1382 ++ .agc_analog_digital_select_gpio = 0x1c,
1383 + .tuner_sif_gpio = -1,
1384 + .tuner_scl_gpio = -1,
1385 + .tuner_sda_gpio = -1,
1386 +diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c
1387 +index a2fd49b6be83..19b0293312a0 100644
1388 +--- a/drivers/media/usb/cx231xx/cx231xx-core.c
1389 ++++ b/drivers/media/usb/cx231xx/cx231xx-core.c
1390 +@@ -712,6 +712,7 @@ int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode)
1391 + break;
1392 + case CX231XX_BOARD_CNXT_RDE_253S:
1393 + case CX231XX_BOARD_CNXT_RDU_253S:
1394 ++ case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID:
1395 + errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 1);
1396 + break;
1397 + case CX231XX_BOARD_HAUPPAUGE_EXETER:
1398 +@@ -738,7 +739,7 @@ int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode)
1399 + case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID:
1400 + case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL:
1401 + case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC:
1402 +- errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0);
1403 ++ errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0);
1404 + break;
1405 + default:
1406 + break;
1407 +diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c
1408 +index 1105db2355d2..83bfb1659abe 100644
1409 +--- a/drivers/memstick/host/rtsx_usb_ms.c
1410 ++++ b/drivers/memstick/host/rtsx_usb_ms.c
1411 +@@ -524,6 +524,7 @@ static void rtsx_usb_ms_handle_req(struct work_struct *work)
1412 + int rc;
1413 +
1414 + if (!host->req) {
1415 ++ pm_runtime_get_sync(ms_dev(host));
1416 + do {
1417 + rc = memstick_next_req(msh, &host->req);
1418 + dev_dbg(ms_dev(host), "next req %d\n", rc);
1419 +@@ -544,6 +545,7 @@ static void rtsx_usb_ms_handle_req(struct work_struct *work)
1420 + host->req->error);
1421 + }
1422 + } while (!rc);
1423 ++ pm_runtime_put(ms_dev(host));
1424 + }
1425 +
1426 + }
1427 +@@ -570,6 +572,7 @@ static int rtsx_usb_ms_set_param(struct memstick_host *msh,
1428 + dev_dbg(ms_dev(host), "%s: param = %d, value = %d\n",
1429 + __func__, param, value);
1430 +
1431 ++ pm_runtime_get_sync(ms_dev(host));
1432 + mutex_lock(&ucr->dev_mutex);
1433 +
1434 + err = rtsx_usb_card_exclusive_check(ucr, RTSX_USB_MS_CARD);
1435 +@@ -635,6 +638,7 @@ static int rtsx_usb_ms_set_param(struct memstick_host *msh,
1436 + }
1437 + out:
1438 + mutex_unlock(&ucr->dev_mutex);
1439 ++ pm_runtime_put(ms_dev(host));
1440 +
1441 + /* power-on delay */
1442 + if (param == MEMSTICK_POWER && value == MEMSTICK_POWER_ON)
1443 +@@ -681,6 +685,7 @@ static int rtsx_usb_detect_ms_card(void *__host)
1444 + int err;
1445 +
1446 + for (;;) {
1447 ++ pm_runtime_get_sync(ms_dev(host));
1448 + mutex_lock(&ucr->dev_mutex);
1449 +
1450 + /* Check pending MS card changes */
1451 +@@ -703,6 +708,7 @@ static int rtsx_usb_detect_ms_card(void *__host)
1452 + }
1453 +
1454 + poll_again:
1455 ++ pm_runtime_put(ms_dev(host));
1456 + if (host->eject)
1457 + break;
1458 +
1459 +diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
1460 +index a8a68acd3267..4e8069866c85 100644
1461 +--- a/drivers/misc/mei/hw-me-regs.h
1462 ++++ b/drivers/misc/mei/hw-me-regs.h
1463 +@@ -66,6 +66,9 @@
1464 + #ifndef _MEI_HW_MEI_REGS_H_
1465 + #define _MEI_HW_MEI_REGS_H_
1466 +
1467 ++#define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */
1468 ++#define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */
1469 ++
1470 + /*
1471 + * MEI device IDs
1472 + */
1473 +diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
1474 +index 27678d8154e0..0af3d7d30419 100644
1475 +--- a/drivers/misc/mei/pci-me.c
1476 ++++ b/drivers/misc/mei/pci-me.c
1477 +@@ -87,6 +87,9 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
1478 + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_cfg)},
1479 + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_cfg)},
1480 +
1481 ++ {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, mei_me_pch8_cfg)},
1482 ++ {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, mei_me_pch8_cfg)},
1483 ++
1484 + /* required last entry */
1485 + {0, }
1486 + };
1487 +diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
1488 +index 64950035613b..f2b733275a0a 100644
1489 +--- a/drivers/mmc/card/block.c
1490 ++++ b/drivers/mmc/card/block.c
1491 +@@ -1755,7 +1755,7 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
1492 + struct mmc_blk_data *md = mq->data;
1493 + struct mmc_packed *packed = mqrq->packed;
1494 + bool do_rel_wr, do_data_tag;
1495 +- u32 *packed_cmd_hdr;
1496 ++ __le32 *packed_cmd_hdr;
1497 + u8 hdr_blocks;
1498 + u8 i = 1;
1499 +
1500 +@@ -2279,7 +2279,8 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
1501 + set_capacity(md->disk, size);
1502 +
1503 + if (mmc_host_cmd23(card->host)) {
1504 +- if (mmc_card_mmc(card) ||
1505 ++ if ((mmc_card_mmc(card) &&
1506 ++ card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
1507 + (mmc_card_sd(card) &&
1508 + card->scr.cmds & SD_SCR_CMD23_SUPPORT))
1509 + md->flags |= MMC_BLK_CMD23;
1510 +diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
1511 +index 36cddab57d77..cf30b3712cb2 100644
1512 +--- a/drivers/mmc/card/queue.h
1513 ++++ b/drivers/mmc/card/queue.h
1514 +@@ -25,7 +25,7 @@ enum mmc_packed_type {
1515 +
1516 + struct mmc_packed {
1517 + struct list_head list;
1518 +- u32 cmd_hdr[1024];
1519 ++ __le32 cmd_hdr[1024];
1520 + unsigned int blocks;
1521 + u8 nr_entries;
1522 + u8 retries;
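
The mmc change above retypes the packed command header from u32 to __le32: the card consumes the header in little-endian byte order, so the __le32 annotation makes sparse flag any store that lacks a cpu_to_le32() conversion, a bug that silently corrupted the header on big-endian hosts. A small user-space demo of the byte-order conversion (cpu_to_le32_demo is a local stand-in for the kernel helper):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* User-space stand-in for the kernel's cpu_to_le32() (assumption: only
 * needed for this demo; the kernel helper comes from asm/byteorder.h). */
static uint32_t cpu_to_le32_demo(uint32_t v)
{
    uint8_t b[4] = { v & 0xff, (v >> 8) & 0xff, (v >> 16) & 0xff, v >> 24 };
    uint32_t le;
    memcpy(&le, b, sizeof(le));
    return le;
}

int main(void)
{
    uint32_t hdr_word = 0x12345678;              /* value the card expects */
    uint32_t on_wire  = cpu_to_le32_demo(hdr_word);
    /* On little-endian hosts on_wire == hdr_word; on big-endian hosts the
     * bytes swap - exactly the mismatch the u32 -> __le32 change exposes
     * so that every store gets an explicit conversion. */
    printf("cpu order: 0x%08x, wire order: 0x%08x\n",
           (unsigned)hdr_word, (unsigned)on_wire);
    return 0;
}
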
1523 +diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
1524 +index 6c71fc9f76c7..da9f71b8deb0 100644
1525 +--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
1526 ++++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
1527 +@@ -1138,11 +1138,6 @@ static void sdmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1528 + dev_dbg(sdmmc_dev(host), "%s\n", __func__);
1529 + mutex_lock(&ucr->dev_mutex);
1530 +
1531 +- if (rtsx_usb_card_exclusive_check(ucr, RTSX_USB_SD_CARD)) {
1532 +- mutex_unlock(&ucr->dev_mutex);
1533 +- return;
1534 +- }
1535 +-
1536 + sd_set_power_mode(host, ios->power_mode);
1537 + sd_set_bus_width(host, ios->bus_width);
1538 + sd_set_timing(host, ios->timing, &host->ddr_mode);
1539 +@@ -1314,6 +1309,7 @@ static void rtsx_usb_update_led(struct work_struct *work)
1540 + container_of(work, struct rtsx_usb_sdmmc, led_work);
1541 + struct rtsx_ucr *ucr = host->ucr;
1542 +
1543 ++ pm_runtime_get_sync(sdmmc_dev(host));
1544 + mutex_lock(&ucr->dev_mutex);
1545 +
1546 + if (host->led.brightness == LED_OFF)
1547 +@@ -1322,6 +1318,7 @@ static void rtsx_usb_update_led(struct work_struct *work)
1548 + rtsx_usb_turn_on_led(ucr);
1549 +
1550 + mutex_unlock(&ucr->dev_mutex);
1551 ++ pm_runtime_put(sdmmc_dev(host));
1552 + }
1553 + #endif
1554 +
1555 +diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
1556 +index 552a34dc4f82..64a428984afe 100644
1557 +--- a/drivers/mmc/host/sdhci.c
1558 ++++ b/drivers/mmc/host/sdhci.c
1559 +@@ -675,7 +675,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
1560 + * host->clock is in Hz. target_timeout is in us.
1561 + * Hence, us = 1000000 * cycles / Hz. Round up.
1562 + */
1563 +- val = 1000000 * data->timeout_clks;
1564 ++ val = 1000000ULL * data->timeout_clks;
1565 + if (do_div(val, host->clock))
1566 + target_timeout++;
1567 + target_timeout += val;
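
The sdhci fix above adds a ULL suffix: with plain 32-bit arithmetic, 1000000 * data->timeout_clks wraps for timeout_clks above roughly 4294 and yields a far-too-small timeout; the suffix promotes the multiply to 64 bits before do_div() runs. A tiny demo of the difference:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t timeout_clks = 10000;                /* plausible large value */
    uint32_t wrong = 1000000 * timeout_clks;      /* 32-bit multiply wraps */
    uint64_t right = 1000000ULL * timeout_clks;   /* promoted to 64 bits first */
    printf("32-bit: %u\n64-bit: %llu\n", wrong, (unsigned long long)right);
    return 0;
}
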
1568 +diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
1569 +index 56065632a5b8..75286588b823 100644
1570 +--- a/drivers/mtd/ubi/wl.c
1571 ++++ b/drivers/mtd/ubi/wl.c
1572 +@@ -643,7 +643,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
1573 + int shutdown)
1574 + {
1575 + int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
1576 +- int vol_id = -1, lnum = -1;
1577 ++ int erase = 0, keep = 0, vol_id = -1, lnum = -1;
1578 + #ifdef CONFIG_MTD_UBI_FASTMAP
1579 + int anchor = wrk->anchor;
1580 + #endif
1581 +@@ -777,6 +777,16 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
1582 + e1->pnum);
1583 + scrubbing = 1;
1584 + goto out_not_moved;
1585 ++ } else if (ubi->fast_attach && err == UBI_IO_BAD_HDR_EBADMSG) {
1586 ++ /*
1587 ++ * While a full scan would detect interrupted erasures
1588 ++ * at attach time, we can face them here when attached from
1589 ++ * a Fastmap.
1590 ++ */
1591 ++ dbg_wl("PEB %d has ECC errors, maybe from an interrupted erasure",
1592 ++ e1->pnum);
1593 ++ erase = 1;
1594 ++ goto out_not_moved;
1595 + }
1596 +
1597 + ubi_err(ubi, "error %d while reading VID header from PEB %d",
1598 +@@ -810,6 +820,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
1599 + * Target PEB had bit-flips or write error - torture it.
1600 + */
1601 + torture = 1;
1602 ++ keep = 1;
1603 + goto out_not_moved;
1604 + }
1605 +
1606 +@@ -895,7 +906,7 @@ out_not_moved:
1607 + ubi->erroneous_peb_count += 1;
1608 + } else if (scrubbing)
1609 + wl_tree_add(e1, &ubi->scrub);
1610 +- else
1611 ++ else if (keep)
1612 + wl_tree_add(e1, &ubi->used);
1613 + ubi_assert(!ubi->move_to_put);
1614 + ubi->move_from = ubi->move_to = NULL;
1615 +@@ -907,6 +918,12 @@ out_not_moved:
1616 + if (err)
1617 + goto out_ro;
1618 +
1619 ++ if (erase) {
1620 ++ err = do_sync_erase(ubi, e1, vol_id, lnum, 1);
1621 ++ if (err)
1622 ++ goto out_ro;
1623 ++ }
1624 ++
1625 + mutex_unlock(&ubi->move_mutex);
1626 + return 0;
1627 +
1628 +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
1629 +index 2e611dc5f162..1c8123816745 100644
1630 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
1631 ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
1632 +@@ -14819,6 +14819,10 @@ static int bnx2x_get_fc_npiv(struct net_device *dev,
1633 + }
1634 +
1635 + offset = SHMEM2_RD(bp, fc_npiv_nvram_tbl_addr[BP_PORT(bp)]);
1636 ++ if (!offset) {
1637 ++ DP(BNX2X_MSG_MCP, "No FC-NPIV in NVRAM\n");
1638 ++ goto out;
1639 ++ }
1640 + DP(BNX2X_MSG_MCP, "Offset of FC-NPIV in NVRAM: %08x\n", offset);
1641 +
1642 + /* Read the table contents from nvram */
1643 +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
1644 +index 67e9633ea9c7..232191417b93 100644
1645 +--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
1646 ++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
1647 +@@ -2282,7 +2282,7 @@ static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
1648 + struct mlx4_en_dev *mdev = en_priv->mdev;
1649 + u64 mac_u64 = mlx4_mac_to_u64(mac);
1650 +
1651 +- if (!is_valid_ether_addr(mac))
1652 ++ if (is_multicast_ether_addr(mac))
1653 + return -EINVAL;
1654 +
1655 + return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64);
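
The mlx4 change above narrows the VF MAC check: is_valid_ether_addr() also rejects the all-zero address, but zero is precisely what userspace passes to clear a VF MAC, so only multicast addresses should be refused here. A user-space sketch of the two predicates (simplified re-implementations of the etherdevice.h helpers, for illustration only):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Simplified re-implementations of the etherdevice.h helpers (assumption:
 * enough for the demo; the kernel versions use optimized word loads). */
static bool is_multicast(const unsigned char *a) { return a[0] & 0x01; }
static bool is_zero(const unsigned char *a)
{
    static const unsigned char z[6];
    return memcmp(a, z, 6) == 0;
}
static bool is_valid(const unsigned char *a) { return !is_multicast(a) && !is_zero(a); }

int main(void)
{
    const unsigned char zero[6] = {0};
    /* Old check: !is_valid() -> -EINVAL, so clearing the MAC was refused. */
    printf("old check rejects zero MAC: %d\n", !is_valid(zero));
    /* New check: only multicast is refused; zero passes and unsets the MAC. */
    printf("new check rejects zero MAC: %d\n", is_multicast(zero));
    return 0;
}
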
1656 +diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c
1657 +index 5be34118e0af..f67e7e5b13e1 100644
1658 +--- a/drivers/net/wireless/realtek/rtlwifi/regd.c
1659 ++++ b/drivers/net/wireless/realtek/rtlwifi/regd.c
1660 +@@ -345,9 +345,9 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select(
1661 + return &rtl_regdom_no_midband;
1662 + case COUNTRY_CODE_IC:
1663 + return &rtl_regdom_11;
1664 +- case COUNTRY_CODE_ETSI:
1665 + case COUNTRY_CODE_TELEC_NETGEAR:
1666 + return &rtl_regdom_60_64;
1667 ++ case COUNTRY_CODE_ETSI:
1668 + case COUNTRY_CODE_SPAIN:
1669 + case COUNTRY_CODE_FRANCE:
1670 + case COUNTRY_CODE_ISRAEL:
1671 +@@ -406,6 +406,8 @@ static u8 channel_plan_to_country_code(u8 channelplan)
1672 + return COUNTRY_CODE_WORLD_WIDE_13;
1673 + case 0x22:
1674 + return COUNTRY_CODE_IC;
1675 ++ case 0x25:
1676 ++ return COUNTRY_CODE_ETSI;
1677 + case 0x32:
1678 + return COUNTRY_CODE_TELEC_NETGEAR;
1679 + case 0x41:
1680 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
1681 +index 42774bc39786..254192b5dad1 100644
1682 +--- a/drivers/pci/quirks.c
1683 ++++ b/drivers/pci/quirks.c
1684 +@@ -3136,6 +3136,7 @@ static void quirk_no_bus_reset(struct pci_dev *dev)
1685 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
1686 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
1687 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
1688 ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
1689 +
1690 + static void quirk_no_pm_reset(struct pci_dev *dev)
1691 + {
1692 +diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
1693 +index fb991ec76423..696116ebdf50 100644
1694 +--- a/drivers/regulator/tps65910-regulator.c
1695 ++++ b/drivers/regulator/tps65910-regulator.c
1696 +@@ -1111,6 +1111,12 @@ static int tps65910_probe(struct platform_device *pdev)
1697 + pmic->num_regulators = ARRAY_SIZE(tps65910_regs);
1698 + pmic->ext_sleep_control = tps65910_ext_sleep_control;
1699 + info = tps65910_regs;
1700 ++ /* Work around silicon erratum SWCZ010: the programmed output
1701 ++ * voltage level can go higher than expected or the device can crash.
1702 ++ * Workaround: disable synchronization of the DCDC clocks.
1703 ++ */
1704 ++ tps65910_reg_clear_bits(pmic->mfd, TPS65910_DCDCCTRL,
1705 ++ DCDCCTRL_DCDCCKSYNC_MASK);
1706 + break;
1707 + case TPS65911:
1708 + pmic->get_ctrl_reg = &tps65911_get_ctrl_register;
1709 +diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
1710 +index 5d7fbe4e907e..581001989937 100644
1711 +--- a/drivers/s390/scsi/zfcp_dbf.c
1712 ++++ b/drivers/s390/scsi/zfcp_dbf.c
1713 +@@ -3,7 +3,7 @@
1714 + *
1715 + * Debug traces for zfcp.
1716 + *
1717 +- * Copyright IBM Corp. 2002, 2013
1718 ++ * Copyright IBM Corp. 2002, 2016
1719 + */
1720 +
1721 + #define KMSG_COMPONENT "zfcp"
1722 +@@ -65,7 +65,7 @@ void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
1723 + * @tag: tag indicating which kind of unsolicited status has been received
1724 + * @req: request for which a response was received
1725 + */
1726 +-void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
1727 ++void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
1728 + {
1729 + struct zfcp_dbf *dbf = req->adapter->dbf;
1730 + struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
1731 +@@ -85,6 +85,8 @@ void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
1732 + rec->u.res.req_issued = req->issued;
1733 + rec->u.res.prot_status = q_pref->prot_status;
1734 + rec->u.res.fsf_status = q_head->fsf_status;
1735 ++ rec->u.res.port_handle = q_head->port_handle;
1736 ++ rec->u.res.lun_handle = q_head->lun_handle;
1737 +
1738 + memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual,
1739 + FSF_PROT_STATUS_QUAL_SIZE);
1740 +@@ -97,7 +99,7 @@ void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
1741 + rec->pl_len, "fsf_res", req->req_id);
1742 + }
1743 +
1744 +- debug_event(dbf->hba, 1, rec, sizeof(*rec));
1745 ++ debug_event(dbf->hba, level, rec, sizeof(*rec));
1746 + spin_unlock_irqrestore(&dbf->hba_lock, flags);
1747 + }
1748 +
1749 +@@ -241,7 +243,8 @@ static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
1750 + if (sdev) {
1751 + rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status);
1752 + rec->lun = zfcp_scsi_dev_lun(sdev);
1753 +- }
1754 ++ } else
1755 ++ rec->lun = ZFCP_DBF_INVALID_LUN;
1756 + }
1757 +
1758 + /**
1759 +@@ -320,13 +323,48 @@ void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
1760 + spin_unlock_irqrestore(&dbf->rec_lock, flags);
1761 + }
1762 +
1763 ++/**
1764 ++ * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery
1765 ++ * @tag: identifier for event
1766 ++ * @wka_port: well known address port
1767 ++ * @req_id: request ID to correlate with potential HBA trace record
1768 ++ */
1769 ++void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port,
1770 ++ u64 req_id)
1771 ++{
1772 ++ struct zfcp_dbf *dbf = wka_port->adapter->dbf;
1773 ++ struct zfcp_dbf_rec *rec = &dbf->rec_buf;
1774 ++ unsigned long flags;
1775 ++
1776 ++ spin_lock_irqsave(&dbf->rec_lock, flags);
1777 ++ memset(rec, 0, sizeof(*rec));
1778 ++
1779 ++ rec->id = ZFCP_DBF_REC_RUN;
1780 ++ memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
1781 ++ rec->port_status = wka_port->status;
1782 ++ rec->d_id = wka_port->d_id;
1783 ++ rec->lun = ZFCP_DBF_INVALID_LUN;
1784 ++
1785 ++ rec->u.run.fsf_req_id = req_id;
1786 ++ rec->u.run.rec_status = ~0;
1787 ++ rec->u.run.rec_step = ~0;
1788 ++ rec->u.run.rec_action = ~0;
1789 ++ rec->u.run.rec_count = ~0;
1790 ++
1791 ++ debug_event(dbf->rec, 1, rec, sizeof(*rec));
1792 ++ spin_unlock_irqrestore(&dbf->rec_lock, flags);
1793 ++}
1794 ++
1795 + static inline
1796 +-void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len,
1797 +- u64 req_id, u32 d_id)
1798 ++void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
1799 ++ char *paytag, struct scatterlist *sg, u8 id, u16 len,
1800 ++ u64 req_id, u32 d_id, u16 cap_len)
1801 + {
1802 + struct zfcp_dbf_san *rec = &dbf->san_buf;
1803 + u16 rec_len;
1804 + unsigned long flags;
1805 ++ struct zfcp_dbf_pay *payload = &dbf->pay_buf;
1806 ++ u16 pay_sum = 0;
1807 +
1808 + spin_lock_irqsave(&dbf->san_lock, flags);
1809 + memset(rec, 0, sizeof(*rec));
1810 +@@ -334,10 +372,41 @@ void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len,
1811 + rec->id = id;
1812 + rec->fsf_req_id = req_id;
1813 + rec->d_id = d_id;
1814 +- rec_len = min(len, (u16)ZFCP_DBF_SAN_MAX_PAYLOAD);
1815 +- memcpy(rec->payload, data, rec_len);
1816 + memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
1817 ++ rec->pl_len = len; /* full length even if we cap pay below */
1818 ++ if (!sg)
1819 ++ goto out;
1820 ++ rec_len = min_t(unsigned int, sg->length, ZFCP_DBF_SAN_MAX_PAYLOAD);
1821 ++ memcpy(rec->payload, sg_virt(sg), rec_len); /* part of 1st sg entry */
1822 ++ if (len <= rec_len)
1823 ++ goto out; /* skip pay record if full content in rec->payload */
1824 ++
1825 ++ /* if (len > rec_len):
1826 ++ * dump data up to cap_len ignoring small duplicate in rec->payload
1827 ++ */
1828 ++ spin_lock(&dbf->pay_lock);
1829 ++ memset(payload, 0, sizeof(*payload));
1830 ++ memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN);
1831 ++ payload->fsf_req_id = req_id;
1832 ++ payload->counter = 0;
1833 ++ for (; sg && pay_sum < cap_len; sg = sg_next(sg)) {
1834 ++ u16 pay_len, offset = 0;
1835 ++
1836 ++ while (offset < sg->length && pay_sum < cap_len) {
1837 ++ pay_len = min((u16)ZFCP_DBF_PAY_MAX_REC,
1838 ++ (u16)(sg->length - offset));
1839 ++ /* cap_len <= pay_sum < cap_len+ZFCP_DBF_PAY_MAX_REC */
1840 ++ memcpy(payload->data, sg_virt(sg) + offset, pay_len);
1841 ++ debug_event(dbf->pay, 1, payload,
1842 ++ zfcp_dbf_plen(pay_len));
1843 ++ payload->counter++;
1844 ++ offset += pay_len;
1845 ++ pay_sum += pay_len;
1846 ++ }
1847 ++ }
1848 ++ spin_unlock(&dbf->pay_lock);
1849 +
1850 ++out:
1851 + debug_event(dbf->san, 1, rec, sizeof(*rec));
1852 + spin_unlock_irqrestore(&dbf->san_lock, flags);
1853 + }
1854 +@@ -354,9 +423,62 @@ void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
1855 + struct zfcp_fsf_ct_els *ct_els = fsf->data;
1856 + u16 length;
1857 +
1858 +- length = (u16)(ct_els->req->length + FC_CT_HDR_LEN);
1859 +- zfcp_dbf_san(tag, dbf, sg_virt(ct_els->req), ZFCP_DBF_SAN_REQ, length,
1860 +- fsf->req_id, d_id);
1861 ++ length = (u16)zfcp_qdio_real_bytes(ct_els->req);
1862 ++ zfcp_dbf_san(tag, dbf, "san_req", ct_els->req, ZFCP_DBF_SAN_REQ,
1863 ++ length, fsf->req_id, d_id, length);
1864 ++}
1865 ++
1866 ++static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
1867 ++ struct zfcp_fsf_req *fsf,
1868 ++ u16 len)
1869 ++{
1870 ++ struct zfcp_fsf_ct_els *ct_els = fsf->data;
1871 ++ struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
1872 ++ struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
1873 ++ struct scatterlist *resp_entry = ct_els->resp;
1874 ++ struct fc_gpn_ft_resp *acc;
1875 ++ int max_entries, x, last = 0;
1876 ++
1877 ++ if (!(memcmp(tag, "fsscth2", 7) == 0
1878 ++ && ct_els->d_id == FC_FID_DIR_SERV
1879 ++ && reqh->ct_rev == FC_CT_REV
1880 ++ && reqh->ct_in_id[0] == 0
1881 ++ && reqh->ct_in_id[1] == 0
1882 ++ && reqh->ct_in_id[2] == 0
1883 ++ && reqh->ct_fs_type == FC_FST_DIR
1884 ++ && reqh->ct_fs_subtype == FC_NS_SUBTYPE
1885 ++ && reqh->ct_options == 0
1886 ++ && reqh->_ct_resvd1 == 0
1887 ++ && reqh->ct_cmd == FC_NS_GPN_FT
1888 ++ /* reqh->ct_mr_size can vary so do not match but read below */
1889 ++ && reqh->_ct_resvd2 == 0
1890 ++ && reqh->ct_reason == 0
1891 ++ && reqh->ct_explan == 0
1892 ++ && reqh->ct_vendor == 0
1893 ++ && reqn->fn_resvd == 0
1894 ++ && reqn->fn_domain_id_scope == 0
1895 ++ && reqn->fn_area_id_scope == 0
1896 ++ && reqn->fn_fc4_type == FC_TYPE_FCP))
1897 ++ return len; /* not GPN_FT response so do not cap */
1898 ++
1899 ++ acc = sg_virt(resp_entry);
1900 ++ max_entries = (reqh->ct_mr_size * 4 / sizeof(struct fc_gpn_ft_resp))
1901 ++ + 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
1902 ++ * to account for header as 1st pseudo "entry" */;
1903 ++
1904 ++ /* the basic CT_IU preamble is the same size as one entry in the GPN_FT
1905 ++ * response, allowing us to skip special handling for it - just skip it
1906 ++ */
1907 ++ for (x = 1; x < max_entries && !last; x++) {
1908 ++ if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
1909 ++ acc++;
1910 ++ else
1911 ++ acc = sg_virt(++resp_entry);
1912 ++
1913 ++ last = acc->fp_flags & FC_NS_FID_LAST;
1914 ++ }
1915 ++ len = min(len, (u16)(x * sizeof(struct fc_gpn_ft_resp)));
1916 ++ return len; /* cap after last entry */
1917 + }
1918 +
1919 + /**
1920 +@@ -370,9 +492,10 @@ void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
1921 + struct zfcp_fsf_ct_els *ct_els = fsf->data;
1922 + u16 length;
1923 +
1924 +- length = (u16)(ct_els->resp->length + FC_CT_HDR_LEN);
1925 +- zfcp_dbf_san(tag, dbf, sg_virt(ct_els->resp), ZFCP_DBF_SAN_RES, length,
1926 +- fsf->req_id, 0);
1927 ++ length = (u16)zfcp_qdio_real_bytes(ct_els->resp);
1928 ++ zfcp_dbf_san(tag, dbf, "san_res", ct_els->resp, ZFCP_DBF_SAN_RES,
1929 ++ length, fsf->req_id, ct_els->d_id,
1930 ++ zfcp_dbf_san_res_cap_len_if_gpn_ft(tag, fsf, length));
1931 + }
1932 +
1933 + /**
1934 +@@ -386,11 +509,13 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
1935 + struct fsf_status_read_buffer *srb =
1936 + (struct fsf_status_read_buffer *) fsf->data;
1937 + u16 length;
1938 ++ struct scatterlist sg;
1939 +
1940 + length = (u16)(srb->length -
1941 + offsetof(struct fsf_status_read_buffer, payload));
1942 +- zfcp_dbf_san(tag, dbf, srb->payload.data, ZFCP_DBF_SAN_ELS, length,
1943 +- fsf->req_id, ntoh24(srb->d_id));
1944 ++ sg_init_one(&sg, srb->payload.data, length);
1945 ++ zfcp_dbf_san(tag, dbf, "san_els", &sg, ZFCP_DBF_SAN_ELS, length,
1946 ++ fsf->req_id, ntoh24(srb->d_id), length);
1947 + }
1948 +
1949 + /**
1950 +@@ -399,7 +524,8 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
1951 + * @sc: pointer to struct scsi_cmnd
1952 + * @fsf: pointer to struct zfcp_fsf_req
1953 + */
1954 +-void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
1955 ++void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
1956 ++ struct zfcp_fsf_req *fsf)
1957 + {
1958 + struct zfcp_adapter *adapter =
1959 + (struct zfcp_adapter *) sc->device->host->hostdata[0];
1960 +@@ -442,7 +568,7 @@ void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
1961 + }
1962 + }
1963 +
1964 +- debug_event(dbf->scsi, 1, rec, sizeof(*rec));
1965 ++ debug_event(dbf->scsi, level, rec, sizeof(*rec));
1966 + spin_unlock_irqrestore(&dbf->scsi_lock, flags);
1967 + }
1968 +
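
The reworked zfcp_dbf_san() above no longer copies one flat buffer; it walks the request's scatterlist and emits the payload as a series of trace records of at most ZFCP_DBF_PAY_MAX_REC bytes each, stopping once cap_len bytes have been dumped (which is how GPN_FT responses get capped after the last valid entry). A user-space sketch of that loop shape, with a plain array standing in for the scatterlist and illustrative sizes:

#include <stdio.h>

#define PAY_MAX_REC 8   /* stand-in for ZFCP_DBF_PAY_MAX_REC (illustration) */

struct seg { const char *data; size_t len; };   /* stand-in for scatterlist */

/* Emit the payload in chunks of at most PAY_MAX_REC bytes, stopping once
 * cap_len bytes have been traced - the same loop shape as zfcp_dbf_san().
 * Like the kernel loop, the final record may overshoot cap_len by up to
 * PAY_MAX_REC - 1 bytes. */
static void trace_payload(const struct seg *sg, size_t nseg, size_t cap_len)
{
    size_t pay_sum = 0, counter = 0;
    for (size_t i = 0; i < nseg && pay_sum < cap_len; i++) {
        size_t offset = 0;
        while (offset < sg[i].len && pay_sum < cap_len) {
            size_t pay_len = sg[i].len - offset;
            if (pay_len > PAY_MAX_REC)
                pay_len = PAY_MAX_REC;
            printf("record %zu: %.*s\n", counter++,
                   (int)pay_len, sg[i].data + offset);
            offset += pay_len;
            pay_sum += pay_len;
        }
    }
}

int main(void)
{
    struct seg sg[] = { { "first-segment---", 16 }, { "second-segment--", 16 } };
    trace_payload(sg, 2, 20);   /* cap after 20 bytes, as with a GPN_FT cap */
    return 0;
}
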
1969 +diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
1970 +index 0be3d48681ae..36d07584271d 100644
1971 +--- a/drivers/s390/scsi/zfcp_dbf.h
1972 ++++ b/drivers/s390/scsi/zfcp_dbf.h
1973 +@@ -2,7 +2,7 @@
1974 + * zfcp device driver
1975 + * debug feature declarations
1976 + *
1977 +- * Copyright IBM Corp. 2008, 2010
1978 ++ * Copyright IBM Corp. 2008, 2015
1979 + */
1980 +
1981 + #ifndef ZFCP_DBF_H
1982 +@@ -17,6 +17,11 @@
1983 +
1984 + #define ZFCP_DBF_INVALID_LUN 0xFFFFFFFFFFFFFFFFull
1985 +
1986 ++enum zfcp_dbf_pseudo_erp_act_type {
1987 ++ ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD = 0xff,
1988 ++ ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL = 0xfe,
1989 ++};
1990 ++
1991 + /**
1992 + * struct zfcp_dbf_rec_trigger - trace record for triggered recovery action
1993 + * @ready: number of ready recovery actions
1994 +@@ -110,6 +115,7 @@ struct zfcp_dbf_san {
1995 + u32 d_id;
1996 + #define ZFCP_DBF_SAN_MAX_PAYLOAD (FC_CT_HDR_LEN + 32)
1997 + char payload[ZFCP_DBF_SAN_MAX_PAYLOAD];
1998 ++ u16 pl_len;
1999 + } __packed;
2000 +
2001 + /**
2002 +@@ -126,6 +132,8 @@ struct zfcp_dbf_hba_res {
2003 + u8 prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE];
2004 + u32 fsf_status;
2005 + u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
2006 ++ u32 port_handle;
2007 ++ u32 lun_handle;
2008 + } __packed;
2009 +
2010 + /**
2011 +@@ -279,7 +287,7 @@ static inline
2012 + void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
2013 + {
2014 + if (debug_level_enabled(req->adapter->dbf->hba, level))
2015 +- zfcp_dbf_hba_fsf_res(tag, req);
2016 ++ zfcp_dbf_hba_fsf_res(tag, level, req);
2017 + }
2018 +
2019 + /**
2020 +@@ -318,7 +326,7 @@ void _zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *scmd,
2021 + scmd->device->host->hostdata[0];
2022 +
2023 + if (debug_level_enabled(adapter->dbf->scsi, level))
2024 +- zfcp_dbf_scsi(tag, scmd, req);
2025 ++ zfcp_dbf_scsi(tag, level, scmd, req);
2026 + }
2027 +
2028 + /**
2029 +diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
2030 +index 3fb410977014..a59d678125bd 100644
2031 +--- a/drivers/s390/scsi/zfcp_erp.c
2032 ++++ b/drivers/s390/scsi/zfcp_erp.c
2033 +@@ -3,7 +3,7 @@
2034 + *
2035 + * Error Recovery Procedures (ERP).
2036 + *
2037 +- * Copyright IBM Corp. 2002, 2010
2038 ++ * Copyright IBM Corp. 2002, 2015
2039 + */
2040 +
2041 + #define KMSG_COMPONENT "zfcp"
2042 +@@ -1217,8 +1217,14 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
2043 + break;
2044 +
2045 + case ZFCP_ERP_ACTION_REOPEN_PORT:
2046 +- if (result == ZFCP_ERP_SUCCEEDED)
2047 +- zfcp_scsi_schedule_rport_register(port);
2048 ++ /* This switch case might also happen after a forced reopen
2049 ++ * was successfully done and thus overwritten with a new
2050 ++ * non-forced reopen at `ersfs_2'. In this case, we must not
2051 ++ * do the clean-up of the non-forced version.
2052 ++ */
2053 ++ if (act->step != ZFCP_ERP_STEP_UNINITIALIZED)
2054 ++ if (result == ZFCP_ERP_SUCCEEDED)
2055 ++ zfcp_scsi_schedule_rport_register(port);
2056 + /* fall through */
2057 + case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
2058 + put_device(&port->dev);
2059 +diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
2060 +index 5b500652572b..c8fed9fa1cca 100644
2061 +--- a/drivers/s390/scsi/zfcp_ext.h
2062 ++++ b/drivers/s390/scsi/zfcp_ext.h
2063 +@@ -3,7 +3,7 @@
2064 + *
2065 + * External function declarations.
2066 + *
2067 +- * Copyright IBM Corp. 2002, 2010
2068 ++ * Copyright IBM Corp. 2002, 2015
2069 + */
2070 +
2071 + #ifndef ZFCP_EXT_H
2072 +@@ -35,8 +35,9 @@ extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
2073 + extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
2074 + struct zfcp_port *, struct scsi_device *, u8, u8);
2075 + extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
2076 ++extern void zfcp_dbf_rec_run_wka(char *, struct zfcp_fc_wka_port *, u64);
2077 + extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
2078 +-extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *);
2079 ++extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *);
2080 + extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
2081 + extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
2082 + extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **);
2083 +@@ -44,7 +45,8 @@ extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *);
2084 + extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
2085 + extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
2086 + extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
2087 +-extern void zfcp_dbf_scsi(char *, struct scsi_cmnd *, struct zfcp_fsf_req *);
2088 ++extern void zfcp_dbf_scsi(char *, int, struct scsi_cmnd *,
2089 ++ struct zfcp_fsf_req *);
2090 +
2091 + /* zfcp_erp.c */
2092 + extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
2093 +diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
2094 +index 522a633c866a..75f820ca17b7 100644
2095 +--- a/drivers/s390/scsi/zfcp_fsf.c
2096 ++++ b/drivers/s390/scsi/zfcp_fsf.c
2097 +@@ -3,7 +3,7 @@
2098 + *
2099 + * Implementation of FSF commands.
2100 + *
2101 +- * Copyright IBM Corp. 2002, 2013
2102 ++ * Copyright IBM Corp. 2002, 2015
2103 + */
2104 +
2105 + #define KMSG_COMPONENT "zfcp"
2106 +@@ -508,7 +508,10 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
2107 + fc_host_port_type(shost) = FC_PORTTYPE_PTP;
2108 + break;
2109 + case FSF_TOPO_FABRIC:
2110 +- fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
2111 ++ if (bottom->connection_features & FSF_FEATURE_NPIV_MODE)
2112 ++ fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
2113 ++ else
2114 ++ fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
2115 + break;
2116 + case FSF_TOPO_AL:
2117 + fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
2118 +@@ -613,7 +616,6 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
2119 +
2120 + if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
2121 + fc_host_permanent_port_name(shost) = bottom->wwpn;
2122 +- fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
2123 + } else
2124 + fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
2125 + fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
2126 +@@ -982,8 +984,12 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
2127 + if (zfcp_adapter_multi_buffer_active(adapter)) {
2128 + if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
2129 + return -EIO;
2130 ++ qtcb->bottom.support.req_buf_length =
2131 ++ zfcp_qdio_real_bytes(sg_req);
2132 + if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
2133 + return -EIO;
2134 ++ qtcb->bottom.support.resp_buf_length =
2135 ++ zfcp_qdio_real_bytes(sg_resp);
2136 +
2137 + zfcp_qdio_set_data_div(qdio, &req->qdio_req,
2138 + zfcp_qdio_sbale_count(sg_req));
2139 +@@ -1073,6 +1079,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
2140 +
2141 + req->handler = zfcp_fsf_send_ct_handler;
2142 + req->qtcb->header.port_handle = wka_port->handle;
2143 ++ ct->d_id = wka_port->d_id;
2144 + req->data = ct;
2145 +
2146 + zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);
2147 +@@ -1169,6 +1176,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
2148 +
2149 + hton24(req->qtcb->bottom.support.d_id, d_id);
2150 + req->handler = zfcp_fsf_send_els_handler;
2151 ++ els->d_id = d_id;
2152 + req->data = els;
2153 +
2154 + zfcp_dbf_san_req("fssels1", req, d_id);
2155 +@@ -1575,7 +1583,7 @@ out:
2156 + int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
2157 + {
2158 + struct zfcp_qdio *qdio = wka_port->adapter->qdio;
2159 +- struct zfcp_fsf_req *req;
2160 ++ struct zfcp_fsf_req *req = NULL;
2161 + int retval = -EIO;
2162 +
2163 + spin_lock_irq(&qdio->req_q_lock);
2164 +@@ -1604,6 +1612,8 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
2165 + zfcp_fsf_req_free(req);
2166 + out:
2167 + spin_unlock_irq(&qdio->req_q_lock);
2168 ++ if (req && !IS_ERR(req))
2169 ++ zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
2170 + return retval;
2171 + }
2172 +
2173 +@@ -1628,7 +1638,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
2174 + int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
2175 + {
2176 + struct zfcp_qdio *qdio = wka_port->adapter->qdio;
2177 +- struct zfcp_fsf_req *req;
2178 ++ struct zfcp_fsf_req *req = NULL;
2179 + int retval = -EIO;
2180 +
2181 + spin_lock_irq(&qdio->req_q_lock);
2182 +@@ -1657,6 +1667,8 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
2183 + zfcp_fsf_req_free(req);
2184 + out:
2185 + spin_unlock_irq(&qdio->req_q_lock);
2186 ++ if (req && !IS_ERR(req))
2187 ++ zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
2188 + return retval;
2189 + }
2190 +
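
In the two WKA-port functions above, req is now initialized to NULL so the new trace call after the unlock can tell the exit paths apart: the early exit may reach it before zfcp_fsf_req_create() ever ran, and the allocation itself may have returned an ERR_PTR, hence the combined req && !IS_ERR(req) test. A compilable sketch of the pattern (is_err() imitates the kernel's ERR_PTR encoding; the names are illustrative):

#include <stdio.h>

/* Stand-in for the kernel's ERR_PTR encoding (illustration only). */
#define MAX_ERRNO 4095UL
static int is_err(const void *p)
{
    return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

struct req { unsigned long id; };

static struct req *alloc_req(int fail)
{
    static struct req r = { .id = 42 };
    return fail ? (struct req *)(unsigned long)-12 : &r;  /* ERR_PTR(-ENOMEM) */
}

static void open_port(int queue_down, int alloc_fail)
{
    struct req *req = NULL;   /* must start NULL: 'out' may be reached early */

    if (queue_down)
        goto out;             /* early exit before any allocation */
    req = alloc_req(alloc_fail);
out:
    /* Past the (elided) unlock: trace only if a real request exists. */
    if (req && !is_err(req))
        printf("trace wka port event, req_id=%lu\n", req->id);
    else
        printf("nothing to trace\n");
}

int main(void)
{
    open_port(0, 0);
    open_port(0, 1);
    open_port(1, 0);
    return 0;
}
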
2191 +diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
2192 +index 57ae3ae1046d..be1c04b334c5 100644
2193 +--- a/drivers/s390/scsi/zfcp_fsf.h
2194 ++++ b/drivers/s390/scsi/zfcp_fsf.h
2195 +@@ -3,7 +3,7 @@
2196 + *
2197 + * Interface to the FSF support functions.
2198 + *
2199 +- * Copyright IBM Corp. 2002, 2010
2200 ++ * Copyright IBM Corp. 2002, 2015
2201 + */
2202 +
2203 + #ifndef FSF_H
2204 +@@ -436,6 +436,7 @@ struct zfcp_blk_drv_data {
2205 + * @handler_data: data passed to handler function
2206 + * @port: Optional pointer to port for zfcp internal ELS (only test link ADISC)
2207 + * @status: used to pass error status to calling function
2208 ++ * @d_id: Destination ID of the open WKA port for CT, or the D_ID for ELS
2209 + */
2210 + struct zfcp_fsf_ct_els {
2211 + struct scatterlist *req;
2212 +@@ -444,6 +445,7 @@ struct zfcp_fsf_ct_els {
2213 + void *handler_data;
2214 + struct zfcp_port *port;
2215 + int status;
2216 ++ u32 d_id;
2217 + };
2218 +
2219 + #endif /* FSF_H */
2220 +diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
2221 +index b3c6ff49103b..9069f98a1817 100644
2222 +--- a/drivers/s390/scsi/zfcp_scsi.c
2223 ++++ b/drivers/s390/scsi/zfcp_scsi.c
2224 +@@ -3,7 +3,7 @@
2225 + *
2226 + * Interface to Linux SCSI midlayer.
2227 + *
2228 +- * Copyright IBM Corp. 2002, 2013
2229 ++ * Copyright IBM Corp. 2002, 2015
2230 + */
2231 +
2232 + #define KMSG_COMPONENT "zfcp"
2233 +@@ -556,6 +556,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
2234 + ids.port_id = port->d_id;
2235 + ids.roles = FC_RPORT_ROLE_FCP_TARGET;
2236 +
2237 ++ zfcp_dbf_rec_trig("scpaddy", port->adapter, port, NULL,
2238 ++ ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
2239 ++ ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
2240 + rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
2241 + if (!rport) {
2242 + dev_err(&port->adapter->ccw_device->dev,
2243 +@@ -577,6 +580,9 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port)
2244 + struct fc_rport *rport = port->rport;
2245 +
2246 + if (rport) {
2247 ++ zfcp_dbf_rec_trig("scpdely", port->adapter, port, NULL,
2248 ++ ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
2249 ++ ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
2250 + fc_remote_port_delete(rport);
2251 + port->rport = NULL;
2252 + }
2253 +diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
2254 +index a3860367b568..e9ce74afd13f 100644
2255 +--- a/drivers/scsi/hpsa.c
2256 ++++ b/drivers/scsi/hpsa.c
2257 +@@ -3930,6 +3930,70 @@ static int hpsa_set_local_logical_count(struct ctlr_info *h,
2258 + return rc;
2259 + }
2260 +
2261 ++static bool hpsa_is_disk_spare(struct ctlr_info *h, u8 *lunaddrbytes)
2262 ++{
2263 ++ struct bmic_identify_physical_device *id_phys;
2264 ++ bool is_spare = false;
2265 ++ int rc;
2266 ++
2267 ++ id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
2268 ++ if (!id_phys)
2269 ++ return false;
2270 ++
2271 ++ rc = hpsa_bmic_id_physical_device(h,
2272 ++ lunaddrbytes,
2273 ++ GET_BMIC_DRIVE_NUMBER(lunaddrbytes),
2274 ++ id_phys, sizeof(*id_phys));
2275 ++ if (rc == 0)
2276 ++ is_spare = (id_phys->more_flags >> 6) & 0x01;
2277 ++
2278 ++ kfree(id_phys);
2279 ++ return is_spare;
2280 ++}
2281 ++
2282 ++#define RPL_DEV_FLAG_NON_DISK 0x1
2283 ++#define RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED 0x2
2284 ++#define RPL_DEV_FLAG_UNCONFIG_DISK 0x4
2285 ++
2286 ++#define BMIC_DEVICE_TYPE_ENCLOSURE 6
2287 ++
2288 ++static bool hpsa_skip_device(struct ctlr_info *h, u8 *lunaddrbytes,
2289 ++ struct ext_report_lun_entry *rle)
2290 ++{
2291 ++ u8 device_flags;
2292 ++ u8 device_type;
2293 ++
2294 ++ if (!MASKED_DEVICE(lunaddrbytes))
2295 ++ return false;
2296 ++
2297 ++ device_flags = rle->device_flags;
2298 ++ device_type = rle->device_type;
2299 ++
2300 ++ if (device_flags & RPL_DEV_FLAG_NON_DISK) {
2301 ++ if (device_type == BMIC_DEVICE_TYPE_ENCLOSURE)
2302 ++ return false;
2303 ++ return true;
2304 ++ }
2305 ++
2306 ++ if (!(device_flags & RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED))
2307 ++ return false;
2308 ++
2309 ++ if (device_flags & RPL_DEV_FLAG_UNCONFIG_DISK)
2310 ++ return false;
2311 ++
2312 ++ /*
2313 ++ * Spares may be spun down; we do not want to
2314 ++ * do an Inquiry to a RAID-set spare drive, as
2315 ++ * that would spin it up. That is a
2316 ++ * performance hit because I/O to the RAID device
2317 ++ * stops while the spin-up occurs, which can take
2318 ++ * over 50 seconds.
2319 ++ */
2320 ++ if (hpsa_is_disk_spare(h, lunaddrbytes))
2321 ++ return true;
2322 ++
2323 ++ return false;
2324 ++}
2325 +
2326 + static void hpsa_update_scsi_devices(struct ctlr_info *h)
2327 + {
2328 +@@ -4023,6 +4087,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
2329 + u8 *lunaddrbytes, is_OBDR = 0;
2330 + int rc = 0;
2331 + int phys_dev_index = i - (raid_ctlr_position == 0);
2332 ++ bool skip_device = false;
2333 +
2334 + physical_device = i < nphysicals + (raid_ctlr_position == 0);
2335 +
2336 +@@ -4030,10 +4095,15 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
2337 + lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
2338 + i, nphysicals, nlogicals, physdev_list, logdev_list);
2339 +
2340 +- /* skip masked non-disk devices */
2341 +- if (MASKED_DEVICE(lunaddrbytes) && physical_device &&
2342 +- (physdev_list->LUN[phys_dev_index].device_flags & 0x01))
2343 +- continue;
2344 ++ /*
2345 ++ * Skip over some devices such as a spare.
2346 ++ */
2347 ++ if (!tmpdevice->external && physical_device) {
2348 ++ skip_device = hpsa_skip_device(h, lunaddrbytes,
2349 ++ &physdev_list->LUN[phys_dev_index]);
2350 ++ if (skip_device)
2351 ++ continue;
2352 ++ }
2353 +
2354 + /* Get device type, vendor, model, device id */
2355 + rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
2356 +diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
2357 +index f0cfaacbfabd..692445bcca6f 100644
2358 +--- a/drivers/scsi/scsi_scan.c
2359 ++++ b/drivers/scsi/scsi_scan.c
2360 +@@ -1459,12 +1459,12 @@ retry:
2361 + out_err:
2362 + kfree(lun_data);
2363 + out:
2364 +- scsi_device_put(sdev);
2365 + if (scsi_device_created(sdev))
2366 + /*
2367 + * the sdev we used didn't appear in the report luns scan
2368 + */
2369 + __scsi_remove_device(sdev);
2370 ++ scsi_device_put(sdev);
2371 + return ret;
2372 + }
2373 +
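
The scsi_scan reordering above matters because scsi_device_put() may drop the last reference and free the sdev, so calling it before __scsi_remove_device() turned the removal into a use-after-free whenever the device existed only for the REPORT LUNS scan. A user-space refcount sketch of why the release must come last:

#include <stdio.h>
#include <stdlib.h>

struct dev { int refs; };

static struct dev *dev_get(struct dev *d) { d->refs++; return d; }
static void dev_put(struct dev *d)
{
    if (--d->refs == 0) {
        printf("freeing device\n");
        free(d);
    }
}
static void remove_device(struct dev *d)
{
    /* Touches the device - must run while we still hold a reference. */
    printf("removing device (refs=%d)\n", d->refs);
}

int main(void)
{
    struct dev *d = calloc(1, sizeof(*d));
    dev_get(d);
    /* Buggy order: dev_put(d); remove_device(d);  <- use-after-free if
     * ours was the last reference.  Fixed order below: */
    remove_device(d);
    dev_put(d);
    return 0;
}
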
2374 +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
2375 +index 0d7c6e86f149..6ee50742f6a5 100644
2376 +--- a/drivers/scsi/sd.c
2377 ++++ b/drivers/scsi/sd.c
2378 +@@ -2879,10 +2879,10 @@ static int sd_revalidate_disk(struct gendisk *disk)
2379 + if (sdkp->opt_xfer_blocks &&
2380 + sdkp->opt_xfer_blocks <= dev_max &&
2381 + sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
2382 +- sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_CACHE_SIZE)
2383 +- rw_max = q->limits.io_opt =
2384 +- sdkp->opt_xfer_blocks * sdp->sector_size;
2385 +- else
2386 ++ logical_to_bytes(sdp, sdkp->opt_xfer_blocks) >= PAGE_CACHE_SIZE) {
2387 ++ q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
2388 ++ rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
2389 ++ } else
2390 + rw_max = BLK_DEF_MAX_SECTORS;
2391 +
2392 + /* Combine with controller limits */
2393 +diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
2394 +index 654630bb7d0e..765a6f1ac1b7 100644
2395 +--- a/drivers/scsi/sd.h
2396 ++++ b/drivers/scsi/sd.h
2397 +@@ -151,6 +151,11 @@ static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blo
2398 + return blocks << (ilog2(sdev->sector_size) - 9);
2399 + }
2400 +
2401 ++static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t blocks)
2402 ++{
2403 ++ return blocks * sdev->sector_size;
2404 ++}
2405 ++
2406 + /*
2407 + * A DIF-capable target device can be formatted with different
2408 + * protection schemes. Currently 0 through 3 are defined:
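
The sd changes above keep the three units straight: q->limits.io_opt is in bytes while rw_max is in 512-byte sectors, and the old code assigned the byte value to both, inflating rw_max by a factor of sector_size/512. A quick user-space check with mirrors of the sd.h helpers (assuming, as SCSI requires, a power-of-two sector size of at least 512):

#include <stdio.h>

/* Mirrors of the sd.h helpers (assumption: sector_size is a power of two
 * >= 512; the kernel version shifts by ilog2(sector_size) - 9). */
static unsigned long long logical_to_sectors(unsigned sector_size,
                                             unsigned long long blocks)
{
    return blocks * (sector_size / 512);   /* 512-byte units */
}
static unsigned long long logical_to_bytes(unsigned sector_size,
                                           unsigned long long blocks)
{
    return blocks * sector_size;
}

int main(void)
{
    unsigned sector_size = 4096;            /* 4Kn drive */
    unsigned long long opt_xfer_blocks = 256;
    printf("io_opt = %llu bytes\n", logical_to_bytes(sector_size, opt_xfer_blocks));
    printf("rw_max = %llu sectors\n", logical_to_sectors(sector_size, opt_xfer_blocks));
    /* Old bug: rw_max was set to the byte value (1048576) instead of 2048. */
    return 0;
}
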
2409 +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
2410 +index 7bc3778a1ac9..2a67af4e2e13 100644
2411 +--- a/drivers/target/target_core_transport.c
2412 ++++ b/drivers/target/target_core_transport.c
2413 +@@ -1680,6 +1680,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
2414 + case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
2415 + case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
2416 + case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
2417 ++ case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
2418 + break;
2419 + case TCM_OUT_OF_RESOURCES:
2420 + sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2421 +@@ -2509,8 +2510,10 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
2422 + * fabric acknowledgement that requires two target_put_sess_cmd()
2423 + * invocations before se_cmd descriptor release.
2424 + */
2425 +- if (ack_kref)
2426 ++ if (ack_kref) {
2427 + kref_get(&se_cmd->cmd_kref);
2428 ++ se_cmd->se_cmd_flags |= SCF_ACK_KREF;
2429 ++ }
2430 +
2431 + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2432 + if (se_sess->sess_tearing_down) {
2433 +@@ -2833,6 +2836,12 @@ static const struct sense_info sense_info_table[] = {
2434 + .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
2435 + .add_sector_info = true,
2436 + },
2437 ++ [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
2438 ++ .key = COPY_ABORTED,
2439 ++ .asc = 0x0d,
2440 ++ .ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */
2441 ++
2442 ++ },
2443 + [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
2444 + /*
2445 + * Returning ILLEGAL REQUEST would cause immediate IO errors on
2446 +diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
2447 +index 47fe94ee10b8..153a6f255b6d 100644
2448 +--- a/drivers/target/target_core_xcopy.c
2449 ++++ b/drivers/target/target_core_xcopy.c
2450 +@@ -104,7 +104,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
2451 + }
2452 + mutex_unlock(&g_device_mutex);
2453 +
2454 +- pr_err("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
2455 ++ pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
2456 + return -EINVAL;
2457 + }
2458 +
2459 +@@ -185,7 +185,7 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op
2460 +
2461 + static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
2462 + struct xcopy_op *xop, unsigned char *p,
2463 +- unsigned short tdll)
2464 ++ unsigned short tdll, sense_reason_t *sense_ret)
2465 + {
2466 + struct se_device *local_dev = se_cmd->se_dev;
2467 + unsigned char *desc = p;
2468 +@@ -193,6 +193,8 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
2469 + unsigned short start = 0;
2470 + bool src = true;
2471 +
2472 ++ *sense_ret = TCM_INVALID_PARAMETER_LIST;
2473 ++
2474 + if (offset != 0) {
2475 + pr_err("XCOPY target descriptor list length is not"
2476 + " multiple of %d\n", XCOPY_TARGET_DESC_LEN);
2477 +@@ -243,9 +245,16 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
2478 + rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true);
2479 + else
2480 + rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false);
2481 +-
2482 +- if (rc < 0)
2483 ++ /*
2484 ++ * If a matching IEEE NAA 0x83 descriptor for the requested device
2485 ++ * is not located on this node, return COPY_ABORTED with ASC/ASCQ
2486 ++ * 0x0d/0x02 - COPY_TARGET_DEVICE_NOT_REACHABLE to request the
2487 ++ * initiator to fall back to normal copy method.
2488 ++ */
2489 ++ if (rc < 0) {
2490 ++ *sense_ret = TCM_COPY_TARGET_DEVICE_NOT_REACHABLE;
2491 + goto out;
2492 ++ }
2493 +
2494 + pr_debug("XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n",
2495 + xop->src_dev, &xop->src_tid_wwn[0]);
2496 +@@ -653,6 +662,7 @@ static int target_xcopy_read_source(
2497 + rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
2498 + remote_port, true);
2499 + if (rc < 0) {
2500 ++ ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
2501 + transport_generic_free_cmd(se_cmd, 0);
2502 + return rc;
2503 + }
2504 +@@ -664,6 +674,7 @@ static int target_xcopy_read_source(
2505 +
2506 + rc = target_xcopy_issue_pt_cmd(xpt_cmd);
2507 + if (rc < 0) {
2508 ++ ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
2509 + transport_generic_free_cmd(se_cmd, 0);
2510 + return rc;
2511 + }
2512 +@@ -714,6 +725,7 @@ static int target_xcopy_write_destination(
2513 + remote_port, false);
2514 + if (rc < 0) {
2515 + struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd;
2516 ++ ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
2517 + /*
2518 + * If the failure happened before the t_mem_list hand-off in
2519 + * target_xcopy_setup_pt_cmd(), Reset memory + clear flag so that
2520 +@@ -729,6 +741,7 @@ static int target_xcopy_write_destination(
2521 +
2522 + rc = target_xcopy_issue_pt_cmd(xpt_cmd);
2523 + if (rc < 0) {
2524 ++ ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
2525 + se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
2526 + transport_generic_free_cmd(se_cmd, 0);
2527 + return rc;
2528 +@@ -815,9 +828,14 @@ static void target_xcopy_do_work(struct work_struct *work)
2529 + out:
2530 + xcopy_pt_undepend_remotedev(xop);
2531 + kfree(xop);
2532 +-
2533 +- pr_warn("target_xcopy_do_work: Setting X-COPY CHECK_CONDITION -> sending response\n");
2534 +- ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
2535 ++ /*
2536 ++ * Don't override an error SCSI status if it has already been set
2537 ++ */
2538 ++ if (ec_cmd->scsi_status == SAM_STAT_GOOD) {
2539 ++ pr_warn_ratelimited("target_xcopy_do_work: rc: %d, Setting X-COPY"
2540 ++ " CHECK_CONDITION -> sending response\n", rc);
2541 ++ ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
2542 ++ }
2543 + target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION);
2544 + }
2545 +
2546 +@@ -875,7 +893,7 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
2547 + " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
2548 + tdll, sdll, inline_dl);
2549 +
2550 +- rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll);
2551 ++ rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret);
2552 + if (rc <= 0)
2553 + goto out;
2554 +
2555 +diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
2556 +index 95d293b7445a..dc2fcda54d53 100644
2557 +--- a/drivers/video/fbdev/efifb.c
2558 ++++ b/drivers/video/fbdev/efifb.c
2559 +@@ -52,9 +52,9 @@ static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
2560 + return 1;
2561 +
2562 + if (regno < 16) {
2563 +- red >>= 8;
2564 +- green >>= 8;
2565 +- blue >>= 8;
2566 ++ red >>= 16 - info->var.red.length;
2567 ++ green >>= 16 - info->var.green.length;
2568 ++ blue >>= 16 - info->var.blue.length;
2569 + ((u32 *)(info->pseudo_palette))[regno] =
2570 + (red << info->var.red.offset) |
2571 + (green << info->var.green.offset) |
2572 +diff --git a/fs/ceph/file.c b/fs/ceph/file.c
2573 +index 3c68e6aee2f0..c8222bfe1e56 100644
2574 +--- a/fs/ceph/file.c
2575 ++++ b/fs/ceph/file.c
2576 +@@ -929,7 +929,8 @@ again:
2577 + statret = __ceph_do_getattr(inode, page,
2578 + CEPH_STAT_CAP_INLINE_DATA, !!page);
2579 + if (statret < 0) {
2580 +- __free_page(page);
2581 ++ if (page)
2582 ++ __free_page(page);
2583 + if (statret == -ENODATA) {
2584 + BUG_ON(retry_op != READ_INLINE);
2585 + goto again;
2586 +diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
2587 +index 50b268483302..0a3544fb50f9 100644
2588 +--- a/fs/cifs/cifs_debug.c
2589 ++++ b/fs/cifs/cifs_debug.c
2590 +@@ -152,6 +152,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
2591 + list_for_each(tmp1, &cifs_tcp_ses_list) {
2592 + server = list_entry(tmp1, struct TCP_Server_Info,
2593 + tcp_ses_list);
2594 ++ seq_printf(m, "\nNumber of credits: %d", server->credits);
2595 + i++;
2596 + list_for_each(tmp2, &server->smb_ses_list) {
2597 + ses = list_entry(tmp2, struct cifs_ses,
2598 +@@ -255,7 +256,6 @@ static const struct file_operations cifs_debug_data_proc_fops = {
2599 + static ssize_t cifs_stats_proc_write(struct file *file,
2600 + const char __user *buffer, size_t count, loff_t *ppos)
2601 + {
2602 +- char c;
2603 + bool bv;
2604 + int rc;
2605 + struct list_head *tmp1, *tmp2, *tmp3;
2606 +@@ -263,11 +263,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
2607 + struct cifs_ses *ses;
2608 + struct cifs_tcon *tcon;
2609 +
2610 +- rc = get_user(c, buffer);
2611 +- if (rc)
2612 +- return rc;
2613 +-
2614 +- if (strtobool(&c, &bv) == 0) {
2615 ++ rc = kstrtobool_from_user(buffer, count, &bv);
2616 ++ if (rc == 0) {
2617 + #ifdef CONFIG_CIFS_STATS2
2618 + atomic_set(&totBufAllocCount, 0);
2619 + atomic_set(&totSmBufAllocCount, 0);
2620 +@@ -290,6 +287,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
2621 + }
2622 + }
2623 + spin_unlock(&cifs_tcp_ses_lock);
2624 ++ } else {
2625 ++ return rc;
2626 + }
2627 +
2628 + return count;
2629 +@@ -433,17 +432,17 @@ static int cifsFYI_proc_open(struct inode *inode, struct file *file)
2630 + static ssize_t cifsFYI_proc_write(struct file *file, const char __user *buffer,
2631 + size_t count, loff_t *ppos)
2632 + {
2633 +- char c;
2634 ++ char c[2] = { '\0' };
2635 + bool bv;
2636 + int rc;
2637 +
2638 +- rc = get_user(c, buffer);
2639 ++ rc = get_user(c[0], buffer);
2640 + if (rc)
2641 + return rc;
2642 +- if (strtobool(&c, &bv) == 0)
2643 ++ if (strtobool(c, &bv) == 0)
2644 + cifsFYI = bv;
2645 +- else if ((c > '1') && (c <= '9'))
2646 +- cifsFYI = (int) (c - '0'); /* see cifs_debug.h for meanings */
2647 ++ else if ((c[0] > '1') && (c[0] <= '9'))
2648 ++ cifsFYI = (int) (c[0] - '0'); /* see cifs_debug.h for meanings */
2649 +
2650 + return count;
2651 + }
2652 +@@ -471,20 +470,12 @@ static int cifs_linux_ext_proc_open(struct inode *inode, struct file *file)
2653 + static ssize_t cifs_linux_ext_proc_write(struct file *file,
2654 + const char __user *buffer, size_t count, loff_t *ppos)
2655 + {
2656 +- char c;
2657 +- bool bv;
2658 + int rc;
2659 +
2660 +- rc = get_user(c, buffer);
2661 ++ rc = kstrtobool_from_user(buffer, count, &linuxExtEnabled);
2662 + if (rc)
2663 + return rc;
2664 +
2665 +- rc = strtobool(&c, &bv);
2666 +- if (rc)
2667 +- return rc;
2668 +-
2669 +- linuxExtEnabled = bv;
2670 +-
2671 + return count;
2672 + }
2673 +
2674 +@@ -511,20 +502,12 @@ static int cifs_lookup_cache_proc_open(struct inode *inode, struct file *file)
2675 + static ssize_t cifs_lookup_cache_proc_write(struct file *file,
2676 + const char __user *buffer, size_t count, loff_t *ppos)
2677 + {
2678 +- char c;
2679 +- bool bv;
2680 + int rc;
2681 +
2682 +- rc = get_user(c, buffer);
2683 ++ rc = kstrtobool_from_user(buffer, count, &lookupCacheEnabled);
2684 + if (rc)
2685 + return rc;
2686 +
2687 +- rc = strtobool(&c, &bv);
2688 +- if (rc)
2689 +- return rc;
2690 +-
2691 +- lookupCacheEnabled = bv;
2692 +-
2693 + return count;
2694 + }
2695 +
2696 +@@ -551,20 +534,12 @@ static int traceSMB_proc_open(struct inode *inode, struct file *file)
2697 + static ssize_t traceSMB_proc_write(struct file *file, const char __user *buffer,
2698 + size_t count, loff_t *ppos)
2699 + {
2700 +- char c;
2701 +- bool bv;
2702 + int rc;
2703 +
2704 +- rc = get_user(c, buffer);
2705 ++ rc = kstrtobool_from_user(buffer, count, &traceSMB);
2706 + if (rc)
2707 + return rc;
2708 +
2709 +- rc = strtobool(&c, &bv);
2710 +- if (rc)
2711 +- return rc;
2712 +-
2713 +- traceSMB = bv;
2714 +-
2715 + return count;
2716 + }
2717 +
2718 +@@ -622,7 +597,6 @@ static ssize_t cifs_security_flags_proc_write(struct file *file,
2719 + int rc;
2720 + unsigned int flags;
2721 + char flags_string[12];
2722 +- char c;
2723 + bool bv;
2724 +
2725 + if ((count < 1) || (count > 11))
2726 +@@ -635,11 +609,10 @@ static ssize_t cifs_security_flags_proc_write(struct file *file,
2727 +
2728 + if (count < 3) {
2729 + /* single char or single char followed by null */
2730 +- c = flags_string[0];
2731 +- if (strtobool(&c, &bv) == 0) {
2732 ++ if (strtobool(flags_string, &bv) == 0) {
2733 + global_secflags = bv ? CIFSSEC_MAX : CIFSSEC_DEF;
2734 + return count;
2735 +- } else if (!isdigit(c)) {
2736 ++ } else if (!isdigit(flags_string[0])) {
2737 + cifs_dbg(VFS, "Invalid SecurityFlags: %s\n",
2738 + flags_string);
2739 + return -EINVAL;
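
The cifs_debug conversions above replace handlers that copied a single character with get_user() and handed &c to strtobool(), which expects a NUL-terminated string and so read one byte past the copy; kstrtobool_from_user() performs the bounded copy, termination, and parsing in one call. A user-space imitation of that combined helper (parse_bool_from_user is illustrative, not the kernel implementation):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* User-space imitation of kstrtobool_from_user(): copy at most a few
 * characters, NUL-terminate, then parse - the three steps the old
 * open-coded handlers got wrong (no termination).  Illustration only. */
static int parse_bool_from_user(const char *ubuf, size_t count, bool *res)
{
    char buf[4] = "";   /* zero-filled, so always NUL-terminated */
    memcpy(buf, ubuf, count < sizeof(buf) - 1 ? count : sizeof(buf) - 1);
    switch (buf[0]) {
    case '1': case 'y': case 'Y': case 't': case 'T': *res = true;  return 0;
    case '0': case 'n': case 'N': case 'f': case 'F': *res = false; return 0;
    default: return -EINVAL;
    }
}

int main(void)
{
    bool v = false;
    int rc = parse_bool_from_user("1\n", 2, &v);
    printf("rc=%d v=%d\n", rc, v);
    rc = parse_bool_from_user("x", 1, &v);
    printf("rc=%d\n", rc);
    return 0;
}
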
2740 +diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
2741 +index 66cf0f9fff89..c611ca2339d7 100644
2742 +--- a/fs/cifs/cifs_debug.h
2743 ++++ b/fs/cifs/cifs_debug.h
2744 +@@ -25,7 +25,7 @@
2745 + void cifs_dump_mem(char *label, void *data, int length);
2746 + void cifs_dump_detail(void *);
2747 + void cifs_dump_mids(struct TCP_Server_Info *);
2748 +-extern int traceSMB; /* flag which enables the function below */
2749 ++extern bool traceSMB; /* flag which enables the function below */
2750 + void dump_smb(void *, int);
2751 + #define CIFS_INFO 0x01
2752 + #define CIFS_RC 0x02
2753 +diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
2754 +index 450578097fb7..4f4fc9ff3636 100644
2755 +--- a/fs/cifs/cifsfs.c
2756 ++++ b/fs/cifs/cifsfs.c
2757 +@@ -54,10 +54,10 @@
2758 + #endif
2759 +
2760 + int cifsFYI = 0;
2761 +-int traceSMB = 0;
2762 ++bool traceSMB;
2763 + bool enable_oplocks = true;
2764 +-unsigned int linuxExtEnabled = 1;
2765 +-unsigned int lookupCacheEnabled = 1;
2766 ++bool linuxExtEnabled = true;
2767 ++bool lookupCacheEnabled = true;
2768 + unsigned int global_secflags = CIFSSEC_DEF;
2769 + /* unsigned int ntlmv2_support = 0; */
2770 + unsigned int sign_CIFS_PDUs = 1;
2771 +@@ -268,7 +268,7 @@ cifs_alloc_inode(struct super_block *sb)
2772 + cifs_inode->createtime = 0;
2773 + cifs_inode->epoch = 0;
2774 + #ifdef CONFIG_CIFS_SMB2
2775 +- get_random_bytes(cifs_inode->lease_key, SMB2_LEASE_KEY_SIZE);
2776 ++ generate_random_uuid(cifs_inode->lease_key);
2777 + #endif
2778 + /*
2779 + * Can not set i_flags here - they get immediately overwritten to zero
2780 +@@ -1210,7 +1210,6 @@ init_cifs(void)
2781 + GlobalTotalActiveXid = 0;
2782 + GlobalMaxActiveXid = 0;
2783 + spin_lock_init(&cifs_tcp_ses_lock);
2784 +- spin_lock_init(&cifs_file_list_lock);
2785 + spin_lock_init(&GlobalMid_Lock);
2786 +
2787 + if (cifs_max_pending < 2) {
2788 +diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
2789 +index 2b510c537a0d..c669a1471395 100644
2790 +--- a/fs/cifs/cifsglob.h
2791 ++++ b/fs/cifs/cifsglob.h
2792 +@@ -827,6 +827,7 @@ struct cifs_tcon {
2793 + struct list_head tcon_list;
2794 + int tc_count;
2795 + struct list_head openFileList;
2796 ++ spinlock_t open_file_lock; /* protects list above */
2797 + struct cifs_ses *ses; /* pointer to session associated with */
2798 + char treeName[MAX_TREE_SIZE + 1]; /* UNC name of resource in ASCII */
2799 + char *nativeFileSystem;
2800 +@@ -883,7 +884,7 @@ struct cifs_tcon {
2801 + #endif /* CONFIG_CIFS_STATS2 */
2802 + __u64 bytes_read;
2803 + __u64 bytes_written;
2804 +- spinlock_t stat_lock;
2805 ++ spinlock_t stat_lock; /* protects the two fields above */
2806 + #endif /* CONFIG_CIFS_STATS */
2807 + FILE_SYSTEM_DEVICE_INFO fsDevInfo;
2808 + FILE_SYSTEM_ATTRIBUTE_INFO fsAttrInfo; /* ok if fs name truncated */
2809 +@@ -1034,8 +1035,10 @@ struct cifs_fid_locks {
2810 + };
2811 +
2812 + struct cifsFileInfo {
2813 ++ /* following two lists are protected by tcon->open_file_lock */
2814 + struct list_head tlist; /* pointer to next fid owned by tcon */
2815 + struct list_head flist; /* next fid (file instance) for this inode */
2816 ++ /* lock list below protected by cifsi->lock_sem */
2817 + struct cifs_fid_locks *llist; /* brlocks held by this fid */
2818 + kuid_t uid; /* allows finding which FileInfo structure */
2819 + __u32 pid; /* process id who opened file */
2820 +@@ -1043,11 +1046,12 @@ struct cifsFileInfo {
2821 + /* BB add lock scope info here if needed */ ;
2822 + /* lock scope id (0 if none) */
2823 + struct dentry *dentry;
2824 +- unsigned int f_flags;
2825 + struct tcon_link *tlink;
2826 ++ unsigned int f_flags;
2827 + bool invalidHandle:1; /* file closed via session abend */
2828 + bool oplock_break_cancelled:1;
2829 +- int count; /* refcount protected by cifs_file_list_lock */
2830 ++ int count;
2831 ++ spinlock_t file_info_lock; /* protects four flag/count fields above */
2832 + struct mutex fh_mutex; /* prevents reopen race after dead ses*/
2833 + struct cifs_search_info srch_inf;
2834 + struct work_struct oplock_break; /* work for oplock breaks */
2835 +@@ -1114,7 +1118,7 @@ struct cifs_writedata {
2836 +
2837 + /*
2838 + * Take a reference on the file private data. Must be called with
2839 +- * cifs_file_list_lock held.
2840 ++ * cfile->file_info_lock held.
2841 + */
2842 + static inline void
2843 + cifsFileInfo_get_locked(struct cifsFileInfo *cifs_file)
2844 +@@ -1508,8 +1512,10 @@ require use of the stronger protocol */
2845 + * GlobalMid_Lock protects:
2846 + * list operations on pending_mid_q and oplockQ
2847 + * updates to XID counters, multiplex id and SMB sequence numbers
2848 +- * cifs_file_list_lock protects:
2849 +- * list operations on tcp and SMB session lists and tCon lists
2850 ++ * tcp_ses_lock protects:
2851 ++ * list operations on tcp and SMB session lists
2852 ++ * tcon->open_file_lock protects the list of open files hanging off the tcon
2853 ++ * cfile->file_info_lock protects counters and fields in cifs file struct
2854 + * f_owner.lock protects certain per file struct operations
2855 + * mapping->page_lock protects certain per page operations
2856 + *
2857 +@@ -1541,18 +1547,12 @@ GLOBAL_EXTERN struct list_head cifs_tcp_ses_list;
2858 + * tcp session, and the list of tcon's per smb session. It also protects
2859 + * the reference counters for the server, smb session, and tcon. Finally,
2860 + * changes to the tcon->tidStatus should be done while holding this lock.
2861 ++ * Generally the locks should be taken in the order tcp_ses_lock, then
2862 ++ * tcon->open_file_lock, then cfile->file_info_lock, since the
2863 ++ * structure order is cifs_socket-->cifs_ses-->cifs_tcon-->cifs_file.
2864 + */
2865 + GLOBAL_EXTERN spinlock_t cifs_tcp_ses_lock;
2866 +
2867 +-/*
2868 +- * This lock protects the cifs_file->llist and cifs_file->flist
2869 +- * list operations, and updates to some flags (cifs_file->invalidHandle)
2870 +- * It will be moved to either use the tcon->stat_lock or equivalent later.
2871 +- * If cifs_tcp_ses_lock and the lock below are both needed to be held, then
2872 +- * the cifs_tcp_ses_lock must be grabbed first and released last.
2873 +- */
2874 +-GLOBAL_EXTERN spinlock_t cifs_file_list_lock;
2875 +-
2876 + #ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */
2877 + /* Outstanding dir notify requests */
2878 + GLOBAL_EXTERN struct list_head GlobalDnotifyReqList;
2879 +@@ -1588,11 +1588,11 @@ GLOBAL_EXTERN atomic_t midCount;
2880 +
2881 + /* Misc globals */
2882 + GLOBAL_EXTERN bool enable_oplocks; /* enable or disable oplocks */
2883 +-GLOBAL_EXTERN unsigned int lookupCacheEnabled;
2884 ++GLOBAL_EXTERN bool lookupCacheEnabled;
2885 + GLOBAL_EXTERN unsigned int global_secflags; /* if on, session setup sent
2886 + with more secure ntlmssp2 challenge/resp */
2887 + GLOBAL_EXTERN unsigned int sign_CIFS_PDUs; /* enable smb packet signing */
2888 +-GLOBAL_EXTERN unsigned int linuxExtEnabled;/*enable Linux/Unix CIFS extensions*/
2889 ++GLOBAL_EXTERN bool linuxExtEnabled;/*enable Linux/Unix CIFS extensions*/
2890 + GLOBAL_EXTERN unsigned int CIFSMaxBufSize; /* max size not including hdr */
2891 + GLOBAL_EXTERN unsigned int cifs_min_rcv; /* min size of big ntwrk buf pool */
2892 + GLOBAL_EXTERN unsigned int cifs_min_small; /* min size of small buf pool */
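
The cifsglob.h hunks above retire the global cifs_file_list_lock in favor of a per-tcon open_file_lock and a per-file file_info_lock; deadlock freedom now depends on always nesting them in the documented order, as the file.c hunks further below do. A compilable sketch of that nesting, with pthread mutexes standing in for the spinlocks (compile with -pthread; the outermost level, cifs_tcp_ses_lock, is omitted):

#include <pthread.h>
#include <stdio.h>

/* pthread mutexes standing in for the kernel spinlocks (sketch only). */
static pthread_mutex_t open_file_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t file_info_lock = PTHREAD_MUTEX_INITIALIZER;

/* Always nest in the documented order: tcon->open_file_lock before
 * cfile->file_info_lock, mirroring cifs_tcon --> cifs_file. */
static void drop_file_reference(int *count)
{
    int remaining;

    pthread_mutex_lock(&open_file_lock);
    pthread_mutex_lock(&file_info_lock);
    remaining = --*count;
    pthread_mutex_unlock(&file_info_lock);

    if (remaining > 0) {             /* fast path: other users remain */
        pthread_mutex_unlock(&open_file_lock);
        return;
    }
    printf("last reference: unlink file from tcon list\n");
    pthread_mutex_unlock(&open_file_lock);
}

int main(void)
{
    int count = 2;
    drop_file_reference(&count);     /* 2 -> 1, fast path */
    drop_file_reference(&count);     /* 1 -> 0, teardown */
    return 0;
}
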
2893 +diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
2894 +index 76fcb50295a3..b1104ed8f54c 100644
2895 +--- a/fs/cifs/cifssmb.c
2896 ++++ b/fs/cifs/cifssmb.c
2897 +@@ -98,13 +98,13 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
2898 + struct list_head *tmp1;
2899 +
2900 + /* list all files open on tree connection and mark them invalid */
2901 +- spin_lock(&cifs_file_list_lock);
2902 ++ spin_lock(&tcon->open_file_lock);
2903 + list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
2904 + open_file = list_entry(tmp, struct cifsFileInfo, tlist);
2905 + open_file->invalidHandle = true;
2906 + open_file->oplock_break_cancelled = true;
2907 + }
2908 +- spin_unlock(&cifs_file_list_lock);
2909 ++ spin_unlock(&tcon->open_file_lock);
2910 + /*
2911 + * BB Add call to invalidate_inodes(sb) for all superblocks mounted
2912 + * to this tcon.
2913 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
2914 +index 61c3a5ab8637..812a8cb07c63 100644
2915 +--- a/fs/cifs/connect.c
2916 ++++ b/fs/cifs/connect.c
2917 +@@ -2200,7 +2200,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
2918 + memcpy(&tcp_ses->dstaddr, &volume_info->dstaddr,
2919 + sizeof(tcp_ses->dstaddr));
2920 + #ifdef CONFIG_CIFS_SMB2
2921 +- get_random_bytes(tcp_ses->client_guid, SMB2_CLIENT_GUID_SIZE);
2922 ++ generate_random_uuid(tcp_ses->client_guid);
2923 + #endif
2924 + /*
2925 + * at this point we are the only ones with the pointer
2926 +@@ -3693,14 +3693,16 @@ remote_path_check:
2927 + goto mount_fail_check;
2928 + }
2929 +
2930 +- rc = cifs_are_all_path_components_accessible(server,
2931 ++ if (rc != -EREMOTE) {
2932 ++ rc = cifs_are_all_path_components_accessible(server,
2933 + xid, tcon, cifs_sb,
2934 + full_path);
2935 +- if (rc != 0) {
2936 +- cifs_dbg(VFS, "cannot query dirs between root and final path, "
2937 +- "enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
2938 +- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
2939 +- rc = 0;
2940 ++ if (rc != 0) {
2941 ++ cifs_dbg(VFS, "cannot query dirs between root and final path, "
2942 ++ "enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
2943 ++ cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
2944 ++ rc = 0;
2945 ++ }
2946 + }
2947 + kfree(full_path);
2948 + }
2949 +diff --git a/fs/cifs/file.c b/fs/cifs/file.c
2950 +index 0068e82217c3..72f270d4bd17 100644
2951 +--- a/fs/cifs/file.c
2952 ++++ b/fs/cifs/file.c
2953 +@@ -305,6 +305,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
2954 + cfile->tlink = cifs_get_tlink(tlink);
2955 + INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
2956 + mutex_init(&cfile->fh_mutex);
2957 ++ spin_lock_init(&cfile->file_info_lock);
2958 +
2959 + cifs_sb_active(inode->i_sb);
2960 +
2961 +@@ -317,7 +318,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
2962 + oplock = 0;
2963 + }
2964 +
2965 +- spin_lock(&cifs_file_list_lock);
2966 ++ spin_lock(&tcon->open_file_lock);
2967 + if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
2968 + oplock = fid->pending_open->oplock;
2969 + list_del(&fid->pending_open->olist);
2970 +@@ -326,12 +327,13 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
2971 + server->ops->set_fid(cfile, fid, oplock);
2972 +
2973 + list_add(&cfile->tlist, &tcon->openFileList);
2974 ++
2975 + /* if readable file instance put first in list*/
2976 + if (file->f_mode & FMODE_READ)
2977 + list_add(&cfile->flist, &cinode->openFileList);
2978 + else
2979 + list_add_tail(&cfile->flist, &cinode->openFileList);
2980 +- spin_unlock(&cifs_file_list_lock);
2981 ++ spin_unlock(&tcon->open_file_lock);
2982 +
2983 + if (fid->purge_cache)
2984 + cifs_zap_mapping(inode);
2985 +@@ -343,16 +345,16 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
2986 + struct cifsFileInfo *
2987 + cifsFileInfo_get(struct cifsFileInfo *cifs_file)
2988 + {
2989 +- spin_lock(&cifs_file_list_lock);
2990 ++ spin_lock(&cifs_file->file_info_lock);
2991 + cifsFileInfo_get_locked(cifs_file);
2992 +- spin_unlock(&cifs_file_list_lock);
2993 ++ spin_unlock(&cifs_file->file_info_lock);
2994 + return cifs_file;
2995 + }
2996 +
2997 + /*
2998 + * Release a reference on the file private data. This may involve closing
2999 + * the filehandle out on the server. Must be called without holding
3000 +- * cifs_file_list_lock.
3001 ++ * tcon->open_file_lock and cifs_file->file_info_lock.
3002 + */
3003 + void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
3004 + {
3005 +@@ -367,11 +369,15 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
3006 + struct cifs_pending_open open;
3007 + bool oplock_break_cancelled;
3008 +
3009 +- spin_lock(&cifs_file_list_lock);
3010 ++ spin_lock(&tcon->open_file_lock);
3011 ++
3012 ++ spin_lock(&cifs_file->file_info_lock);
3013 + if (--cifs_file->count > 0) {
3014 +- spin_unlock(&cifs_file_list_lock);
3015 ++ spin_unlock(&cifs_file->file_info_lock);
3016 ++ spin_unlock(&tcon->open_file_lock);
3017 + return;
3018 + }
3019 ++ spin_unlock(&cifs_file->file_info_lock);
3020 +
3021 + if (server->ops->get_lease_key)
3022 + server->ops->get_lease_key(inode, &fid);
3023 +@@ -395,7 +401,8 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
3024 + set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
3025 + cifs_set_oplock_level(cifsi, 0);
3026 + }
3027 +- spin_unlock(&cifs_file_list_lock);
3028 ++
3029 ++ spin_unlock(&tcon->open_file_lock);
3030 +
3031 + oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);
3032 +
3033 +@@ -772,10 +779,10 @@ int cifs_closedir(struct inode *inode, struct file *file)
3034 + server = tcon->ses->server;
3035 +
3036 + cifs_dbg(FYI, "Freeing private data in close dir\n");
3037 +- spin_lock(&cifs_file_list_lock);
3038 ++ spin_lock(&cfile->file_info_lock);
3039 + if (server->ops->dir_needs_close(cfile)) {
3040 + cfile->invalidHandle = true;
3041 +- spin_unlock(&cifs_file_list_lock);
3042 ++ spin_unlock(&cfile->file_info_lock);
3043 + if (server->ops->close_dir)
3044 + rc = server->ops->close_dir(xid, tcon, &cfile->fid);
3045 + else
3046 +@@ -784,7 +791,7 @@ int cifs_closedir(struct inode *inode, struct file *file)
3047 + /* not much we can do if it fails anyway, ignore rc */
3048 + rc = 0;
3049 + } else
3050 +- spin_unlock(&cifs_file_list_lock);
3051 ++ spin_unlock(&cfile->file_info_lock);
3052 +
3053 + buf = cfile->srch_inf.ntwrk_buf_start;
3054 + if (buf) {
3055 +@@ -1720,12 +1727,13 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
3056 + {
3057 + struct cifsFileInfo *open_file = NULL;
3058 + struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
3059 ++ struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
3060 +
3061 + /* only filter by fsuid on multiuser mounts */
3062 + if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
3063 + fsuid_only = false;
3064 +
3065 +- spin_lock(&cifs_file_list_lock);
3066 ++ spin_lock(&tcon->open_file_lock);
3067 + /* we could simply get the first_list_entry since write-only entries
3068 + are always at the end of the list but since the first entry might
3069 + have a close pending, we go through the whole list */
3070 +@@ -1736,8 +1744,8 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
3071 + if (!open_file->invalidHandle) {
3072 + /* found a good file */
3073 + /* lock it so it will not be closed on us */
3074 +- cifsFileInfo_get_locked(open_file);
3075 +- spin_unlock(&cifs_file_list_lock);
3076 ++ cifsFileInfo_get(open_file);
3077 ++ spin_unlock(&tcon->open_file_lock);
3078 + return open_file;
3079 + } /* else might as well continue, and look for
3080 + another, or simply have the caller reopen it
3081 +@@ -1745,7 +1753,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
3082 + } else /* write only file */
3083 + break; /* write only files are last so must be done */
3084 + }
3085 +- spin_unlock(&cifs_file_list_lock);
3086 ++ spin_unlock(&tcon->open_file_lock);
3087 + return NULL;
3088 + }
3089 +
3090 +@@ -1754,6 +1762,7 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
3091 + {
3092 + struct cifsFileInfo *open_file, *inv_file = NULL;
3093 + struct cifs_sb_info *cifs_sb;
3094 ++ struct cifs_tcon *tcon;
3095 + bool any_available = false;
3096 + int rc;
3097 + unsigned int refind = 0;
3098 +@@ -1769,15 +1778,16 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
3099 + }
3100 +
3101 + cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
3102 ++ tcon = cifs_sb_master_tcon(cifs_sb);
3103 +
3104 + /* only filter by fsuid on multiuser mounts */
3105 + if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
3106 + fsuid_only = false;
3107 +
3108 +- spin_lock(&cifs_file_list_lock);
3109 ++ spin_lock(&tcon->open_file_lock);
3110 + refind_writable:
3111 + if (refind > MAX_REOPEN_ATT) {
3112 +- spin_unlock(&cifs_file_list_lock);
3113 ++ spin_unlock(&tcon->open_file_lock);
3114 + return NULL;
3115 + }
3116 + list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
3117 +@@ -1788,8 +1798,8 @@ refind_writable:
3118 + if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
3119 + if (!open_file->invalidHandle) {
3120 + /* found a good writable file */
3121 +- cifsFileInfo_get_locked(open_file);
3122 +- spin_unlock(&cifs_file_list_lock);
3123 ++ cifsFileInfo_get(open_file);
3124 ++ spin_unlock(&tcon->open_file_lock);
3125 + return open_file;
3126 + } else {
3127 + if (!inv_file)
3128 +@@ -1805,24 +1815,24 @@ refind_writable:
3129 +
3130 + if (inv_file) {
3131 + any_available = false;
3132 +- cifsFileInfo_get_locked(inv_file);
3133 ++ cifsFileInfo_get(inv_file);
3134 + }
3135 +
3136 +- spin_unlock(&cifs_file_list_lock);
3137 ++ spin_unlock(&tcon->open_file_lock);
3138 +
3139 + if (inv_file) {
3140 + rc = cifs_reopen_file(inv_file, false);
3141 + if (!rc)
3142 + return inv_file;
3143 + else {
3144 +- spin_lock(&cifs_file_list_lock);
3145 ++ spin_lock(&tcon->open_file_lock);
3146 + list_move_tail(&inv_file->flist,
3147 + &cifs_inode->openFileList);
3148 +- spin_unlock(&cifs_file_list_lock);
3149 ++ spin_unlock(&tcon->open_file_lock);
3150 + cifsFileInfo_put(inv_file);
3151 +- spin_lock(&cifs_file_list_lock);
3152 + ++refind;
3153 + inv_file = NULL;
3154 ++ spin_lock(&tcon->open_file_lock);
3155 + goto refind_writable;
3156 + }
3157 + }
3158 +@@ -3632,15 +3642,17 @@ static int cifs_readpage(struct file *file, struct page *page)
3159 + static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3160 + {
3161 + struct cifsFileInfo *open_file;
3162 ++ struct cifs_tcon *tcon =
3163 ++ cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));
3164 +
3165 +- spin_lock(&cifs_file_list_lock);
3166 ++ spin_lock(&tcon->open_file_lock);
3167 + list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
3168 + if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
3169 +- spin_unlock(&cifs_file_list_lock);
3170 ++ spin_unlock(&tcon->open_file_lock);
3171 + return 1;
3172 + }
3173 + }
3174 +- spin_unlock(&cifs_file_list_lock);
3175 ++ spin_unlock(&tcon->open_file_lock);
3176 + return 0;
3177 + }
3178 +
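
The fs/cifs/file.c hunks above complete the move from the single global cifs_file_list_lock to a two-level scheme: tcon->open_file_lock now guards the per-tcon and per-inode open-file lists, while the new per-file cfile->file_info_lock guards fields of one cifsFileInfo (the refcount, invalidHandle). A minimal sketch of the resulting nesting, following the ordering stated in the cifsFileInfo_put() comment (example_drop_ref is illustrative, not a function in the patch):

	/* lock order: tcon->open_file_lock, then cfile->file_info_lock */
	static void example_drop_ref(struct cifs_tcon *tcon,
				     struct cifsFileInfo *cfile)
	{
		spin_lock(&tcon->open_file_lock);	/* protects tlist/flist */
		spin_lock(&cfile->file_info_lock);	/* protects cfile->count */
		if (--cfile->count > 0) {
			spin_unlock(&cfile->file_info_lock);
			spin_unlock(&tcon->open_file_lock);
			return;
		}
		spin_unlock(&cfile->file_info_lock);
		list_del(&cfile->tlist);	/* still under open_file_lock */
		spin_unlock(&tcon->open_file_lock);
	}
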
3179 +diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
3180 +index 8442b8b8e0be..2396ab099849 100644
3181 +--- a/fs/cifs/misc.c
3182 ++++ b/fs/cifs/misc.c
3183 +@@ -120,6 +120,7 @@ tconInfoAlloc(void)
3184 + ++ret_buf->tc_count;
3185 + INIT_LIST_HEAD(&ret_buf->openFileList);
3186 + INIT_LIST_HEAD(&ret_buf->tcon_list);
3187 ++ spin_lock_init(&ret_buf->open_file_lock);
3188 + #ifdef CONFIG_CIFS_STATS
3189 + spin_lock_init(&ret_buf->stat_lock);
3190 + #endif
3191 +@@ -465,7 +466,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
3192 + continue;
3193 +
3194 + cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
3195 +- spin_lock(&cifs_file_list_lock);
3196 ++ spin_lock(&tcon->open_file_lock);
3197 + list_for_each(tmp2, &tcon->openFileList) {
3198 + netfile = list_entry(tmp2, struct cifsFileInfo,
3199 + tlist);
3200 +@@ -495,11 +496,11 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
3201 + &netfile->oplock_break);
3202 + netfile->oplock_break_cancelled = false;
3203 +
3204 +- spin_unlock(&cifs_file_list_lock);
3205 ++ spin_unlock(&tcon->open_file_lock);
3206 + spin_unlock(&cifs_tcp_ses_lock);
3207 + return true;
3208 + }
3209 +- spin_unlock(&cifs_file_list_lock);
3210 ++ spin_unlock(&tcon->open_file_lock);
3211 + spin_unlock(&cifs_tcp_ses_lock);
3212 + cifs_dbg(FYI, "No matching file for oplock break\n");
3213 + return true;
3214 +@@ -613,9 +614,9 @@ backup_cred(struct cifs_sb_info *cifs_sb)
3215 + void
3216 + cifs_del_pending_open(struct cifs_pending_open *open)
3217 + {
3218 +- spin_lock(&cifs_file_list_lock);
3219 ++ spin_lock(&tlink_tcon(open->tlink)->open_file_lock);
3220 + list_del(&open->olist);
3221 +- spin_unlock(&cifs_file_list_lock);
3222 ++ spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
3223 + }
3224 +
3225 + void
3226 +@@ -635,7 +636,7 @@ void
3227 + cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
3228 + struct cifs_pending_open *open)
3229 + {
3230 +- spin_lock(&cifs_file_list_lock);
3231 ++ spin_lock(&tlink_tcon(tlink)->open_file_lock);
3232 + cifs_add_pending_open_locked(fid, tlink, open);
3233 +- spin_unlock(&cifs_file_list_lock);
3234 ++ spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
3235 + }
3236 +diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
3237 +index b30a4a6d98a0..833e5844a2db 100644
3238 +--- a/fs/cifs/readdir.c
3239 ++++ b/fs/cifs/readdir.c
3240 +@@ -594,14 +594,14 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
3241 + is_dir_changed(file)) || (index_to_find < first_entry_in_buffer)) {
3242 + /* close and restart search */
3243 + cifs_dbg(FYI, "search backing up - close and restart search\n");
3244 +- spin_lock(&cifs_file_list_lock);
3245 ++ spin_lock(&cfile->file_info_lock);
3246 + if (server->ops->dir_needs_close(cfile)) {
3247 + cfile->invalidHandle = true;
3248 +- spin_unlock(&cifs_file_list_lock);
3249 ++ spin_unlock(&cfile->file_info_lock);
3250 + if (server->ops->close_dir)
3251 + server->ops->close_dir(xid, tcon, &cfile->fid);
3252 + } else
3253 +- spin_unlock(&cifs_file_list_lock);
3254 ++ spin_unlock(&cfile->file_info_lock);
3255 + if (cfile->srch_inf.ntwrk_buf_start) {
3256 + cifs_dbg(FYI, "freeing SMB ff cache buf on search rewind\n");
3257 + if (cfile->srch_inf.smallBuf)
3258 +diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
3259 +index 0ffa18094335..238759c146ba 100644
3260 +--- a/fs/cifs/smb2glob.h
3261 ++++ b/fs/cifs/smb2glob.h
3262 +@@ -61,4 +61,14 @@
3263 + /* Maximum buffer size value we can send with 1 credit */
3264 + #define SMB2_MAX_BUFFER_SIZE 65536
3265 +
3266 ++/*
3267 ++ * Maximum number of credits to keep available.
3268 ++ * This value is chosen somewhat arbitrarily. The Windows client
3269 ++ * defaults to 128 credits, the Windows server allows clients up to
3270 ++ * 512 credits, and the NetApp server does not limit clients at all.
3271 ++ * Choose a high enough value such that the client shouldn't limit
3272 ++ * performance.
3273 ++ */
3274 ++#define SMB2_MAX_CREDITS_AVAILABLE 32000
3275 ++
3276 + #endif /* _SMB2_GLOB_H */
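
This cap is consumed by the smb2_hdr_assemble() hunk further down: every request asks the server for up to two additional credits, clamped so the client's balance never exceeds SMB2_MAX_CREDITS_AVAILABLE. The clamp arithmetic in isolation (plain C sketch; the function name is illustrative):

	#define SMB2_MAX_CREDITS_AVAILABLE 32000

	/* credits to request in the next SMB2 header */
	static int credits_to_request(int held)
	{
		int room = SMB2_MAX_CREDITS_AVAILABLE - held;

		if (room <= 0)
			return 0;	/* at or over the cap: ask for none */
		return room < 2 ? room : 2;
	}

So credits_to_request(100) == 2 and credits_to_request(31999) == 1, matching the min_t() expression in the smb2pdu.c hunk.
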
3277 +diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
3278 +index 4f0231e685a9..1238cd3552f9 100644
3279 +--- a/fs/cifs/smb2inode.c
3280 ++++ b/fs/cifs/smb2inode.c
3281 +@@ -266,9 +266,15 @@ smb2_set_file_info(struct inode *inode, const char *full_path,
3282 + struct tcon_link *tlink;
3283 + int rc;
3284 +
3285 ++ if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) &&
3286 ++	    (buf->LastWriteTime == 0) && (buf->ChangeTime == 0) &&
3287 ++ (buf->Attributes == 0))
3288 ++ return 0; /* would be a no op, no sense sending this */
3289 ++
3290 + tlink = cifs_sb_tlink(cifs_sb);
3291 + if (IS_ERR(tlink))
3292 + return PTR_ERR(tlink);
3293 ++
3294 + rc = smb2_open_op_close(xid, tlink_tcon(tlink), cifs_sb, full_path,
3295 + FILE_WRITE_ATTRIBUTES, FILE_OPEN, 0, buf,
3296 + SMB2_OP_SET_INFO);
3297 +diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
3298 +index 1c5907019045..e5bc85e49be7 100644
3299 +--- a/fs/cifs/smb2misc.c
3300 ++++ b/fs/cifs/smb2misc.c
3301 +@@ -525,19 +525,19 @@ smb2_is_valid_lease_break(char *buffer)
3302 + list_for_each(tmp1, &server->smb_ses_list) {
3303 + ses = list_entry(tmp1, struct cifs_ses, smb_ses_list);
3304 +
3305 +- spin_lock(&cifs_file_list_lock);
3306 + list_for_each(tmp2, &ses->tcon_list) {
3307 + tcon = list_entry(tmp2, struct cifs_tcon,
3308 + tcon_list);
3309 ++ spin_lock(&tcon->open_file_lock);
3310 + cifs_stats_inc(
3311 + &tcon->stats.cifs_stats.num_oplock_brks);
3312 + if (smb2_tcon_has_lease(tcon, rsp, lw)) {
3313 +- spin_unlock(&cifs_file_list_lock);
3314 ++ spin_unlock(&tcon->open_file_lock);
3315 + spin_unlock(&cifs_tcp_ses_lock);
3316 + return true;
3317 + }
3318 ++ spin_unlock(&tcon->open_file_lock);
3319 + }
3320 +- spin_unlock(&cifs_file_list_lock);
3321 + }
3322 + }
3323 + spin_unlock(&cifs_tcp_ses_lock);
3324 +@@ -579,7 +579,7 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
3325 + tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
3326 +
3327 + cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
3328 +- spin_lock(&cifs_file_list_lock);
3329 ++ spin_lock(&tcon->open_file_lock);
3330 + list_for_each(tmp2, &tcon->openFileList) {
3331 + cfile = list_entry(tmp2, struct cifsFileInfo,
3332 + tlist);
3333 +@@ -591,7 +591,7 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
3334 +
3335 + cifs_dbg(FYI, "file id match, oplock break\n");
3336 + cinode = CIFS_I(d_inode(cfile->dentry));
3337 +-
3338 ++ spin_lock(&cfile->file_info_lock);
3339 + if (!CIFS_CACHE_WRITE(cinode) &&
3340 + rsp->OplockLevel == SMB2_OPLOCK_LEVEL_NONE)
3341 + cfile->oplock_break_cancelled = true;
3342 +@@ -613,14 +613,14 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
3343 + clear_bit(
3344 + CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
3345 + &cinode->flags);
3346 +-
3347 ++ spin_unlock(&cfile->file_info_lock);
3348 + queue_work(cifsiod_wq, &cfile->oplock_break);
3349 +
3350 +- spin_unlock(&cifs_file_list_lock);
3351 ++ spin_unlock(&tcon->open_file_lock);
3352 + spin_unlock(&cifs_tcp_ses_lock);
3353 + return true;
3354 + }
3355 +- spin_unlock(&cifs_file_list_lock);
3356 ++ spin_unlock(&tcon->open_file_lock);
3357 + spin_unlock(&cifs_tcp_ses_lock);
3358 + cifs_dbg(FYI, "No matching file for oplock break\n");
3359 + return true;
3360 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
3361 +index dd8543caa56e..be34b4860675 100644
3362 +--- a/fs/cifs/smb2ops.c
3363 ++++ b/fs/cifs/smb2ops.c
3364 +@@ -282,7 +282,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
3365 + cifs_dbg(FYI, "Link Speed %lld\n",
3366 + le64_to_cpu(out_buf->LinkSpeed));
3367 + }
3368 +-
3369 ++ kfree(out_buf);
3370 + return rc;
3371 + }
3372 + #endif /* STATS2 */
3373 +@@ -536,6 +536,7 @@ smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
3374 + server->ops->set_oplock_level(cinode, oplock, fid->epoch,
3375 + &fid->purge_cache);
3376 + cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
3377 ++ memcpy(cfile->fid.create_guid, fid->create_guid, 16);
3378 + }
3379 +
3380 + static void
3381 +@@ -694,6 +695,7 @@ smb2_clone_range(const unsigned int xid,
3382 +
3383 + cchunk_out:
3384 + kfree(pcchunk);
3385 ++ kfree(retbuf);
3386 + return rc;
3387 + }
3388 +
3389 +@@ -818,7 +820,6 @@ smb2_duplicate_extents(const unsigned int xid,
3390 + {
3391 + int rc;
3392 + unsigned int ret_data_len;
3393 +- char *retbuf = NULL;
3394 + struct duplicate_extents_to_file dup_ext_buf;
3395 + struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink);
3396 +
3397 +@@ -844,7 +845,7 @@ smb2_duplicate_extents(const unsigned int xid,
3398 + FSCTL_DUPLICATE_EXTENTS_TO_FILE,
3399 + true /* is_fsctl */, (char *)&dup_ext_buf,
3400 + sizeof(struct duplicate_extents_to_file),
3401 +- (char **)&retbuf,
3402 ++ NULL,
3403 + &ret_data_len);
3404 +
3405 + if (ret_data_len > 0)
3406 +@@ -867,7 +868,6 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
3407 + struct cifsFileInfo *cfile)
3408 + {
3409 + struct fsctl_set_integrity_information_req integr_info;
3410 +- char *retbuf = NULL;
3411 + unsigned int ret_data_len;
3412 +
3413 + integr_info.ChecksumAlgorithm = cpu_to_le16(CHECKSUM_TYPE_UNCHANGED);
3414 +@@ -879,7 +879,7 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
3415 + FSCTL_SET_INTEGRITY_INFORMATION,
3416 + true /* is_fsctl */, (char *)&integr_info,
3417 + sizeof(struct fsctl_set_integrity_information_req),
3418 +- (char **)&retbuf,
3419 ++ NULL,
3420 + &ret_data_len);
3421 +
3422 + }
3423 +@@ -1036,7 +1036,7 @@ smb2_set_lease_key(struct inode *inode, struct cifs_fid *fid)
3424 + static void
3425 + smb2_new_lease_key(struct cifs_fid *fid)
3426 + {
3427 +- get_random_bytes(fid->lease_key, SMB2_LEASE_KEY_SIZE);
3428 ++ generate_random_uuid(fid->lease_key);
3429 + }
3430 +
3431 + #define SMB2_SYMLINK_STRUCT_SIZE \
3432 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
3433 +index 0b6dc1942bdc..0dbbdf5e4aee 100644
3434 +--- a/fs/cifs/smb2pdu.c
3435 ++++ b/fs/cifs/smb2pdu.c
3436 +@@ -103,7 +103,21 @@ smb2_hdr_assemble(struct smb2_hdr *hdr, __le16 smb2_cmd /* command */ ,
3437 + hdr->ProtocolId[3] = 'B';
3438 + hdr->StructureSize = cpu_to_le16(64);
3439 + hdr->Command = smb2_cmd;
3440 +- hdr->CreditRequest = cpu_to_le16(2); /* BB make this dynamic */
3441 ++ if (tcon && tcon->ses && tcon->ses->server) {
3442 ++ struct TCP_Server_Info *server = tcon->ses->server;
3443 ++
3444 ++ spin_lock(&server->req_lock);
3445 ++ /* Request up to 2 credits but don't go over the limit. */
3446 ++ if (server->credits >= SMB2_MAX_CREDITS_AVAILABLE)
3447 ++ hdr->CreditRequest = cpu_to_le16(0);
3448 ++ else
3449 ++ hdr->CreditRequest = cpu_to_le16(
3450 ++ min_t(int, SMB2_MAX_CREDITS_AVAILABLE -
3451 ++ server->credits, 2));
3452 ++ spin_unlock(&server->req_lock);
3453 ++ } else {
3454 ++ hdr->CreditRequest = cpu_to_le16(2);
3455 ++ }
3456 + hdr->ProcessId = cpu_to_le32((__u16)current->tgid);
3457 +
3458 + if (!tcon)
3459 +@@ -593,6 +607,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
3460 + char *security_blob = NULL;
3461 + unsigned char *ntlmssp_blob = NULL;
3462 + bool use_spnego = false; /* else use raw ntlmssp */
3463 ++ u64 previous_session = ses->Suid;
3464 +
3465 + cifs_dbg(FYI, "Session Setup\n");
3466 +
3467 +@@ -630,6 +645,10 @@ ssetup_ntlmssp_authenticate:
3468 + return rc;
3469 +
3470 + req->hdr.SessionId = 0; /* First session, not a reauthenticate */
3471 ++
3472 ++ /* if reconnect, we need to send previous sess id, otherwise it is 0 */
3473 ++ req->PreviousSessionId = previous_session;
3474 ++
3475 + req->Flags = 0; /* MBZ */
3476 + /* to enable echos and oplocks */
3477 + req->hdr.CreditRequest = cpu_to_le16(3);
3478 +@@ -1167,7 +1186,7 @@ create_durable_v2_buf(struct cifs_fid *pfid)
3479 +
3480 + buf->dcontext.Timeout = 0; /* Should this be configurable by workload */
3481 + buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
3482 +- get_random_bytes(buf->dcontext.CreateGuid, 16);
3483 ++ generate_random_uuid(buf->dcontext.CreateGuid);
3484 + memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16);
3485 +
3486 + /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DH2Q" */
3487 +@@ -2059,6 +2078,7 @@ smb2_async_readv(struct cifs_readdata *rdata)
3488 + if (rdata->credits) {
3489 + buf->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
3490 + SMB2_MAX_BUFFER_SIZE));
3491 ++ buf->CreditRequest = buf->CreditCharge;
3492 + spin_lock(&server->req_lock);
3493 + server->credits += rdata->credits -
3494 + le16_to_cpu(buf->CreditCharge);
3495 +@@ -2245,6 +2265,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
3496 + if (wdata->credits) {
3497 + req->hdr.CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
3498 + SMB2_MAX_BUFFER_SIZE));
3499 ++ req->hdr.CreditRequest = req->hdr.CreditCharge;
3500 + spin_lock(&server->req_lock);
3501 + server->credits += wdata->credits -
3502 + le16_to_cpu(req->hdr.CreditCharge);
3503 +diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
3504 +index 4af52780ec35..b8f553b32dda 100644
3505 +--- a/fs/cifs/smb2pdu.h
3506 ++++ b/fs/cifs/smb2pdu.h
3507 +@@ -276,7 +276,7 @@ struct smb2_sess_setup_req {
3508 + __le32 Channel;
3509 + __le16 SecurityBufferOffset;
3510 + __le16 SecurityBufferLength;
3511 +- __le64 PreviousSessionId;
3512 ++ __u64 PreviousSessionId;
3513 + __u8 Buffer[1]; /* variable length GSS security buffer */
3514 + } __packed;
3515 +
3516 +diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
3517 +index 1420a3c614af..5d09ea585840 100644
3518 +--- a/fs/ext4/sysfs.c
3519 ++++ b/fs/ext4/sysfs.c
3520 +@@ -223,14 +223,18 @@ static struct attribute *ext4_attrs[] = {
3521 + EXT4_ATTR_FEATURE(lazy_itable_init);
3522 + EXT4_ATTR_FEATURE(batched_discard);
3523 + EXT4_ATTR_FEATURE(meta_bg_resize);
3524 ++#ifdef CONFIG_EXT4_FS_ENCRYPTION
3525 + EXT4_ATTR_FEATURE(encryption);
3526 ++#endif
3527 + EXT4_ATTR_FEATURE(metadata_csum_seed);
3528 +
3529 + static struct attribute *ext4_feat_attrs[] = {
3530 + ATTR_LIST(lazy_itable_init),
3531 + ATTR_LIST(batched_discard),
3532 + ATTR_LIST(meta_bg_resize),
3533 ++#ifdef CONFIG_EXT4_FS_ENCRYPTION
3534 + ATTR_LIST(encryption),
3535 ++#endif
3536 + ATTR_LIST(metadata_csum_seed),
3537 + NULL,
3538 + };
3539 +diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
3540 +index d67a16f2a45d..350f67fb5b9c 100644
3541 +--- a/fs/isofs/inode.c
3542 ++++ b/fs/isofs/inode.c
3543 +@@ -690,6 +690,11 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent)
3544 + pri_bh = NULL;
3545 +
3546 + root_found:
3547 ++ /* We don't support read-write mounts */
3548 ++ if (!(s->s_flags & MS_RDONLY)) {
3549 ++ error = -EACCES;
3550 ++ goto out_freebh;
3551 ++ }
3552 +
3553 + if (joliet_level && (pri == NULL || !opt.rock)) {
3554 + /* This is the case of Joliet with the norock mount flag.
3555 +@@ -1503,9 +1508,6 @@ struct inode *__isofs_iget(struct super_block *sb,
3556 + static struct dentry *isofs_mount(struct file_system_type *fs_type,
3557 + int flags, const char *dev_name, void *data)
3558 + {
3559 +- /* We don't support read-write mounts */
3560 +- if (!(flags & MS_RDONLY))
3561 +- return ERR_PTR(-EACCES);
3562 + return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super);
3563 + }
3564 +
3565 +diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
3566 +index ca181e81c765..fa1b8e0dcacf 100644
3567 +--- a/fs/jbd2/transaction.c
3568 ++++ b/fs/jbd2/transaction.c
3569 +@@ -1156,6 +1156,7 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
3570 + JBUFFER_TRACE(jh, "file as BJ_Reserved");
3571 + spin_lock(&journal->j_list_lock);
3572 + __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
3573 ++ spin_unlock(&journal->j_list_lock);
3574 + } else if (jh->b_transaction == journal->j_committing_transaction) {
3575 + /* first access by this transaction */
3576 + jh->b_modified = 0;
3577 +@@ -1163,8 +1164,8 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
3578 + JBUFFER_TRACE(jh, "set next transaction");
3579 + spin_lock(&journal->j_list_lock);
3580 + jh->b_next_transaction = transaction;
3581 ++ spin_unlock(&journal->j_list_lock);
3582 + }
3583 +- spin_unlock(&journal->j_list_lock);
3584 + jbd_unlock_bh_state(bh);
3585 +
3586 + /*
3587 +diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
3588 +index 5166adcfc0fb..7af5eeabc80e 100644
3589 +--- a/fs/nfs/delegation.c
3590 ++++ b/fs/nfs/delegation.c
3591 +@@ -41,6 +41,17 @@ void nfs_mark_delegation_referenced(struct nfs_delegation *delegation)
3592 + set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags);
3593 + }
3594 +
3595 ++static bool
3596 ++nfs4_is_valid_delegation(const struct nfs_delegation *delegation,
3597 ++ fmode_t flags)
3598 ++{
3599 ++ if (delegation != NULL && (delegation->type & flags) == flags &&
3600 ++ !test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) &&
3601 ++ !test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
3602 ++ return true;
3603 ++ return false;
3604 ++}
3605 ++
3606 + static int
3607 + nfs4_do_check_delegation(struct inode *inode, fmode_t flags, bool mark)
3608 + {
3609 +@@ -50,8 +61,7 @@ nfs4_do_check_delegation(struct inode *inode, fmode_t flags, bool mark)
3610 + flags &= FMODE_READ|FMODE_WRITE;
3611 + rcu_read_lock();
3612 + delegation = rcu_dereference(NFS_I(inode)->delegation);
3613 +- if (delegation != NULL && (delegation->type & flags) == flags &&
3614 +- !test_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
3615 ++ if (nfs4_is_valid_delegation(delegation, flags)) {
3616 + if (mark)
3617 + nfs_mark_delegation_referenced(delegation);
3618 + ret = 1;
3619 +@@ -892,7 +902,7 @@ bool nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode,
3620 + flags &= FMODE_READ|FMODE_WRITE;
3621 + rcu_read_lock();
3622 + delegation = rcu_dereference(nfsi->delegation);
3623 +- ret = (delegation != NULL && (delegation->type & flags) == flags);
3624 ++ ret = nfs4_is_valid_delegation(delegation, flags);
3625 + if (ret) {
3626 + nfs4_stateid_copy(dst, &delegation->stateid);
3627 + nfs_mark_delegation_referenced(delegation);
3628 +diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
3629 +index 6b1ce9825430..7f1a0fb8c493 100644
3630 +--- a/fs/nfs/nfs42proc.c
3631 ++++ b/fs/nfs/nfs42proc.c
3632 +@@ -269,6 +269,7 @@ int nfs42_proc_layoutstats_generic(struct nfs_server *server,
3633 + task = rpc_run_task(&task_setup);
3634 + if (IS_ERR(task))
3635 + return PTR_ERR(task);
3636 ++ rpc_put_task(task);
3637 + return 0;
3638 + }
3639 +
3640 +diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
3641 +index d854693a15b0..82dc3035ea45 100644
3642 +--- a/fs/nfs/nfs4state.c
3643 ++++ b/fs/nfs/nfs4state.c
3644 +@@ -1493,6 +1493,9 @@ restart:
3645 + __func__, status);
3646 + case -ENOENT:
3647 + case -ENOMEM:
3648 ++ case -EACCES:
3649 ++ case -EROFS:
3650 ++ case -EIO:
3651 + case -ESTALE:
3652 + /* Open state on this file cannot be recovered */
3653 + nfs4_state_mark_recovery_failed(state, status);
3654 +diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
3655 +index 9e52609cd683..ea0dd9ee138d 100644
3656 +--- a/fs/overlayfs/copy_up.c
3657 ++++ b/fs/overlayfs/copy_up.c
3658 +@@ -25,6 +25,7 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new)
3659 + ssize_t list_size, size, value_size = 0;
3660 + char *buf, *name, *value = NULL;
3661 + int uninitialized_var(error);
3662 ++ size_t slen;
3663 +
3664 + if (!old->d_inode->i_op->getxattr ||
3665 + !new->d_inode->i_op->getxattr)
3666 +@@ -47,7 +48,16 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new)
3667 + goto out;
3668 + }
3669 +
3670 +- for (name = buf; name < (buf + list_size); name += strlen(name) + 1) {
3671 ++ for (name = buf; list_size; name += slen) {
3672 ++ slen = strnlen(name, list_size) + 1;
3673 ++
3674 ++		/* underlying fs providing us with a broken xattr list? */
3675 ++ if (WARN_ON(slen > list_size)) {
3676 ++ error = -EIO;
3677 ++ break;
3678 ++ }
3679 ++ list_size -= slen;
3680 ++
3681 + if (ovl_is_private_xattr(name))
3682 + continue;
3683 + retry:
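
The rewritten loop bounds every step with strnlen() so a malformed name list from the lower filesystem cannot carry the cursor past the end of the buffer: slen counts the name plus its NUL, and a result larger than the bytes remaining means the final entry was unterminated. The same defensive walk in isolation (plain C sketch; the function name is hypothetical):

	#include <string.h>

	/* returns 0 on success, -1 if the NUL-separated list is malformed */
	static int walk_name_list(const char *buf, size_t list_size)
	{
		const char *name;
		size_t slen;

		for (name = buf; list_size; name += slen) {
			slen = strnlen(name, list_size) + 1;
			if (slen > list_size)
				return -1;	/* last entry not terminated */
			list_size -= slen;
			/* consume "name" here */
		}
		return 0;
	}
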
3684 +diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
3685 +index ba5ef733951f..327177df03a5 100644
3686 +--- a/fs/overlayfs/dir.c
3687 ++++ b/fs/overlayfs/dir.c
3688 +@@ -12,6 +12,7 @@
3689 + #include <linux/xattr.h>
3690 + #include <linux/security.h>
3691 + #include <linux/cred.h>
3692 ++#include <linux/atomic.h>
3693 + #include "overlayfs.h"
3694 +
3695 + void ovl_cleanup(struct inode *wdir, struct dentry *wdentry)
3696 +@@ -35,8 +36,10 @@ struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry)
3697 + {
3698 + struct dentry *temp;
3699 + char name[20];
3700 ++ static atomic_t temp_id = ATOMIC_INIT(0);
3701 +
3702 +- snprintf(name, sizeof(name), "#%lx", (unsigned long) dentry);
3703 ++ /* counter is allowed to wrap, since temp dentries are ephemeral */
3704 ++ snprintf(name, sizeof(name), "#%x", atomic_inc_return(&temp_id));
3705 +
3706 + temp = lookup_one_len(name, workdir, strlen(name));
3707 + if (!IS_ERR(temp) && temp->d_inode) {
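
Deriving the temp name from a global counter rather than the dentry pointer stops kernel addresses from leaking into workdir file names; wraparound is harmless because the entries are short-lived and, as the check below shows, a colliding name whose dentry still exists is simply rejected. Reduced to its shape (plain C sketch; the kernel uses atomic_inc_return()):

	#include <stdio.h>

	static unsigned int temp_id;	/* atomic in the kernel */

	static void make_temp_name(char *name, size_t len)
	{
		snprintf(name, len, "#%x", ++temp_id);	/* "#1", "#2", ... */
	}
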
3708 +diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
3709 +index 319c3a60cfa5..905caba36529 100644
3710 +--- a/fs/pstore/ram.c
3711 ++++ b/fs/pstore/ram.c
3712 +@@ -375,13 +375,14 @@ static void ramoops_free_przs(struct ramoops_context *cxt)
3713 + {
3714 + int i;
3715 +
3716 +- cxt->max_dump_cnt = 0;
3717 + if (!cxt->przs)
3718 + return;
3719 +
3720 +- for (i = 0; !IS_ERR_OR_NULL(cxt->przs[i]); i++)
3721 ++ for (i = 0; i < cxt->max_dump_cnt; i++)
3722 + persistent_ram_free(cxt->przs[i]);
3723 ++
3724 + kfree(cxt->przs);
3725 ++ cxt->max_dump_cnt = 0;
3726 + }
3727 +
3728 + static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
3729 +@@ -406,7 +407,7 @@ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
3730 + GFP_KERNEL);
3731 + if (!cxt->przs) {
3732 + dev_err(dev, "failed to initialize a prz array for dumps\n");
3733 +- goto fail_prz;
3734 ++ goto fail_mem;
3735 + }
3736 +
3737 + for (i = 0; i < cxt->max_dump_cnt; i++) {
3738 +@@ -417,6 +418,11 @@ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
3739 + err = PTR_ERR(cxt->przs[i]);
3740 + dev_err(dev, "failed to request mem region (0x%zx@0x%llx): %d\n",
3741 + cxt->record_size, (unsigned long long)*paddr, err);
3742 ++
3743 ++ while (i > 0) {
3744 ++ i--;
3745 ++ persistent_ram_free(cxt->przs[i]);
3746 ++ }
3747 + goto fail_prz;
3748 + }
3749 + *paddr += cxt->record_size;
3750 +@@ -424,7 +430,9 @@ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
3751 +
3752 + return 0;
3753 + fail_prz:
3754 +- ramoops_free_przs(cxt);
3755 ++ kfree(cxt->przs);
3756 ++fail_mem:
3757 ++ cxt->max_dump_cnt = 0;
3758 + return err;
3759 + }
3760 +
3761 +@@ -583,7 +591,6 @@ static int ramoops_remove(struct platform_device *pdev)
3762 + struct ramoops_context *cxt = &oops_cxt;
3763 +
3764 + pstore_unregister(&cxt->pstore);
3765 +- cxt->max_dump_cnt = 0;
3766 +
3767 + kfree(cxt->pstore.buf);
3768 + cxt->pstore.bufsize = 0;
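
Two problems are addressed above: the free path now iterates exactly max_dump_cnt zones instead of scanning for an IS_ERR_OR_NULL sentinel (which the error path never guaranteed), and a mid-loop allocation failure unwinds only the zones already created before falling through to the exit labels. The unwind idiom on its own (plain C sketch; make_obj/free_obj are hypothetical):

	/* build n objects, or fail having freed the partial set */
	static int init_all(void *objs[], int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			objs[i] = make_obj();
			if (!objs[i]) {
				while (i > 0)
					free_obj(objs[--i]);
				return -1;
			}
		}
		return 0;
	}
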
3769 +diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
3770 +index 76c3f80efdfa..364d2dffe5a6 100644
3771 +--- a/fs/pstore/ram_core.c
3772 ++++ b/fs/pstore/ram_core.c
3773 +@@ -47,43 +47,10 @@ static inline size_t buffer_start(struct persistent_ram_zone *prz)
3774 + return atomic_read(&prz->buffer->start);
3775 + }
3776 +
3777 +-/* increase and wrap the start pointer, returning the old value */
3778 +-static size_t buffer_start_add_atomic(struct persistent_ram_zone *prz, size_t a)
3779 +-{
3780 +- int old;
3781 +- int new;
3782 +-
3783 +- do {
3784 +- old = atomic_read(&prz->buffer->start);
3785 +- new = old + a;
3786 +- while (unlikely(new >= prz->buffer_size))
3787 +- new -= prz->buffer_size;
3788 +- } while (atomic_cmpxchg(&prz->buffer->start, old, new) != old);
3789 +-
3790 +- return old;
3791 +-}
3792 +-
3793 +-/* increase the size counter until it hits the max size */
3794 +-static void buffer_size_add_atomic(struct persistent_ram_zone *prz, size_t a)
3795 +-{
3796 +- size_t old;
3797 +- size_t new;
3798 +-
3799 +- if (atomic_read(&prz->buffer->size) == prz->buffer_size)
3800 +- return;
3801 +-
3802 +- do {
3803 +- old = atomic_read(&prz->buffer->size);
3804 +- new = old + a;
3805 +- if (new > prz->buffer_size)
3806 +- new = prz->buffer_size;
3807 +- } while (atomic_cmpxchg(&prz->buffer->size, old, new) != old);
3808 +-}
3809 +-
3810 + static DEFINE_RAW_SPINLOCK(buffer_lock);
3811 +
3812 + /* increase and wrap the start pointer, returning the old value */
3813 +-static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a)
3814 ++static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
3815 + {
3816 + int old;
3817 + int new;
3818 +@@ -103,7 +70,7 @@ static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a)
3819 + }
3820 +
3821 + /* increase the size counter until it hits the max size */
3822 +-static void buffer_size_add_locked(struct persistent_ram_zone *prz, size_t a)
3823 ++static void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
3824 + {
3825 + size_t old;
3826 + size_t new;
3827 +@@ -124,9 +91,6 @@ exit:
3828 + raw_spin_unlock_irqrestore(&buffer_lock, flags);
3829 + }
3830 +
3831 +-static size_t (*buffer_start_add)(struct persistent_ram_zone *, size_t) = buffer_start_add_atomic;
3832 +-static void (*buffer_size_add)(struct persistent_ram_zone *, size_t) = buffer_size_add_atomic;
3833 +-
3834 + static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
3835 + uint8_t *data, size_t len, uint8_t *ecc)
3836 + {
3837 +@@ -299,7 +263,7 @@ static void notrace persistent_ram_update(struct persistent_ram_zone *prz,
3838 + const void *s, unsigned int start, unsigned int count)
3839 + {
3840 + struct persistent_ram_buffer *buffer = prz->buffer;
3841 +- memcpy(buffer->data + start, s, count);
3842 ++ memcpy_toio(buffer->data + start, s, count);
3843 + persistent_ram_update_ecc(prz, start, count);
3844 + }
3845 +
3846 +@@ -322,8 +286,8 @@ void persistent_ram_save_old(struct persistent_ram_zone *prz)
3847 + }
3848 +
3849 + prz->old_log_size = size;
3850 +- memcpy(prz->old_log, &buffer->data[start], size - start);
3851 +- memcpy(prz->old_log + size - start, &buffer->data[0], start);
3852 ++ memcpy_fromio(prz->old_log, &buffer->data[start], size - start);
3853 ++ memcpy_fromio(prz->old_log + size - start, &buffer->data[0], start);
3854 + }
3855 +
3856 + int notrace persistent_ram_write(struct persistent_ram_zone *prz,
3857 +@@ -426,9 +390,6 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size,
3858 + return NULL;
3859 + }
3860 +
3861 +- buffer_start_add = buffer_start_add_locked;
3862 +- buffer_size_add = buffer_size_add_locked;
3863 +-
3864 + if (memtype)
3865 + va = ioremap(start, size);
3866 + else
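
With the _atomic variants deleted, the spinlocked buffer_start_add()/buffer_size_add() are used unconditionally, so the function-pointer indirection set up in persistent_ram_iomap() is no longer needed; buffer contents are also copied with memcpy_toio()/memcpy_fromio() because the region may be ioremap()ed rather than plain RAM. The wrap-around update that survives, sketched from the removed _atomic body (illustrative):

	/* advance *start by a, wrapping at size; returns the old value */
	static size_t start_add(size_t *start, size_t a, size_t size)
	{
		size_t old = *start;
		size_t new = old + a;

		while (new >= size)	/* may wrap more than once */
			new -= size;
		*start = new;
		return old;
	}
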
3867 +diff --git a/fs/super.c b/fs/super.c
3868 +index f5f4b328f860..d4d2591b77c8 100644
3869 +--- a/fs/super.c
3870 ++++ b/fs/super.c
3871 +@@ -1326,8 +1326,8 @@ int freeze_super(struct super_block *sb)
3872 + }
3873 + }
3874 + /*
3875 +- * This is just for debugging purposes so that fs can warn if it
3876 +- * sees write activity when frozen is set to SB_FREEZE_COMPLETE.
3877 ++ * For debugging purposes so that fs can warn if it sees write activity
3878 ++ * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
3879 + */
3880 + sb->s_writers.frozen = SB_FREEZE_COMPLETE;
3881 + up_write(&sb->s_umount);
3882 +@@ -1346,7 +1346,7 @@ int thaw_super(struct super_block *sb)
3883 + int error;
3884 +
3885 + down_write(&sb->s_umount);
3886 +- if (sb->s_writers.frozen == SB_UNFROZEN) {
3887 ++ if (sb->s_writers.frozen != SB_FREEZE_COMPLETE) {
3888 + up_write(&sb->s_umount);
3889 + return -EINVAL;
3890 + }
3891 +diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
3892 +index e8b01b721e99..b5bf23b34241 100644
3893 +--- a/fs/ubifs/xattr.c
3894 ++++ b/fs/ubifs/xattr.c
3895 +@@ -173,6 +173,7 @@ out_cancel:
3896 + host_ui->xattr_cnt -= 1;
3897 + host_ui->xattr_size -= CALC_DENT_SIZE(nm->len);
3898 + host_ui->xattr_size -= CALC_XATTR_BYTES(size);
3899 ++ host_ui->xattr_names -= nm->len;
3900 + mutex_unlock(&host_ui->ui_mutex);
3901 + out_free:
3902 + make_bad_inode(inode);
3903 +@@ -533,6 +534,7 @@ out_cancel:
3904 + host_ui->xattr_cnt += 1;
3905 + host_ui->xattr_size += CALC_DENT_SIZE(nm->len);
3906 + host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len);
3907 ++ host_ui->xattr_names += nm->len;
3908 + mutex_unlock(&host_ui->ui_mutex);
3909 + ubifs_release_budget(c, &req);
3910 + make_bad_inode(inode);
3911 +diff --git a/include/linux/devfreq-event.h b/include/linux/devfreq-event.h
3912 +index 0a83a1e648b0..4db00b02ca3f 100644
3913 +--- a/include/linux/devfreq-event.h
3914 ++++ b/include/linux/devfreq-event.h
3915 +@@ -148,11 +148,6 @@ static inline int devfreq_event_reset_event(struct devfreq_event_dev *edev)
3916 + return -EINVAL;
3917 + }
3918 +
3919 +-static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev)
3920 +-{
3921 +- return ERR_PTR(-EINVAL);
3922 +-}
3923 +-
3924 + static inline struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(
3925 + struct device *dev, int index)
3926 + {
3927 +diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
3928 +index e98425058f20..54048f336a1f 100644
3929 +--- a/include/linux/irqchip/arm-gic-v3.h
3930 ++++ b/include/linux/irqchip/arm-gic-v3.h
3931 +@@ -218,7 +218,7 @@
3932 + #define GITS_BASER_TYPE_SHIFT (56)
3933 + #define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7)
3934 + #define GITS_BASER_ENTRY_SIZE_SHIFT (48)
3935 +-#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1)
3936 ++#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
3937 + #define GITS_BASER_NonShareable (0UL << 10)
3938 + #define GITS_BASER_InnerShareable (1UL << 10)
3939 + #define GITS_BASER_OuterShareable (2UL << 10)
3940 +diff --git a/include/linux/kernel.h b/include/linux/kernel.h
3941 +index e571e592e53a..50220cab738c 100644
3942 +--- a/include/linux/kernel.h
3943 ++++ b/include/linux/kernel.h
3944 +@@ -356,6 +356,7 @@ int __must_check kstrtou16(const char *s, unsigned int base, u16 *res);
3945 + int __must_check kstrtos16(const char *s, unsigned int base, s16 *res);
3946 + int __must_check kstrtou8(const char *s, unsigned int base, u8 *res);
3947 + int __must_check kstrtos8(const char *s, unsigned int base, s8 *res);
3948 ++int __must_check kstrtobool(const char *s, bool *res);
3949 +
3950 + int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res);
3951 + int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res);
3952 +@@ -367,6 +368,7 @@ int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigne
3953 + int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res);
3954 + int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res);
3955 + int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res);
3956 ++int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res);
3957 +
3958 + static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res)
3959 + {
3960 +diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
3961 +index f09648d14694..782d4e814e21 100644
3962 +--- a/include/linux/lightnvm.h
3963 ++++ b/include/linux/lightnvm.h
3964 +@@ -1,6 +1,8 @@
3965 + #ifndef NVM_H
3966 + #define NVM_H
3967 +
3968 ++#include <linux/types.h>
3969 ++
3970 + enum {
3971 + NVM_IO_OK = 0,
3972 + NVM_IO_REQUEUE = 1,
3973 +@@ -11,10 +13,71 @@ enum {
3974 + NVM_IOTYPE_GC = 1,
3975 + };
3976 +
3977 ++#define NVM_BLK_BITS (16)
3978 ++#define NVM_PG_BITS (16)
3979 ++#define NVM_SEC_BITS (8)
3980 ++#define NVM_PL_BITS (8)
3981 ++#define NVM_LUN_BITS (8)
3982 ++#define NVM_CH_BITS (8)
3983 ++
3984 ++struct ppa_addr {
3985 ++ /* Generic structure for all addresses */
3986 ++ union {
3987 ++ struct {
3988 ++ u64 blk : NVM_BLK_BITS;
3989 ++ u64 pg : NVM_PG_BITS;
3990 ++ u64 sec : NVM_SEC_BITS;
3991 ++ u64 pl : NVM_PL_BITS;
3992 ++ u64 lun : NVM_LUN_BITS;
3993 ++ u64 ch : NVM_CH_BITS;
3994 ++ } g;
3995 ++
3996 ++ u64 ppa;
3997 ++ };
3998 ++};
3999 ++
4000 ++struct nvm_rq;
4001 ++struct nvm_id;
4002 ++struct nvm_dev;
4003 ++
4004 ++typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
4005 ++typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
4006 ++typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
4007 ++typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
4008 ++ nvm_l2p_update_fn *, void *);
4009 ++typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int,
4010 ++ nvm_bb_update_fn *, void *);
4011 ++typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int);
4012 ++typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
4013 ++typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
4014 ++typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
4015 ++typedef void (nvm_destroy_dma_pool_fn)(void *);
4016 ++typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
4017 ++ dma_addr_t *);
4018 ++typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);
4019 ++
4020 ++struct nvm_dev_ops {
4021 ++ nvm_id_fn *identity;
4022 ++ nvm_get_l2p_tbl_fn *get_l2p_tbl;
4023 ++ nvm_op_bb_tbl_fn *get_bb_tbl;
4024 ++ nvm_op_set_bb_fn *set_bb_tbl;
4025 ++
4026 ++ nvm_submit_io_fn *submit_io;
4027 ++ nvm_erase_blk_fn *erase_block;
4028 ++
4029 ++ nvm_create_dma_pool_fn *create_dma_pool;
4030 ++ nvm_destroy_dma_pool_fn *destroy_dma_pool;
4031 ++ nvm_dev_dma_alloc_fn *dev_dma_alloc;
4032 ++ nvm_dev_dma_free_fn *dev_dma_free;
4033 ++
4034 ++ unsigned int max_phys_sect;
4035 ++};
4036 ++
4037 ++
4038 ++
4039 + #ifdef CONFIG_NVM
4040 +
4041 + #include <linux/blkdev.h>
4042 +-#include <linux/types.h>
4043 + #include <linux/file.h>
4044 + #include <linux/dmapool.h>
4045 +
4046 +@@ -126,29 +189,6 @@ struct nvm_tgt_instance {
4047 + #define NVM_VERSION_MINOR 0
4048 + #define NVM_VERSION_PATCH 0
4049 +
4050 +-#define NVM_BLK_BITS (16)
4051 +-#define NVM_PG_BITS (16)
4052 +-#define NVM_SEC_BITS (8)
4053 +-#define NVM_PL_BITS (8)
4054 +-#define NVM_LUN_BITS (8)
4055 +-#define NVM_CH_BITS (8)
4056 +-
4057 +-struct ppa_addr {
4058 +- /* Generic structure for all addresses */
4059 +- union {
4060 +- struct {
4061 +- u64 blk : NVM_BLK_BITS;
4062 +- u64 pg : NVM_PG_BITS;
4063 +- u64 sec : NVM_SEC_BITS;
4064 +- u64 pl : NVM_PL_BITS;
4065 +- u64 lun : NVM_LUN_BITS;
4066 +- u64 ch : NVM_CH_BITS;
4067 +- } g;
4068 +-
4069 +- u64 ppa;
4070 +- };
4071 +-};
4072 +-
4073 + struct nvm_rq {
4074 + struct nvm_tgt_instance *ins;
4075 + struct nvm_dev *dev;
4076 +@@ -182,39 +222,6 @@ static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
4077 +
4078 + struct nvm_block;
4079 +
4080 +-typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
4081 +-typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
4082 +-typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
4083 +-typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
4084 +- nvm_l2p_update_fn *, void *);
4085 +-typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int,
4086 +- nvm_bb_update_fn *, void *);
4087 +-typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int);
4088 +-typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
4089 +-typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
4090 +-typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
4091 +-typedef void (nvm_destroy_dma_pool_fn)(void *);
4092 +-typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
4093 +- dma_addr_t *);
4094 +-typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);
4095 +-
4096 +-struct nvm_dev_ops {
4097 +- nvm_id_fn *identity;
4098 +- nvm_get_l2p_tbl_fn *get_l2p_tbl;
4099 +- nvm_op_bb_tbl_fn *get_bb_tbl;
4100 +- nvm_op_set_bb_fn *set_bb_tbl;
4101 +-
4102 +- nvm_submit_io_fn *submit_io;
4103 +- nvm_erase_blk_fn *erase_block;
4104 +-
4105 +- nvm_create_dma_pool_fn *create_dma_pool;
4106 +- nvm_destroy_dma_pool_fn *destroy_dma_pool;
4107 +- nvm_dev_dma_alloc_fn *dev_dma_alloc;
4108 +- nvm_dev_dma_free_fn *dev_dma_free;
4109 +-
4110 +- unsigned int max_phys_sect;
4111 +-};
4112 +-
4113 + struct nvm_lun {
4114 + int id;
4115 +
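
Hoisting struct ppa_addr and struct nvm_dev_ops above the CONFIG_NVM guard lets code built without NVM support still see the types. The union overlays the per-field view on a single u64, so an address can be assembled coordinate by coordinate and handed to hardware as one word (userspace C sketch; bitfield layout is compiler-defined in portable C, the kernel relies on its own ABI):

	#include <stdint.h>
	#include <stdio.h>

	struct ppa_addr {
		union {
			struct {
				uint64_t blk : 16;
				uint64_t pg  : 16;
				uint64_t sec : 8;
				uint64_t pl  : 8;
				uint64_t lun : 8;
				uint64_t ch  : 8;
			} g;
			uint64_t ppa;
		};
	};

	int main(void)
	{
		struct ppa_addr p = { .ppa = 0 };

		p.g.ch = 3;
		p.g.blk = 0x2a;
		printf("raw = 0x%016llx\n", (unsigned long long)p.ppa);
		return 0;
	}
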
4116 +diff --git a/include/linux/sem.h b/include/linux/sem.h
4117 +index 976ce3a19f1b..d0efd6e6c20a 100644
4118 +--- a/include/linux/sem.h
4119 ++++ b/include/linux/sem.h
4120 +@@ -21,6 +21,7 @@ struct sem_array {
4121 + struct list_head list_id; /* undo requests on this array */
4122 + int sem_nsems; /* no. of semaphores in array */
4123 + int complex_count; /* pending complex operations */
4124 ++ bool complex_mode; /* no parallel simple ops */
4125 + };
4126 +
4127 + #ifdef CONFIG_SYSVIPC
4128 +diff --git a/include/linux/string.h b/include/linux/string.h
4129 +index 9ef7795e65e4..aa30789b0f65 100644
4130 +--- a/include/linux/string.h
4131 ++++ b/include/linux/string.h
4132 +@@ -127,7 +127,11 @@ extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
4133 + extern void argv_free(char **argv);
4134 +
4135 + extern bool sysfs_streq(const char *s1, const char *s2);
4136 +-extern int strtobool(const char *s, bool *res);
4137 ++extern int kstrtobool(const char *s, bool *res);
4138 ++static inline int strtobool(const char *s, bool *res)
4139 ++{
4140 ++ return kstrtobool(s, res);
4141 ++}
4142 +
4143 + #ifdef CONFIG_BINARY_PRINTF
4144 + int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
4145 +diff --git a/include/linux/swap.h b/include/linux/swap.h
4146 +index 7ba7dccaf0e7..d8ca2eaa3a8b 100644
4147 +--- a/include/linux/swap.h
4148 ++++ b/include/linux/swap.h
4149 +@@ -266,6 +266,7 @@ static inline void workingset_node_pages_inc(struct radix_tree_node *node)
4150 +
4151 + static inline void workingset_node_pages_dec(struct radix_tree_node *node)
4152 + {
4153 ++ VM_WARN_ON_ONCE(!workingset_node_pages(node));
4154 + node->count--;
4155 + }
4156 +
4157 +@@ -281,6 +282,7 @@ static inline void workingset_node_shadows_inc(struct radix_tree_node *node)
4158 +
4159 + static inline void workingset_node_shadows_dec(struct radix_tree_node *node)
4160 + {
4161 ++ VM_WARN_ON_ONCE(!workingset_node_shadows(node));
4162 + node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
4163 + }
4164 +
4165 +diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
4166 +index 59081c73b296..6afc6f388edf 100644
4167 +--- a/include/target/target_core_base.h
4168 ++++ b/include/target/target_core_base.h
4169 +@@ -180,6 +180,7 @@ enum tcm_sense_reason_table {
4170 + TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED = R(0x15),
4171 + TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED = R(0x16),
4172 + TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED = R(0x17),
4173 ++ TCM_COPY_TARGET_DEVICE_NOT_REACHABLE = R(0x18),
4174 + #undef R
4175 + };
4176 +
4177 +diff --git a/ipc/sem.c b/ipc/sem.c
4178 +index 20d07008ad5e..9862c3d1c26d 100644
4179 +--- a/ipc/sem.c
4180 ++++ b/ipc/sem.c
4181 +@@ -155,14 +155,21 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
4182 +
4183 + /*
4184 + * Locking:
4185 ++ * a) global sem_lock() for read/write
4186 + * sem_undo.id_next,
4187 + * sem_array.complex_count,
4188 +- * sem_array.pending{_alter,_cont},
4189 +- * sem_array.sem_undo: global sem_lock() for read/write
4190 +- * sem_undo.proc_next: only "current" is allowed to read/write that field.
4191 ++ * sem_array.complex_mode
4192 ++ * sem_array.pending{_alter,_const},
4193 ++ * sem_array.sem_undo
4194 + *
4195 ++ * b) global or semaphore sem_lock() for read/write:
4196 + * sem_array.sem_base[i].pending_{const,alter}:
4197 +- * global or semaphore sem_lock() for read/write
4198 ++ * sem_array.complex_mode (for read)
4199 ++ *
4200 ++ * c) special:
4201 ++ * sem_undo_list.list_proc:
4202 ++ * * undo_list->lock for write
4203 ++ * * rcu for read
4204 + */
4205 +
4206 + #define sc_semmsl sem_ctls[0]
4207 +@@ -263,24 +270,25 @@ static void sem_rcu_free(struct rcu_head *head)
4208 + #define ipc_smp_acquire__after_spin_is_unlocked() smp_rmb()
4209 +
4210 + /*
4211 +- * Wait until all currently ongoing simple ops have completed.
4212 ++ * Enter the mode suitable for non-simple operations:
4213 + * Caller must own sem_perm.lock.
4214 +- * New simple ops cannot start, because simple ops first check
4215 +- * that sem_perm.lock is free.
4216 +- * that a) sem_perm.lock is free and b) complex_count is 0.
4217 + */
4218 +-static void sem_wait_array(struct sem_array *sma)
4219 ++static void complexmode_enter(struct sem_array *sma)
4220 + {
4221 + int i;
4222 + struct sem *sem;
4223 +
4224 +- if (sma->complex_count) {
4225 +- /* The thread that increased sma->complex_count waited on
4226 +- * all sem->lock locks. Thus we don't need to wait again.
4227 +- */
4228 ++ if (sma->complex_mode) {
4229 ++ /* We are already in complex_mode. Nothing to do */
4230 + return;
4231 + }
4232 +
4233 ++	/* We need a full barrier after setting complex_mode:
4234 ++ * The write to complex_mode must be visible
4235 ++ * before we read the first sem->lock spinlock state.
4236 ++ */
4237 ++ smp_store_mb(sma->complex_mode, true);
4238 ++
4239 + for (i = 0; i < sma->sem_nsems; i++) {
4240 + sem = sma->sem_base + i;
4241 + spin_unlock_wait(&sem->lock);
4242 +@@ -289,6 +297,28 @@ static void sem_wait_array(struct sem_array *sma)
4243 + }
4244 +
4245 + /*
4246 ++ * Try to leave the mode that disallows simple operations:
4247 ++ * Caller must own sem_perm.lock.
4248 ++ */
4249 ++static void complexmode_tryleave(struct sem_array *sma)
4250 ++{
4251 ++ if (sma->complex_count) {
4252 ++ /* Complex ops are sleeping.
4253 ++ * We must stay in complex mode
4254 ++ */
4255 ++ return;
4256 ++ }
4257 ++ /*
4258 ++ * Immediately after setting complex_mode to false,
4259 ++ * a simple op can start. Thus: all memory writes
4260 ++ * performed by the current operation must be visible
4261 ++ * before we set complex_mode to false.
4262 ++ */
4263 ++ smp_store_release(&sma->complex_mode, false);
4264 ++}
4265 ++
4266 ++#define SEM_GLOBAL_LOCK (-1)
4267 ++/*
4268 + * If the request contains only one semaphore operation, and there are
4269 + * no complex transactions pending, lock only the semaphore involved.
4270 + * Otherwise, lock the entire semaphore array, since we either have
4271 +@@ -304,56 +334,42 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
4272 + /* Complex operation - acquire a full lock */
4273 + ipc_lock_object(&sma->sem_perm);
4274 +
4275 +- /* And wait until all simple ops that are processed
4276 +- * right now have dropped their locks.
4277 +- */
4278 +- sem_wait_array(sma);
4279 +- return -1;
4280 ++ /* Prevent parallel simple ops */
4281 ++ complexmode_enter(sma);
4282 ++ return SEM_GLOBAL_LOCK;
4283 + }
4284 +
4285 + /*
4286 + * Only one semaphore affected - try to optimize locking.
4287 +- * The rules are:
4288 +- * - optimized locking is possible if no complex operation
4289 +- * is either enqueued or processed right now.
4290 +- * - The test for enqueued complex ops is simple:
4291 +- * sma->complex_count != 0
4292 +- * - Testing for complex ops that are processed right now is
4293 +- * a bit more difficult. Complex ops acquire the full lock
4294 +- * and first wait that the running simple ops have completed.
4295 +- * (see above)
4296 +- * Thus: If we own a simple lock and the global lock is free
4297 +- * and complex_count is now 0, then it will stay 0 and
4298 +- * thus just locking sem->lock is sufficient.
4299 ++ * Optimized locking is possible if no complex operation
4300 ++ * is either enqueued or processed right now.
4301 ++ *
4302 ++ * Both facts are tracked by complex_mode.
4303 + */
4304 + sem = sma->sem_base + sops->sem_num;
4305 +
4306 +- if (sma->complex_count == 0) {
4307 ++ /*
4308 ++ * Initial check for complex_mode. Just an optimization,
4309 ++ * no locking, no memory barrier.
4310 ++ */
4311 ++ if (!sma->complex_mode) {
4312 + /*
4313 + * It appears that no complex operation is around.
4314 + * Acquire the per-semaphore lock.
4315 + */
4316 + spin_lock(&sem->lock);
4317 +
4318 +- /* Then check that the global lock is free */
4319 +- if (!spin_is_locked(&sma->sem_perm.lock)) {
4320 +- /*
4321 +- * We need a memory barrier with acquire semantics,
4322 +- * otherwise we can race with another thread that does:
4323 +- * complex_count++;
4324 +- * spin_unlock(sem_perm.lock);
4325 +- */
4326 +- ipc_smp_acquire__after_spin_is_unlocked();
4327 ++ /*
4328 ++ * See 51d7d5205d33
4329 ++ * ("powerpc: Add smp_mb() to arch_spin_is_locked()"):
4330 ++ * A full barrier is required: the write of sem->lock
4331 ++ * must be visible before the read is executed
4332 ++ */
4333 ++ smp_mb();
4334 +
4335 +- /*
4336 +- * Now repeat the test of complex_count:
4337 +- * It can't change anymore until we drop sem->lock.
4338 +- * Thus: if is now 0, then it will stay 0.
4339 +- */
4340 +- if (sma->complex_count == 0) {
4341 +- /* fast path successful! */
4342 +- return sops->sem_num;
4343 +- }
4344 ++ if (!smp_load_acquire(&sma->complex_mode)) {
4345 ++ /* fast path successful! */
4346 ++ return sops->sem_num;
4347 + }
4348 + spin_unlock(&sem->lock);
4349 + }
4350 +@@ -373,15 +389,16 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
4351 + /* Not a false alarm, thus complete the sequence for a
4352 + * full lock.
4353 + */
4354 +- sem_wait_array(sma);
4355 +- return -1;
4356 ++ complexmode_enter(sma);
4357 ++ return SEM_GLOBAL_LOCK;
4358 + }
4359 + }
4360 +
4361 + static inline void sem_unlock(struct sem_array *sma, int locknum)
4362 + {
4363 +- if (locknum == -1) {
4364 ++ if (locknum == SEM_GLOBAL_LOCK) {
4365 + unmerge_queues(sma);
4366 ++ complexmode_tryleave(sma);
4367 + ipc_unlock_object(&sma->sem_perm);
4368 + } else {
4369 + struct sem *sem = sma->sem_base + locknum;
4370 +@@ -533,6 +550,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
4371 + }
4372 +
4373 + sma->complex_count = 0;
4374 ++ sma->complex_mode = true; /* dropped by sem_unlock below */
4375 + INIT_LIST_HEAD(&sma->pending_alter);
4376 + INIT_LIST_HEAD(&sma->pending_const);
4377 + INIT_LIST_HEAD(&sma->list_id);
4378 +@@ -2186,10 +2204,10 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
4379 + /*
4380 + * The proc interface isn't aware of sem_lock(), it calls
4381 + * ipc_lock_object() directly (in sysvipc_find_ipc).
4382 +- * In order to stay compatible with sem_lock(), we must wait until
4383 +- * all simple semop() calls have left their critical regions.
4384 ++ * In order to stay compatible with sem_lock(), we must
4385 ++ * enter / leave complex_mode.
4386 + */
4387 +- sem_wait_array(sma);
4388 ++ complexmode_enter(sma);
4389 +
4390 + sem_otime = get_semotime(sma);
4391 +
4392 +@@ -2206,6 +2224,8 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
4393 + sem_otime,
4394 + sma->sem_ctime);
4395 +
4396 ++ complexmode_tryleave(sma);
4397 ++
4398 + return 0;
4399 + }
4400 + #endif
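
The redesign replaces counting-and-waiting with a complex_mode flag and two memory-ordering pairings: complexmode_enter() publishes the flag with smp_store_mb() before inspecting each per-semaphore lock; the simple-op fast path takes sem->lock, issues the full smp_mb() discussed in the 51d7d5205d33 reference, then re-checks the flag with acquire semantics; and complexmode_tryleave() clears it with a release store so everything written under the global lock is visible before simple ops resume. The release/acquire pairing in isolation (sketch, not the kernel code):

	/* complex op finishing: prior writes are published first */
	static void leave_complex(struct sem_array *sma)
	{
		smp_store_release(&sma->complex_mode, false);
	}

	/* simple-op fast path, called with sem->lock already held */
	static bool fast_path_ok(struct sem_array *sma)
	{
		smp_mb();	/* order our sem->lock store vs. the load below */
		return !smp_load_acquire(&sma->complex_mode);
	}
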
4401 +diff --git a/lib/kstrtox.c b/lib/kstrtox.c
4402 +index 94be244e8441..d8a5cf66c316 100644
4403 +--- a/lib/kstrtox.c
4404 ++++ b/lib/kstrtox.c
4405 +@@ -321,6 +321,70 @@ int kstrtos8(const char *s, unsigned int base, s8 *res)
4406 + }
4407 + EXPORT_SYMBOL(kstrtos8);
4408 +
4409 ++/**
4410 ++ * kstrtobool - convert common user inputs into boolean values
4411 ++ * @s: input string
4412 ++ * @res: result
4413 ++ *
4414 ++ * This routine returns 0 iff the first character is one of 'Yy1Nn0', or
4415 ++ * [oO][NnFf] for "on" and "off". Otherwise it will return -EINVAL. Value
4416 ++ * pointed to by res is updated upon finding a match.
4417 ++ */
4418 ++int kstrtobool(const char *s, bool *res)
4419 ++{
4420 ++ if (!s)
4421 ++ return -EINVAL;
4422 ++
4423 ++ switch (s[0]) {
4424 ++ case 'y':
4425 ++ case 'Y':
4426 ++ case '1':
4427 ++ *res = true;
4428 ++ return 0;
4429 ++ case 'n':
4430 ++ case 'N':
4431 ++ case '0':
4432 ++ *res = false;
4433 ++ return 0;
4434 ++ case 'o':
4435 ++ case 'O':
4436 ++ switch (s[1]) {
4437 ++ case 'n':
4438 ++ case 'N':
4439 ++ *res = true;
4440 ++ return 0;
4441 ++ case 'f':
4442 ++ case 'F':
4443 ++ *res = false;
4444 ++ return 0;
4445 ++ default:
4446 ++ break;
4447 ++ }
4448 ++ default:
4449 ++ break;
4450 ++ }
4451 ++
4452 ++ return -EINVAL;
4453 ++}
4454 ++EXPORT_SYMBOL(kstrtobool);
4455 ++
4456 ++/*
4457 ++ * Since "base" would be a nonsense argument, this open-codes the
4458 ++ * _from_user helper instead of using the helper macro below.
4459 ++ */
4460 ++int kstrtobool_from_user(const char __user *s, size_t count, bool *res)
4461 ++{
4462 ++ /* Longest string needed to differentiate, newline, terminator */
4463 ++ char buf[4];
4464 ++
4465 ++ count = min(count, sizeof(buf) - 1);
4466 ++ if (copy_from_user(buf, s, count))
4467 ++ return -EFAULT;
4468 ++ buf[count] = '\0';
4469 ++ return kstrtobool(buf, res);
4470 ++}
4471 ++EXPORT_SYMBOL(kstrtobool_from_user);
4472 ++
4473 + #define kstrto_from_user(f, g, type) \
4474 + int f(const char __user *s, size_t count, unsigned int base, type *res) \
4475 + { \
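
The kstrtobool() added above becomes the common parser for boolean input from
user space. A sketch of typical use in a sysfs store handler (enable_store and
the enable flag are hypothetical; attribute wiring and includes are omitted):

static bool enable;

static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t count)
{
        bool val;
        int err;

        err = kstrtobool(buf, &val);  /* accepts y/Y/1, n/N/0, "on"/"off" */
        if (err)
                return err;

        enable = val;
        return count;                 /* consume the whole write */
}
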
4476 +diff --git a/lib/string.c b/lib/string.c
4477 +index 0323c0d5629a..1a90db9bc6e1 100644
4478 +--- a/lib/string.c
4479 ++++ b/lib/string.c
4480 +@@ -630,35 +630,6 @@ bool sysfs_streq(const char *s1, const char *s2)
4481 + }
4482 + EXPORT_SYMBOL(sysfs_streq);
4483 +
4484 +-/**
4485 +- * strtobool - convert common user inputs into boolean values
4486 +- * @s: input string
4487 +- * @res: result
4488 +- *
4489 +- * This routine returns 0 iff the first character is one of 'Yy1Nn0'.
4490 +- * Otherwise it will return -EINVAL. Value pointed to by res is
4491 +- * updated upon finding a match.
4492 +- */
4493 +-int strtobool(const char *s, bool *res)
4494 +-{
4495 +- switch (s[0]) {
4496 +- case 'y':
4497 +- case 'Y':
4498 +- case '1':
4499 +- *res = true;
4500 +- break;
4501 +- case 'n':
4502 +- case 'N':
4503 +- case '0':
4504 +- *res = false;
4505 +- break;
4506 +- default:
4507 +- return -EINVAL;
4508 +- }
4509 +- return 0;
4510 +-}
4511 +-EXPORT_SYMBOL(strtobool);
4512 +-
4513 + #ifndef __HAVE_ARCH_MEMSET
4514 + /**
4515 + * memset - Fill a region of memory with the given value
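
With strtobool() removed from lib/string.c here, existing callers keep
compiling because upstream retains the symbol as a trivial inline wrapper over
kstrtobool() in include/linux/string.h. A sketch of that shim, assuming the
matching header change lands elsewhere in this patch:

/* sketch: strtobool() survives as an alias for the new kstrtobool() */
static inline int strtobool(const char *s, bool *res)
{
        return kstrtobool(s, res);
}
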
4516 +diff --git a/mm/filemap.c b/mm/filemap.c
4517 +index 1bb007624b53..c588d1222b2a 100644
4518 +--- a/mm/filemap.c
4519 ++++ b/mm/filemap.c
4520 +@@ -109,6 +109,48 @@
4521 + * ->tasklist_lock (memory_failure, collect_procs_ao)
4522 + */
4523 +
4524 ++static int page_cache_tree_insert(struct address_space *mapping,
4525 ++ struct page *page, void **shadowp)
4526 ++{
4527 ++ struct radix_tree_node *node;
4528 ++ void **slot;
4529 ++ int error;
4530 ++
4531 ++ error = __radix_tree_create(&mapping->page_tree, page->index,
4532 ++ &node, &slot);
4533 ++ if (error)
4534 ++ return error;
4535 ++ if (*slot) {
4536 ++ void *p;
4537 ++
4538 ++ p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
4539 ++ if (!radix_tree_exceptional_entry(p))
4540 ++ return -EEXIST;
4541 ++ if (shadowp)
4542 ++ *shadowp = p;
4543 ++ mapping->nrshadows--;
4544 ++ if (node)
4545 ++ workingset_node_shadows_dec(node);
4546 ++ }
4547 ++ radix_tree_replace_slot(slot, page);
4548 ++ mapping->nrpages++;
4549 ++ if (node) {
4550 ++ workingset_node_pages_inc(node);
4551 ++ /*
4552 ++ * Don't track node that contains actual pages.
4553 ++ *
4554 ++ * Avoid acquiring the list_lru lock if already
4555 ++ * untracked. The list_empty() test is safe as
4556 ++ * node->private_list is protected by
4557 ++ * mapping->tree_lock.
4558 ++ */
4559 ++ if (!list_empty(&node->private_list))
4560 ++ list_lru_del(&workingset_shadow_nodes,
4561 ++ &node->private_list);
4562 ++ }
4563 ++ return 0;
4564 ++}
4565 ++
4566 + static void page_cache_tree_delete(struct address_space *mapping,
4567 + struct page *page, void *shadow)
4568 + {
4569 +@@ -122,6 +164,14 @@ static void page_cache_tree_delete(struct address_space *mapping,
4570 +
4571 + __radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot);
4572 +
4573 ++ if (!node) {
4574 ++ /*
4575 ++ * We need a node to properly account shadow
4576 ++ * entries. Don't plant any without. XXX
4577 ++ */
4578 ++ shadow = NULL;
4579 ++ }
4580 ++
4581 + if (shadow) {
4582 + mapping->nrshadows++;
4583 + /*
4584 +@@ -538,9 +588,8 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
4585 + memcg = mem_cgroup_begin_page_stat(old);
4586 + spin_lock_irqsave(&mapping->tree_lock, flags);
4587 + __delete_from_page_cache(old, NULL, memcg);
4588 +- error = radix_tree_insert(&mapping->page_tree, offset, new);
4589 ++ error = page_cache_tree_insert(mapping, new, NULL);
4590 + BUG_ON(error);
4591 +- mapping->nrpages++;
4592 +
4593 + /*
4594 + * hugetlb pages do not participate in page cache accounting.
4595 +@@ -562,48 +611,6 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
4596 + }
4597 + EXPORT_SYMBOL_GPL(replace_page_cache_page);
4598 +
4599 +-static int page_cache_tree_insert(struct address_space *mapping,
4600 +- struct page *page, void **shadowp)
4601 +-{
4602 +- struct radix_tree_node *node;
4603 +- void **slot;
4604 +- int error;
4605 +-
4606 +- error = __radix_tree_create(&mapping->page_tree, page->index,
4607 +- &node, &slot);
4608 +- if (error)
4609 +- return error;
4610 +- if (*slot) {
4611 +- void *p;
4612 +-
4613 +- p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
4614 +- if (!radix_tree_exceptional_entry(p))
4615 +- return -EEXIST;
4616 +- if (shadowp)
4617 +- *shadowp = p;
4618 +- mapping->nrshadows--;
4619 +- if (node)
4620 +- workingset_node_shadows_dec(node);
4621 +- }
4622 +- radix_tree_replace_slot(slot, page);
4623 +- mapping->nrpages++;
4624 +- if (node) {
4625 +- workingset_node_pages_inc(node);
4626 +- /*
4627 +- * Don't track node that contains actual pages.
4628 +- *
4629 +- * Avoid acquiring the list_lru lock if already
4630 +- * untracked. The list_empty() test is safe as
4631 +- * node->private_list is protected by
4632 +- * mapping->tree_lock.
4633 +- */
4634 +- if (!list_empty(&node->private_list))
4635 +- list_lru_del(&workingset_shadow_nodes,
4636 +- &node->private_list);
4637 +- }
4638 +- return 0;
4639 +-}
4640 +-
4641 + static int __add_to_page_cache_locked(struct page *page,
4642 + struct address_space *mapping,
4643 + pgoff_t offset, gfp_t gfp_mask,
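
The mm/filemap.c change moves page_cache_tree_insert() up and uses it in
replace_page_cache_page(), so a shadow entry left in the slot by reclaim is
replaced and accounted instead of making a bare radix_tree_insert() fail with
-EEXIST. Shadow entries are the radix tree's "exceptional" entries, tagged in
the pointer's low bits; a sketch of the test behind
radix_tree_exceptional_entry() (encoding per this kernel's radix-tree
convention, name changed to mark it as illustrative):

#define SKETCH_EXCEPTIONAL_ENTRY 2      /* tag bit marking shadow entries */

/* non-NULL and tagged => shadow to replace and account; non-NULL and
 * untagged => a real page, so page_cache_tree_insert() must return -EEXIST */
static inline int sketch_exceptional_entry(const void *entry)
{
        return (unsigned long)entry & SKETCH_EXCEPTIONAL_ENTRY;
}
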
4644 +diff --git a/mm/workingset.c b/mm/workingset.c
4645 +index aa017133744b..df66f426fdcf 100644
4646 +--- a/mm/workingset.c
4647 ++++ b/mm/workingset.c
4648 +@@ -341,21 +341,19 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
4649 + * no pages, so we expect to be able to remove them all and
4650 + * delete and free the empty node afterwards.
4651 + */
4652 +-
4653 +- BUG_ON(!node->count);
4654 +- BUG_ON(node->count & RADIX_TREE_COUNT_MASK);
4655 ++ BUG_ON(!workingset_node_shadows(node));
4656 ++ BUG_ON(workingset_node_pages(node));
4657 +
4658 + for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
4659 + if (node->slots[i]) {
4660 + BUG_ON(!radix_tree_exceptional_entry(node->slots[i]));
4661 + node->slots[i] = NULL;
4662 +- BUG_ON(node->count < (1U << RADIX_TREE_COUNT_SHIFT));
4663 +- node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
4664 ++ workingset_node_shadows_dec(node);
4665 + BUG_ON(!mapping->nrshadows);
4666 + mapping->nrshadows--;
4667 + }
4668 + }
4669 +- BUG_ON(node->count);
4670 ++ BUG_ON(workingset_node_shadows(node));
4671 + inc_zone_state(page_zone(virt_to_page(node)), WORKINGSET_NODERECLAIM);
4672 + if (!__radix_tree_delete_node(&mapping->page_tree, node))
4673 + BUG();
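
shadow_lru_isolate() above now uses the workingset_node_*() accessors instead
of poking node->count directly; node->count packs a page count in its low bits
and a shadow-entry count above RADIX_TREE_COUNT_SHIFT. A sketch of the
accessors this hunk relies on (matching the helpers added upstream alongside
this change; sketch_* names are illustrative):

static inline unsigned int sketch_node_pages(struct radix_tree_node *node)
{
        return node->count & RADIX_TREE_COUNT_MASK;
}

static inline unsigned int sketch_node_shadows(struct radix_tree_node *node)
{
        return node->count >> RADIX_TREE_COUNT_SHIFT;
}
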
4674 +diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
4675 +index 1ba417207465..27b6f55fa43a 100644
4676 +--- a/net/sunrpc/xprtsock.c
4677 ++++ b/net/sunrpc/xprtsock.c
4678 +@@ -474,7 +474,16 @@ static int xs_nospace(struct rpc_task *task)
4679 + spin_unlock_bh(&xprt->transport_lock);
4680 +
4681 + /* Race breaker in case memory is freed before above code is called */
4682 +- sk->sk_write_space(sk);
4683 ++ if (ret == -EAGAIN) {
4684 ++ struct socket_wq *wq;
4685 ++
4686 ++ rcu_read_lock();
4687 ++ wq = rcu_dereference(sk->sk_wq);
4688 ++ set_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags);
4689 ++ rcu_read_unlock();
4690 ++
4691 ++ sk->sk_write_space(sk);
4692 ++ }
4693 + return ret;
4694 + }
4695 +
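
The race breaker in xs_nospace() now arms SOCKWQ_ASYNC_NOSPACE before
re-invoking sk_write_space(): the write-space callback only wakes waiters when
it can clear that bit, so calling it with the bit clear was a no-op and the
RPC task could stall. A simplified sketch of the consumer side of that
handshake (the real xs_write_space() also routes the wakeup through the RPC
transport):

static void sketch_write_space(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        /* Wake only if someone armed the NOSPACE bit - which is exactly
         * what the set_bit() added to xs_nospace() above guarantees. */
        if (wq && test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags))
                wake_up_interruptible(&wq->wait);
        rcu_read_unlock();
}
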
4696 +diff --git a/sound/pci/hda/dell_wmi_helper.c b/sound/pci/hda/dell_wmi_helper.c
4697 +index 9c22f95838ef..19d41da79f93 100644
4698 +--- a/sound/pci/hda/dell_wmi_helper.c
4699 ++++ b/sound/pci/hda/dell_wmi_helper.c
4700 +@@ -49,7 +49,7 @@ static void alc_fixup_dell_wmi(struct hda_codec *codec,
4701 + removefunc = true;
4702 + if (dell_led_set_func(DELL_LED_MICMUTE, false) >= 0) {
4703 + dell_led_value = 0;
4704 +- if (spec->gen.num_adc_nids > 1)
4705 ++ if (spec->gen.num_adc_nids > 1 && !spec->gen.dyn_adc_switch)
4706 + codec_dbg(codec, "Skipping micmute LED control due to several ADCs");
4707 + else {
4708 + dell_old_cap_hook = spec->gen.cap_sync_hook;
4709 +diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c
4710 +index 0a4ad5feb82e..12826ac0381f 100644
4711 +--- a/sound/pci/hda/thinkpad_helper.c
4712 ++++ b/sound/pci/hda/thinkpad_helper.c
4713 +@@ -75,7 +75,7 @@ static void hda_fixup_thinkpad_acpi(struct hda_codec *codec,
4714 + removefunc = false;
4715 + }
4716 + if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0) {
4717 +- if (spec->num_adc_nids > 1)
4718 ++ if (spec->num_adc_nids > 1 && !spec->dyn_adc_switch)
4719 + codec_dbg(codec,
4720 + "Skipping micmute LED control due to several ADCs");
4721 + else {
4722 +diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
4723 +index 9409d014b46c..71df7acf8643 100644
4724 +--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
4725 ++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
4726 +@@ -89,6 +89,7 @@ struct intel_pt_decoder {
4727 + bool pge;
4728 + bool have_tma;
4729 + bool have_cyc;
4730 ++ bool fixup_last_mtc;
4731 + uint64_t pos;
4732 + uint64_t last_ip;
4733 + uint64_t ip;
4734 +@@ -584,10 +585,31 @@ struct intel_pt_calc_cyc_to_tsc_info {
4735 + uint64_t tsc_timestamp;
4736 + uint64_t timestamp;
4737 + bool have_tma;
4738 ++ bool fixup_last_mtc;
4739 + bool from_mtc;
4740 + double cbr_cyc_to_tsc;
4741 + };
4742 +
4743 ++/*
4744 ++ * MTC provides an 8-bit slice of CTC but the TMA packet only provides the lower
4745 ++ * 16 bits of CTC. If mtc_shift > 8 then some of the MTC bits are not in the CTC
4746 ++ * provided by the TMA packet. Fix-up the last_mtc calculated from the TMA
4747 ++ * packet by copying the missing bits from the current MTC assuming the least
4748 ++ * difference between the two, and that the current MTC comes after last_mtc.
4749 ++ */
4750 ++static void intel_pt_fixup_last_mtc(uint32_t mtc, int mtc_shift,
4751 ++ uint32_t *last_mtc)
4752 ++{
4753 ++ uint32_t first_missing_bit = 1U << (16 - mtc_shift);
4754 ++ uint32_t mask = ~(first_missing_bit - 1);
4755 ++
4756 ++ *last_mtc |= mtc & mask;
4757 ++ if (*last_mtc >= mtc) {
4758 ++ *last_mtc -= first_missing_bit;
4759 ++ *last_mtc &= 0xff;
4760 ++ }
4761 ++}
4762 ++
4763 + static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info)
4764 + {
4765 + struct intel_pt_decoder *decoder = pkt_info->decoder;
4766 +@@ -617,6 +639,11 @@ static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info)
4767 + return 0;
4768 +
4769 + mtc = pkt_info->packet.payload;
4770 ++ if (decoder->mtc_shift > 8 && data->fixup_last_mtc) {
4771 ++ data->fixup_last_mtc = false;
4772 ++ intel_pt_fixup_last_mtc(mtc, decoder->mtc_shift,
4773 ++ &data->last_mtc);
4774 ++ }
4775 + if (mtc > data->last_mtc)
4776 + mtc_delta = mtc - data->last_mtc;
4777 + else
4778 +@@ -685,6 +712,7 @@ static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info)
4779 +
4780 + data->ctc_delta = 0;
4781 + data->have_tma = true;
4782 ++ data->fixup_last_mtc = true;
4783 +
4784 + return 0;
4785 +
4786 +@@ -751,6 +779,7 @@ static void intel_pt_calc_cyc_to_tsc(struct intel_pt_decoder *decoder,
4787 + .tsc_timestamp = decoder->tsc_timestamp,
4788 + .timestamp = decoder->timestamp,
4789 + .have_tma = decoder->have_tma,
4790 ++ .fixup_last_mtc = decoder->fixup_last_mtc,
4791 + .from_mtc = from_mtc,
4792 + .cbr_cyc_to_tsc = 0,
4793 + };
4794 +@@ -1241,6 +1270,7 @@ static void intel_pt_calc_tma(struct intel_pt_decoder *decoder)
4795 + }
4796 + decoder->ctc_delta = 0;
4797 + decoder->have_tma = true;
4798 ++ decoder->fixup_last_mtc = true;
4799 + intel_pt_log("CTC timestamp " x64_fmt " last MTC %#x CTC rem %#x\n",
4800 + decoder->ctc_timestamp, decoder->last_mtc, ctc_rem);
4801 + }
4802 +@@ -1255,6 +1285,12 @@ static void intel_pt_calc_mtc_timestamp(struct intel_pt_decoder *decoder)
4803 +
4804 + mtc = decoder->packet.payload;
4805 +
4806 ++ if (decoder->mtc_shift > 8 && decoder->fixup_last_mtc) {
4807 ++ decoder->fixup_last_mtc = false;
4808 ++ intel_pt_fixup_last_mtc(mtc, decoder->mtc_shift,
4809 ++ &decoder->last_mtc);
4810 ++ }
4811 ++
4812 + if (mtc > decoder->last_mtc)
4813 + mtc_delta = mtc - decoder->last_mtc;
4814 + else
4815 +@@ -1323,6 +1359,8 @@ static void intel_pt_calc_cyc_timestamp(struct intel_pt_decoder *decoder)
4816 + timestamp, decoder->timestamp);
4817 + else
4818 + decoder->timestamp = timestamp;
4819 ++
4820 ++ decoder->timestamp_insn_cnt = 0;
4821 + }
4822 +
4823 + /* Walk PSB+ packets when already in sync. */
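
intel_pt_fixup_last_mtc() reconstructs the MTC bits that the 16-bit CTC value
in a TMA packet cannot carry once mtc_shift > 8. A stand-alone rehearsal of
the arithmetic (the helper body is copied from the hunk above; the input
values are an illustrative example, not trace data):

#include <stdint.h>
#include <stdio.h>

static void fixup_last_mtc(uint32_t mtc, int mtc_shift, uint32_t *last_mtc)
{
        uint32_t first_missing_bit = 1U << (16 - mtc_shift);
        uint32_t mask = ~(first_missing_bit - 1);

        *last_mtc |= mtc & mask;        /* borrow the missing high bits from mtc */
        if (*last_mtc >= mtc) {         /* overshot: step back one missing-bit unit */
                *last_mtc -= first_missing_bit;
                *last_mtc &= 0xff;
        }
}

int main(void)
{
        /* mtc_shift = 10: the TMA packet supplies only last_mtc's low 6 bits */
        uint32_t last_mtc = 0x2a;

        fixup_last_mtc(0x85, 10, &last_mtc);
        /* prints 0x6a: of the candidates sharing low bits 0x2a, the closest
         * value below the current mtc = 0x85 */
        printf("fixed-up last_mtc = %#x\n", last_mtc);
        return 0;
}

Of the candidates 0x2a, 0x6a, 0xaa and 0xea that share the known low bits,
0x6a is the nearest one below the current MTC, matching the "least difference,
current MTC comes after last_mtc" assumption stated in the comment above.
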
4824 +diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
4825 +index 9227c2f076c3..89927b5beebf 100644
4826 +--- a/tools/perf/util/intel-pt.c
4827 ++++ b/tools/perf/util/intel-pt.c
4828 +@@ -238,7 +238,7 @@ static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
4829 + }
4830 +
4831 + queue = &ptq->pt->queues.queue_array[ptq->queue_nr];
4832 +-
4833 ++next:
4834 + buffer = auxtrace_buffer__next(queue, buffer);
4835 + if (!buffer) {
4836 + if (old_buffer)
4837 +@@ -261,9 +261,6 @@ static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
4838 + intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer))
4839 + return -ENOMEM;
4840 +
4841 +- if (old_buffer)
4842 +- auxtrace_buffer__drop_data(old_buffer);
4843 +-
4844 + if (buffer->use_data) {
4845 + b->len = buffer->use_size;
4846 + b->buf = buffer->use_data;
4847 +@@ -273,6 +270,16 @@ static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
4848 + }
4849 + b->ref_timestamp = buffer->reference;
4850 +
4851 ++ /*
4852 ++ * If in snapshot mode and the buffer has no usable data, get next
4853 ++ * buffer and again check overlap against old_buffer.
4854 ++ */
4855 ++ if (ptq->pt->snapshot_mode && !b->len)
4856 ++ goto next;
4857 ++
4858 ++ if (old_buffer)
4859 ++ auxtrace_buffer__drop_data(old_buffer);
4860 ++
4861 + if (!old_buffer || ptq->pt->sampling_mode || (ptq->pt->snapshot_mode &&
4862 + !buffer->consecutive)) {
4863 + b->consecutive = false;