From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:3.12 commit in: /
Date: Tue, 30 Sep 2014 17:16:34
Message-Id: 1412097234.540c21f81b78f7c07ce2518aa9d3077f14ae25d6.mpagano@gentoo
commit: 540c21f81b78f7c07ce2518aa9d3077f14ae25d6
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Sep 30 17:13:54 2014 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Sep 30 17:13:54 2014 +0000
URL: http://sources.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=540c21f8

Linux patch 3.12.29

---
0000_README | 4 +
1028_linux-3.12.29.patch | 5580 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 5584 insertions(+)

diff --git a/0000_README b/0000_README
index 7f93023..ae0f6aa 100644
--- a/0000_README
+++ b/0000_README
@@ -154,6 +154,10 @@ Patch: 1027_linux-3.12.28.patch
From: http://www.kernel.org
Desc: Linux 3.12.28

+Patch: 1028_linux-3.12.29.patch
+From: http://www.kernel.org
+Desc: Linux 3.12.29
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1028_linux-3.12.29.patch b/1028_linux-3.12.29.patch
new file mode 100644
index 0000000..45fb6d1
--- /dev/null
+++ b/1028_linux-3.12.29.patch
@@ -0,0 +1,5580 @@
+diff --git a/Makefile b/Makefile
+index 300584fe5ad4..67cec33d00c7 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 28
++SUBLEVEL = 29
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+
+diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
+index 7ae8a1f00c3c..7af6183daa2e 100644
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -183,9 +183,27 @@ void exit_thread(void)
+ {
+ }
+
++static void tls_thread_flush(void)
++{
++ asm ("msr tpidr_el0, xzr");
++
++ if (is_compat_task()) {
++ current->thread.tp_value = 0;
++
++ /*
++ * We need to ensure ordering between the shadow state and the
++ * hardware state, so that we don't corrupt the hardware state
++ * with a stale shadow state during context switch.
++ */
++ barrier();
++ asm ("msr tpidrro_el0, xzr");
++ }
++}
++
+ void flush_thread(void)
+ {
+ fpsimd_flush_thread();
++ tls_thread_flush();
+ flush_ptrace_hw_breakpoint(current);
+ }
+
+diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
+index 26e9c4eeaba8..78039927c807 100644
+--- a/arch/arm64/kernel/sys_compat.c
++++ b/arch/arm64/kernel/sys_compat.c
+@@ -79,6 +79,12 @@ long compat_arm_syscall(struct pt_regs *regs)
+
+ case __ARM_NR_compat_set_tls:
+ current->thread.tp_value = regs->regs[0];
++
++ /*
++ * Protect against register corruption from context switch.
++ * See comment in tls_thread_flush.
++ */
++ barrier();
+ asm ("msr tpidrro_el0, %0" : : "r" (regs->regs[0]));
+ return 0;
+
+diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
+index b212ae12e5ac..8a0079981cc8 100644
+--- a/arch/mips/cavium-octeon/setup.c
++++ b/arch/mips/cavium-octeon/setup.c
+@@ -458,6 +458,18 @@ static void octeon_halt(void)
+ octeon_kill_core(NULL);
+ }
+
++static char __read_mostly octeon_system_type[80];
++
++static int __init init_octeon_system_type(void)
++{
++ snprintf(octeon_system_type, sizeof(octeon_system_type), "%s (%s)",
++ cvmx_board_type_to_string(octeon_bootinfo->board_type),
++ octeon_model_get_string(read_c0_prid()));
++
++ return 0;
++}
++early_initcall(init_octeon_system_type);
++
+ /**
+ * Return a string representing the system type
+ *
+@@ -465,11 +477,7 @@ static void octeon_halt(void)
+ */
+ const char *octeon_board_type_string(void)
+ {
+- static char name[80];
+- sprintf(name, "%s (%s)",
+- cvmx_board_type_to_string(octeon_bootinfo->board_type),
+- octeon_model_get_string(read_c0_prid()));
+- return name;
++ return octeon_system_type;
+ }
+
+ const char *get_system_type(void)
+diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
+index 5b5ddb231f26..78f18436cdf2 100644
+--- a/arch/mips/kernel/irq-gic.c
++++ b/arch/mips/kernel/irq-gic.c
+@@ -255,11 +255,13 @@ static void __init gic_setup_intr(unsigned int intr, unsigned int cpu,
+
+ /* Setup Intr to Pin mapping */
+ if (pin & GIC_MAP_TO_NMI_MSK) {
++ int i;
++
+ GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), pin);
+ /* FIXME: hack to route NMI to all cpu's */
+- for (cpu = 0; cpu < NR_CPUS; cpu += 32) {
++ for (i = 0; i < NR_CPUS; i += 32) {
+ GICWRITE(GIC_REG_ADDR(SHARED,
+- GIC_SH_MAP_TO_VPE_REG_OFF(intr, cpu)),
++ GIC_SH_MAP_TO_VPE_REG_OFF(intr, i)),
+ 0xffffffff);
+ }
+ } else {
+diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
+index 8ae1ebef8b71..5404cab551f3 100644
+--- a/arch/mips/kernel/ptrace.c
++++ b/arch/mips/kernel/ptrace.c
+@@ -162,6 +162,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
+ __get_user(fregs[i], i + (__u64 __user *) data);
+
+ __get_user(child->thread.fpu.fcr31, data + 64);
++ child->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+
+ /* FIR may not be written. */
+
+@@ -452,7 +453,7 @@ long arch_ptrace(struct task_struct *child, long request,
+ break;
+ #endif
+ case FPC_CSR:
+- child->thread.fpu.fcr31 = data;
++ child->thread.fpu.fcr31 = data & ~FPU_CSR_ALL_X;
+ break;
+ case DSP_BASE ... DSP_BASE + 5: {
+ dspreg_t *dregs;
+diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
+index c369a5d35527..b897dde93e7a 100644
+--- a/arch/mips/kernel/unaligned.c
++++ b/arch/mips/kernel/unaligned.c
+@@ -605,7 +605,6 @@ static void emulate_load_store_insn(struct pt_regs *regs,
+ case sdc1_op:
+ die_if_kernel("Unaligned FP access in kernel code", regs);
+ BUG_ON(!used_math());
+- BUG_ON(!is_fpu_owner());
+
+ lose_fpu(1); /* Save FPU state for the emulator. */
+ res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
+diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
+index 9bb3a9363b06..db7a050f5c2c 100644
+--- a/arch/mips/mm/tlbex.c
++++ b/arch/mips/mm/tlbex.c
+@@ -1333,6 +1333,7 @@ static void build_r4000_tlb_refill_handler(void)
+ }
+ #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ uasm_l_tlb_huge_update(&l, p);
++ UASM_i_LW(&p, K0, 0, K1);
+ build_huge_update_entries(&p, htlb_info.huge_pte, K1);
+ build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
+ htlb_info.restore_scratch);
+diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
+index d8a455ede5a7..fec8bf97d806 100644
+--- a/arch/openrisc/kernel/entry.S
++++ b/arch/openrisc/kernel/entry.S
+@@ -853,37 +853,44 @@ UNHANDLED_EXCEPTION(_vector_0x1f00,0x1f00)
+
+ /* ========================================================[ return ] === */
+
++_resume_userspace:
++ DISABLE_INTERRUPTS(r3,r4)
++ l.lwz r4,TI_FLAGS(r10)
++ l.andi r13,r4,_TIF_WORK_MASK
++ l.sfeqi r13,0
++ l.bf _restore_all
++ l.nop
++
+ _work_pending:
+- /*
+- * if (current_thread_info->flags & _TIF_NEED_RESCHED)
+- * schedule();
+- */
+- l.lwz r5,TI_FLAGS(r10)
+- l.andi r3,r5,_TIF_NEED_RESCHED
+- l.sfnei r3,0
+- l.bnf _work_notifysig
++ l.lwz r5,PT_ORIG_GPR11(r1)
++ l.sfltsi r5,0
++ l.bnf 1f
+ l.nop
+- l.jal schedule
++ l.andi r5,r5,0
++1:
++ l.jal do_work_pending
++ l.ori r3,r1,0 /* pt_regs */
++
++ l.sfeqi r11,0
++ l.bf _restore_all
+ l.nop
+- l.j _resume_userspace
++ l.sfltsi r11,0
++ l.bnf 1f
+ l.nop
+-
+-/* Handle pending signals and notify-resume requests.
+- * do_notify_resume must be passed the latest pushed pt_regs, not
+- * necessarily the "userspace" ones. Also, pt_regs->syscallno
+- * must be set so that the syscall restart functionality works.
+- */
+-_work_notifysig:
+- l.jal do_notify_resume
+- l.ori r3,r1,0 /* pt_regs */
+-
+-_resume_userspace:
+- DISABLE_INTERRUPTS(r3,r4)
+- l.lwz r3,TI_FLAGS(r10)
+- l.andi r3,r3,_TIF_WORK_MASK
+- l.sfnei r3,0
+- l.bf _work_pending
++ l.and r11,r11,r0
++ l.ori r11,r11,__NR_restart_syscall
++ l.j _syscall_check_trace_enter
+ l.nop
++1:
++ l.lwz r11,PT_ORIG_GPR11(r1)
++ /* Restore arg registers */
++ l.lwz r3,PT_GPR3(r1)
++ l.lwz r4,PT_GPR4(r1)
++ l.lwz r5,PT_GPR5(r1)
++ l.lwz r6,PT_GPR6(r1)
++ l.lwz r7,PT_GPR7(r1)
++ l.j _syscall_check_trace_enter
++ l.lwz r8,PT_GPR8(r1)
+
+ _restore_all:
+ RESTORE_ALL
+diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c
+index ae167f7e081a..c277ec82783d 100644
+--- a/arch/openrisc/kernel/signal.c
++++ b/arch/openrisc/kernel/signal.c
+@@ -28,24 +28,24 @@
+ #include <linux/tracehook.h>
+
+ #include <asm/processor.h>
++#include <asm/syscall.h>
+ #include <asm/ucontext.h>
+ #include <asm/uaccess.h>
+
+ #define DEBUG_SIG 0
+
+ struct rt_sigframe {
+- struct siginfo *pinfo;
+- void *puc;
+ struct siginfo info;
+ struct ucontext uc;
+ unsigned char retcode[16]; /* trampoline code */
+ };
+
+-static int restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
++static int restore_sigcontext(struct pt_regs *regs,
++ struct sigcontext __user *sc)
+ {
+- unsigned int err = 0;
++ int err = 0;
+
+- /* Alwys make any pending restarted system call return -EINTR */
++ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+ /*
+@@ -53,25 +53,21 @@ static int restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
+ * (sc is already checked for VERIFY_READ since the sigframe was
+ * checked in sys_sigreturn previously)
+ */
+- if (__copy_from_user(regs, sc->regs.gpr, 32 * sizeof(unsigned long)))
+- goto badframe;
+- if (__copy_from_user(&regs->pc, &sc->regs.pc, sizeof(unsigned long)))
+- goto badframe;
+- if (__copy_from_user(&regs->sr, &sc->regs.sr, sizeof(unsigned long)))
+- goto badframe;
++ err |= __copy_from_user(regs, sc->regs.gpr, 32 * sizeof(unsigned long));
++ err |= __copy_from_user(&regs->pc, &sc->regs.pc, sizeof(unsigned long));
++ err |= __copy_from_user(&regs->sr, &sc->regs.sr, sizeof(unsigned long));
+
+ /* make sure the SM-bit is cleared so user-mode cannot fool us */
+ regs->sr &= ~SPR_SR_SM;
+
++ regs->orig_gpr11 = -1; /* Avoid syscall restart checks */
++
+ /* TODO: the other ports use regs->orig_XX to disable syscall checks
+ * after this completes, but we don't use that mechanism. maybe we can
+ * use it now ?
+ */
+
+ return err;
+-
+-badframe:
+- return 1;
+ }
+
+ asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs)
+@@ -111,21 +107,18 @@ badframe:
+ * Set up a signal frame.
+ */
+
+-static int setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
+- unsigned long mask)
++static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
+ {
+ int err = 0;
+
+ /* copy the regs */
+-
++ /* There should be no need to save callee-saved registers here...
++ * ...but we save them anyway. Revisit this
++ */
+ err |= __copy_to_user(sc->regs.gpr, regs, 32 * sizeof(unsigned long));
+ err |= __copy_to_user(&sc->regs.pc, &regs->pc, sizeof(unsigned long));
+ err |= __copy_to_user(&sc->regs.sr, &regs->sr, sizeof(unsigned long));
+
+- /* then some other stuff */
+-
+- err |= __put_user(mask, &sc->oldmask);
+-
+ return err;
+ }
+
+@@ -181,24 +174,18 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ int err = 0;
+
+ frame = get_sigframe(ka, regs, sizeof(*frame));
+-
+ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+ goto give_sigsegv;
+
+- err |= __put_user(&frame->info, &frame->pinfo);
+- err |= __put_user(&frame->uc, &frame->puc);
+-
++ /* Create siginfo. */
+ if (ka->sa.sa_flags & SA_SIGINFO)
+ err |= copy_siginfo_to_user(&frame->info, info);
+- if (err)
+- goto give_sigsegv;
+
+- /* Clear all the bits of the ucontext we don't use. */
+- err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));
++ /* Create the ucontext. */
+ err |= __put_user(0, &frame->uc.uc_flags);
+ err |= __put_user(NULL, &frame->uc.uc_link);
+ err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
+- err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]);
++ err |= setup_sigcontext(regs, &frame->uc.uc_mcontext);
+
+ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+@@ -207,9 +194,12 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+
+ /* trampoline - the desired return ip is the retcode itself */
+ return_ip = (unsigned long)&frame->retcode;
+- /* This is l.ori r11,r0,__NR_sigreturn, l.sys 1 */
+- err |= __put_user(0xa960, (short *)(frame->retcode + 0));
+- err |= __put_user(__NR_rt_sigreturn, (short *)(frame->retcode + 2));
++ /* This is:
++ l.ori r11,r0,__NR_sigreturn
++ l.sys 1
++ */
++ err |= __put_user(0xa960, (short *)(frame->retcode + 0));
++ err |= __put_user(__NR_rt_sigreturn, (short *)(frame->retcode + 2));
+ err |= __put_user(0x20000001, (unsigned long *)(frame->retcode + 4));
+ err |= __put_user(0x15000000, (unsigned long *)(frame->retcode + 8));
+
+@@ -262,82 +252,106 @@ handle_signal(unsigned long sig,
+ * mode below.
+ */
+
+-void do_signal(struct pt_regs *regs)
++int do_signal(struct pt_regs *regs, int syscall)
+ {
+ siginfo_t info;
+ int signr;
+ struct k_sigaction ka;
+-
+- /*
+- * We want the common case to go fast, which
+- * is why we may in certain cases get here from
+- * kernel mode. Just return without doing anything
+- * if so.
+- */
+- if (!user_mode(regs))
+- return;
+-
+- signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+-
+- /* If we are coming out of a syscall then we need
+- * to check if the syscall was interrupted and wants to be
+- * restarted after handling the signal. If so, the original
+- * syscall number is put back into r11 and the PC rewound to
+- * point at the l.sys instruction that resulted in the
+- * original syscall. Syscall results other than the four
+- * below mean that the syscall executed to completion and no
+- * restart is necessary.
+- */
+- if (regs->orig_gpr11) {
+- int restart = 0;
+-
+- switch (regs->gpr[11]) {
++ unsigned long continue_addr = 0;
++ unsigned long restart_addr = 0;
++ unsigned long retval = 0;
++ int restart = 0;
++
++ if (syscall) {
++ continue_addr = regs->pc;
++ restart_addr = continue_addr - 4;
++ retval = regs->gpr[11];
++
++ /*
++ * Setup syscall restart here so that a debugger will
++ * see the already changed PC.
++ */
++ switch (retval) {
+ case -ERESTART_RESTARTBLOCK:
++ restart = -2;
++ /* Fall through */
+ case -ERESTARTNOHAND:
+- /* Restart if there is no signal handler */
+- restart = (signr <= 0);
+- break;
+ case -ERESTARTSYS:
+- /* Restart if there no signal handler or
+- * SA_RESTART flag is set */
+- restart = (signr <= 0 || (ka.sa.sa_flags & SA_RESTART));
+- break;
+ case -ERESTARTNOINTR:
+- /* Always restart */
+- restart = 1;
++ restart++;
++ regs->gpr[11] = regs->orig_gpr11;
++ regs->pc = restart_addr;
+ break;
+ }
++ }
+
+- if (restart) {
+- if (regs->gpr[11] == -ERESTART_RESTARTBLOCK)
+- regs->gpr[11] = __NR_restart_syscall;
+- else
+- regs->gpr[11] = regs->orig_gpr11;
+- regs->pc -= 4;
+- } else {
+- regs->gpr[11] = -EINTR;
++ /*
++ * Get the signal to deliver. When running under ptrace, at this
++ * point the debugger may change all our registers ...
++ */
++ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
++ /*
++ * Depending on the signal settings we may need to revert the
++ * decision to restart the system call. But skip this if a
++ * debugger has chosen to restart at a different PC.
++ */
++ if (signr > 0) {
++ if (unlikely(restart) && regs->pc == restart_addr) {
++ if (retval == -ERESTARTNOHAND ||
++ retval == -ERESTART_RESTARTBLOCK
++ || (retval == -ERESTARTSYS
++ && !(ka.sa.sa_flags & SA_RESTART))) {
++ /* No automatic restart */
++ regs->gpr[11] = -EINTR;
++ regs->pc = continue_addr;
++ }
+ }
+- }
+
+- if (signr <= 0) {
+- /* no signal to deliver so we just put the saved sigmask
+- * back */
+- restore_saved_sigmask();
+- } else { /* signr > 0 */
+- /* Whee! Actually deliver the signal. */
+ handle_signal(signr, &info, &ka, regs);
++ } else {
++ /* no handler */
++ restore_saved_sigmask();
++ /*
++ * Restore pt_regs PC as syscall restart will be handled by
++ * kernel without return to userspace
++ */
++ if (unlikely(restart) && regs->pc == restart_addr) {
++ regs->pc = continue_addr;
++ return restart;
++ }
+ }
+
+- return;
++ return 0;
+ }
+
+-asmlinkage void do_notify_resume(struct pt_regs *regs)
++asmlinkage int
++do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
+ {
+- if (current_thread_info()->flags & _TIF_SIGPENDING)
+- do_signal(regs);
+-
+- if (current_thread_info()->flags & _TIF_NOTIFY_RESUME) {
+- clear_thread_flag(TIF_NOTIFY_RESUME);
+- tracehook_notify_resume(regs);
+- }
++ do {
++ if (likely(thread_flags & _TIF_NEED_RESCHED)) {
++ schedule();
++ } else {
++ if (unlikely(!user_mode(regs)))
++ return 0;
++ local_irq_enable();
++ if (thread_flags & _TIF_SIGPENDING) {
++ int restart = do_signal(regs, syscall);
++ if (unlikely(restart)) {
++ /*
++ * Restart without handlers.
++ * Deal with it without leaving
++ * the kernel space.
++ */
++ return restart;
++ }
++ syscall = 0;
++ } else {
++ clear_thread_flag(TIF_NOTIFY_RESUME);
++ tracehook_notify_resume(regs);
++ }
++ }
++ local_irq_disable();
++ thread_flags = current_thread_info()->flags;
++ } while (thread_flags & _TIF_WORK_MASK);
++ return 0;
+ }
+diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
+index 8b480901165a..3a52b9b01133 100644
+--- a/arch/powerpc/include/asm/machdep.h
++++ b/arch/powerpc/include/asm/machdep.h
+@@ -57,10 +57,10 @@ struct machdep_calls {
+ void (*hpte_removebolted)(unsigned long ea,
+ int psize, int ssize);
+ void (*flush_hash_range)(unsigned long number, int local);
+- void (*hugepage_invalidate)(struct mm_struct *mm,
++ void (*hugepage_invalidate)(unsigned long vsid,
++ unsigned long addr,
+ unsigned char *hpte_slot_array,
+- unsigned long addr, int psize);
+-
++ int psize, int ssize);
+ /* special for kexec, to be called in real mode, linear mapping is
+ * destroyed as well */
+ void (*hpte_clear_all)(void);
+diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
+index 46db09414a10..832a39d042d4 100644
+--- a/arch/powerpc/include/asm/pgtable-ppc64.h
++++ b/arch/powerpc/include/asm/pgtable-ppc64.h
+@@ -409,7 +409,7 @@ static inline char *get_hpte_slot_array(pmd_t *pmdp)
+ }
+
+ extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
+- pmd_t *pmdp);
++ pmd_t *pmdp, unsigned long old_pmd);
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
+ extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
+diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
+index d836d945068d..9ecede1e124c 100644
+--- a/arch/powerpc/include/asm/pte-hash64-64k.h
++++ b/arch/powerpc/include/asm/pte-hash64-64k.h
+@@ -46,11 +46,31 @@
+ * in order to deal with 64K made of 4K HW pages. Thus we override the
+ * generic accessors and iterators here
+ */
+-#define __real_pte(e,p) ((real_pte_t) { \
+- (e), (pte_val(e) & _PAGE_COMBO) ? \
+- (pte_val(*((p) + PTRS_PER_PTE))) : 0 })
+-#define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \
+- (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
++#define __real_pte __real_pte
++static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
++{
++ real_pte_t rpte;
++
++ rpte.pte = pte;
++ rpte.hidx = 0;
++ if (pte_val(pte) & _PAGE_COMBO) {
++ /*
++ * Make sure we order the hidx load against the _PAGE_COMBO
++ * check. The store side ordering is done in __hash_page_4K
++ */
++ smp_rmb();
++ rpte.hidx = pte_val(*((ptep) + PTRS_PER_PTE));
++ }
++ return rpte;
++}
++
++static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
++{
++ if ((pte_val(rpte.pte) & _PAGE_COMBO))
++ return (rpte.hidx >> (index<<2)) & 0xf;
++ return (pte_val(rpte.pte) >> 12) & 0xf;
++}
++
+ #define __rpte_to_pte(r) ((r).pte)
+ #define __rpte_sub_valid(rpte, index) \
+ (pte_val(rpte.pte) & (_PAGE_HPTE_SUB0 >> (index)))
+diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
+index c33d939120c9..9ca9c160dee4 100644
+--- a/arch/powerpc/mm/hash_native_64.c
++++ b/arch/powerpc/mm/hash_native_64.c
+@@ -413,18 +413,18 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
+ local_irq_restore(flags);
+ }
+
+-static void native_hugepage_invalidate(struct mm_struct *mm,
++static void native_hugepage_invalidate(unsigned long vsid,
++ unsigned long addr,
+ unsigned char *hpte_slot_array,
+- unsigned long addr, int psize)
++ int psize, int ssize)
+ {
+- int ssize = 0, i;
+- int lock_tlbie;
++ int i;
+ struct hash_pte *hptep;
+ int actual_psize = MMU_PAGE_16M;
+ unsigned int max_hpte_count, valid;
+ unsigned long flags, s_addr = addr;
+ unsigned long hpte_v, want_v, shift;
+- unsigned long hidx, vpn = 0, vsid, hash, slot;
++ unsigned long hidx, vpn = 0, hash, slot;
+
+ shift = mmu_psize_defs[psize].shift;
+ max_hpte_count = 1U << (PMD_SHIFT - shift);
+@@ -438,15 +438,6 @@ static void native_hugepage_invalidate(struct mm_struct *mm,
+
+ /* get the vpn */
+ addr = s_addr + (i * (1ul << shift));
+- if (!is_kernel_addr(addr)) {
+- ssize = user_segment_size(addr);
+- vsid = get_vsid(mm->context.id, addr, ssize);
+- WARN_ON(vsid == 0);
+- } else {
+- vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
+- ssize = mmu_kernel_ssize;
+- }
+-
+ vpn = hpt_vpn(addr, vsid, ssize);
+ hash = hpt_hash(vpn, shift, ssize);
+ if (hidx & _PTEIDX_SECONDARY)
+@@ -466,22 +457,13 @@ static void native_hugepage_invalidate(struct mm_struct *mm,
+ else
+ /* Invalidate the hpte. NOTE: this also unlocks it */
+ hptep->v = 0;
++ /*
++ * We need to do tlb invalidate for all the address, tlbie
++ * instruction compares entry_VA in tlb with the VA specified
++ * here
++ */
++ tlbie(vpn, psize, actual_psize, ssize, 0);
+ }
+- /*
+- * Since this is a hugepage, we just need a single tlbie.
+- * use the last vpn.
+- */
+- lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
+- if (lock_tlbie)
+- raw_spin_lock(&native_tlbie_lock);
+-
+- asm volatile("ptesync":::"memory");
+- __tlbie(vpn, psize, actual_psize, ssize);
+- asm volatile("eieio; tlbsync; ptesync":::"memory");
+-
+- if (lock_tlbie)
+- raw_spin_unlock(&native_tlbie_lock);
+-
+ local_irq_restore(flags);
+ }
+
+diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
+index 34de9e0cdc34..7d86c868040d 100644
+--- a/arch/powerpc/mm/hugepage-hash64.c
++++ b/arch/powerpc/mm/hugepage-hash64.c
+@@ -18,6 +18,57 @@
+ #include <linux/mm.h>
+ #include <asm/machdep.h>
+
++static void invalidate_old_hpte(unsigned long vsid, unsigned long addr,
++ pmd_t *pmdp, unsigned int psize, int ssize)
++{
++ int i, max_hpte_count, valid;
++ unsigned long s_addr;
++ unsigned char *hpte_slot_array;
++ unsigned long hidx, shift, vpn, hash, slot;
++
++ s_addr = addr & HPAGE_PMD_MASK;
++ hpte_slot_array = get_hpte_slot_array(pmdp);
++ /*
++ * IF we try to do a HUGE PTE update after a withdraw is done.
++ * we will find the below NULL. This happens when we do
++ * split_huge_page_pmd
++ */
++ if (!hpte_slot_array)
++ return;
++
++ if (ppc_md.hugepage_invalidate)
++ return ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
++ psize, ssize);
++ /*
++ * No bluk hpte removal support, invalidate each entry
++ */
++ shift = mmu_psize_defs[psize].shift;
++ max_hpte_count = HPAGE_PMD_SIZE >> shift;
++ for (i = 0; i < max_hpte_count; i++) {
++ /*
++ * 8 bits per each hpte entries
++ * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit]
++ */
++ valid = hpte_valid(hpte_slot_array, i);
++ if (!valid)
++ continue;
++ hidx = hpte_hash_index(hpte_slot_array, i);
++
++ /* get the vpn */
++ addr = s_addr + (i * (1ul << shift));
++ vpn = hpt_vpn(addr, vsid, ssize);
++ hash = hpt_hash(vpn, shift, ssize);
++ if (hidx & _PTEIDX_SECONDARY)
++ hash = ~hash;
++
++ slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
++ slot += hidx & _PTEIDX_GROUP_IX;
++ ppc_md.hpte_invalidate(slot, vpn, psize,
++ MMU_PAGE_16M, ssize, 0);
++ }
++}
++
++
+ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+ pmd_t *pmdp, unsigned long trap, int local, int ssize,
+ unsigned int psize)
+@@ -33,7 +84,9 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+ * atomically mark the linux large page PMD busy and dirty
+ */
+ do {
+- old_pmd = pmd_val(*pmdp);
++ pmd_t pmd = ACCESS_ONCE(*pmdp);
++
++ old_pmd = pmd_val(pmd);
+ /* If PMD busy, retry the access */
+ if (unlikely(old_pmd & _PAGE_BUSY))
+ return 0;
+@@ -85,6 +138,15 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+ vpn = hpt_vpn(ea, vsid, ssize);
+ hash = hpt_hash(vpn, shift, ssize);
+ hpte_slot_array = get_hpte_slot_array(pmdp);
++ if (psize == MMU_PAGE_4K) {
++ /*
++ * invalidate the old hpte entry if we have that mapped via 64K
++ * base page size. This is because demote_segment won't flush
++ * hash page table entries.
++ */
++ if ((old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO))
++ invalidate_old_hpte(vsid, ea, pmdp, MMU_PAGE_64K, ssize);
++ }
+
+ valid = hpte_valid(hpte_slot_array, index);
+ if (valid) {
+@@ -107,11 +169,8 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+ * safely update this here.
+ */
+ valid = 0;
+- new_pmd &= ~_PAGE_HPTEFLAGS;
+ hpte_slot_array[index] = 0;
+- } else
+- /* clear the busy bits and set the hash pte bits */
+- new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
++ }
+ }
+
+ if (!valid) {
+@@ -119,15 +178,13 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+
+ /* insert new entry */
+ pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
+-repeat:
+- hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+-
+- /* clear the busy bits and set the hash pte bits */
+- new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
++ new_pmd |= _PAGE_HASHPTE;
+
+ /* Add in WIMG bits */
+ rflags |= (new_pmd & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
+ _PAGE_COHERENT | _PAGE_GUARDED));
++repeat:
++ hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+
+ /* Insert into the hash table, primary slot */
+ slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
+@@ -168,8 +225,17 @@ repeat:
+ mark_hpte_slot_valid(hpte_slot_array, index, slot);
+ }
+ /*
+- * No need to use ldarx/stdcx here
++ * Mark the pte with _PAGE_COMBO, if we are trying to hash it with
++ * base page size 4k.
++ */
++ if (psize == MMU_PAGE_4K)
++ new_pmd |= _PAGE_COMBO;
++ /*
++ * The hpte valid is stored in the pgtable whose address is in the
++ * second half of the PMD. Order this against clearing of the busy bit in
++ * huge pmd.
+ */
++ smp_wmb();
+ *pmdp = __pmd(new_pmd & ~_PAGE_BUSY);
+ return 0;
+ }
+diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
+index 14c05547bd74..e91079b796d2 100644
+--- a/arch/powerpc/mm/numa.c
++++ b/arch/powerpc/mm/numa.c
+@@ -589,8 +589,8 @@ static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action,
+ case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
+ unmap_cpu_from_node(lcpu);
+- break;
+ ret = NOTIFY_OK;
++ break;
+ #endif
+ }
+ return ret;
+diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
+index 536eec72c0f7..c9379a2d6006 100644
+--- a/arch/powerpc/mm/pgtable_64.c
++++ b/arch/powerpc/mm/pgtable_64.c
+@@ -524,7 +524,7 @@ unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
+ *pmdp = __pmd(old & ~clr);
+ #endif
+ if (old & _PAGE_HASHPTE)
+- hpte_do_hugepage_flush(mm, addr, pmdp);
++ hpte_do_hugepage_flush(mm, addr, pmdp, old);
+ return old;
+ }
+
+@@ -631,7 +631,7 @@ void pmdp_splitting_flush(struct vm_area_struct *vma,
+ if (!(old & _PAGE_SPLITTING)) {
+ /* We need to flush the hpte */
+ if (old & _PAGE_HASHPTE)
+- hpte_do_hugepage_flush(vma->vm_mm, address, pmdp);
++ hpte_do_hugepage_flush(vma->vm_mm, address, pmdp, old);
+ }
+ }
+
+@@ -704,7 +704,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ * neesd to be flushed.
+ */
+ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
+- pmd_t *pmdp)
++ pmd_t *pmdp, unsigned long old_pmd)
+ {
+ int ssize, i;
+ unsigned long s_addr;
+@@ -726,12 +726,29 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
+ if (!hpte_slot_array)
+ return;
+
+- /* get the base page size */
++ /* get the base page size,vsid and segment size */
++#ifdef CONFIG_DEBUG_VM
+ psize = get_slice_psize(mm, s_addr);
++ BUG_ON(psize == MMU_PAGE_16M);
++#endif
++ if (old_pmd & _PAGE_COMBO)
++ psize = MMU_PAGE_4K;
++ else
++ psize = MMU_PAGE_64K;
++
++ if (!is_kernel_addr(s_addr)) {
++ ssize = user_segment_size(s_addr);
++ vsid = get_vsid(mm->context.id, s_addr, ssize);
++ WARN_ON(vsid == 0);
++ } else {
++ vsid = get_kernel_vsid(s_addr, mmu_kernel_ssize);
++ ssize = mmu_kernel_ssize;
++ }
+
+ if (ppc_md.hugepage_invalidate)
+- return ppc_md.hugepage_invalidate(mm, hpte_slot_array,
+- s_addr, psize);
++ return ppc_md.hugepage_invalidate(vsid, s_addr,
++ hpte_slot_array,
++ psize, ssize);
+ /*
+ * No bluk hpte removal support, invalidate each entry
+ */
+@@ -749,15 +766,6 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
+
+ /* get the vpn */
+ addr = s_addr + (i * (1ul << shift));
+- if (!is_kernel_addr(addr)) {
+- ssize = user_segment_size(addr);
+- vsid = get_vsid(mm->context.id, addr, ssize);
+- WARN_ON(vsid == 0);
+- } else {
+- vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
+- ssize = mmu_kernel_ssize;
+- }
+-
+ vpn = hpt_vpn(addr, vsid, ssize);
+ hash = hpt_hash(vpn, shift, ssize);
+ if (hidx & _PTEIDX_SECONDARY)
+diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
+index 36e44b4260eb..c66e445d9890 100644
+--- a/arch/powerpc/mm/tlb_hash64.c
++++ b/arch/powerpc/mm/tlb_hash64.c
+@@ -217,7 +217,7 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
+ if (!(pte & _PAGE_HASHPTE))
+ continue;
+ if (unlikely(hugepage_shift && pmd_trans_huge(*(pmd_t *)pte)))
+- hpte_do_hugepage_flush(mm, start, (pmd_t *)pte);
++ hpte_do_hugepage_flush(mm, start, (pmd_t *)ptep, pte);
+ else
+ hpte_need_flush(mm, start, ptep, pte, 0);
+ }
+diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
+index 9a432de363b8..bebe64ed5dc3 100644
+--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
+@@ -158,7 +158,7 @@ static int pseries_remove_memory(struct device_node *np)
+ static inline int pseries_remove_memblock(unsigned long base,
+ unsigned int memblock_size)
+ {
+- return -EOPNOTSUPP;
++ return 0;
+ }
+ static inline int pseries_remove_memory(struct device_node *np)
+ {
+diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
+index 0307901e4132..261c5095d5d3 100644
+--- a/arch/powerpc/platforms/pseries/iommu.c
++++ b/arch/powerpc/platforms/pseries/iommu.c
+@@ -731,13 +731,13 @@ static inline void __remove_ddw(struct device_node *np, const u32 *ddw_avail, u6
+ np->full_name, ret, ddw_avail[2], liobn);
+ }
+
+-static void remove_ddw(struct device_node *np)
++static void remove_ddw(struct device_node *np, bool remove_prop)
+ {
+ struct dynamic_dma_window_prop *dwp;
+ struct property *win64;
+ const u32 *ddw_avail;
+ u64 liobn;
+- int len, ret;
++ int len, ret = 0;
+
+ ddw_avail = of_get_property(np, "ibm,ddw-applicable", &len);
+ win64 = of_find_property(np, DIRECT64_PROPNAME, NULL);
+@@ -763,7 +763,8 @@ static void remove_ddw(struct device_node *np)
+ __remove_ddw(np, ddw_avail, liobn);
+
+ delprop:
+- ret = of_remove_property(np, win64);
++ if (remove_prop)
++ ret = of_remove_property(np, win64);
+ if (ret)
+ pr_warning("%s: failed to remove direct window property: %d\n",
+ np->full_name, ret);
+@@ -835,7 +836,7 @@ static int find_existing_ddw_windows(void)
+ * can clear the table or find the holes. To that end,
+ * first, remove any existing DDW configuration.
+ */
+- remove_ddw(pdn);
++ remove_ddw(pdn, true);
+
+ /*
+ * Second, if we are running on a new enough level of
+@@ -1125,7 +1126,7 @@ out_free_window:
+ kfree(window);
+
+ out_clear_window:
+- remove_ddw(pdn);
++ remove_ddw(pdn, true);
+
+ out_free_prop:
+ kfree(win64->name);
+@@ -1337,7 +1338,14 @@ static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long acti
+
+ switch (action) {
+ case OF_RECONFIG_DETACH_NODE:
+- remove_ddw(np);
++ /*
++ * Removing the property will invoke the reconfig
++ * notifier again, which causes dead-lock on the
++ * read-write semaphore of the notifier chain. So
++ * we have to remove the property when releasing
++ * the device node.
++ */
++ remove_ddw(np, false);
+ if (pci && pci->iommu_table)
+ iommu_free_table(pci->iommu_table, np->full_name);
+
+diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
+index 356bc75ca74f..691a479f7d97 100644
+--- a/arch/powerpc/platforms/pseries/lpar.c
++++ b/arch/powerpc/platforms/pseries/lpar.c
+@@ -412,16 +412,17 @@ static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
+ spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
+ }
+
+-static void pSeries_lpar_hugepage_invalidate(struct mm_struct *mm,
+- unsigned char *hpte_slot_array,
+- unsigned long addr, int psize)
++static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
++ unsigned long addr,
++ unsigned char *hpte_slot_array,
++ int psize, int ssize)
+ {
+- int ssize = 0, i, index = 0;
++ int i, index = 0;
+ unsigned long s_addr = addr;
+ unsigned int max_hpte_count, valid;
+ unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
+ unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
+- unsigned long shift, hidx, vpn = 0, vsid, hash, slot;
++ unsigned long shift, hidx, vpn = 0, hash, slot;
+
+ shift = mmu_psize_defs[psize].shift;
+ max_hpte_count = 1U << (PMD_SHIFT - shift);
+@@ -434,15 +435,6 @@ static void pSeries_lpar_hugepage_invalidate(struct mm_struct *mm,
+
+ /* get the vpn */
+ addr = s_addr + (i * (1ul << shift));
+- if (!is_kernel_addr(addr)) {
+- ssize = user_segment_size(addr);
+- vsid = get_vsid(mm->context.id, addr, ssize);
+- WARN_ON(vsid == 0);
+- } else {
+- vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
+- ssize = mmu_kernel_ssize;
+- }
+-
+ vpn = hpt_vpn(addr, vsid, ssize);
+ hash = hpt_hash(vpn, shift, ssize);
+ if (hidx & _PTEIDX_SECONDARY)
+diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
+index 6671e8db1861..faa97bd4948e 100644
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -93,6 +93,7 @@ config S390
+ select ARCH_INLINE_WRITE_UNLOCK_IRQ
+ select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
+ select ARCH_SAVE_PAGE_KEYS if HIBERNATION
++ select ARCH_SUPPORTS_ATOMIC_RMW
+ select ARCH_USE_CMPXCHG_LOCKREF
+ select ARCH_WANT_IPC_PARSE_VERSION
+ select BUILDTIME_EXTABLE_SORT
+diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
+index 0ea10f27d613..cb6cfcd034cf 100644
+--- a/arch/x86/include/asm/irq.h
++++ b/arch/x86/include/asm/irq.h
+@@ -25,6 +25,7 @@ extern void irq_ctx_init(int cpu);
+
+ #ifdef CONFIG_HOTPLUG_CPU
+ #include <linux/cpumask.h>
++extern int check_irq_vectors_for_cpu_disable(void);
+ extern void fixup_irqs(void);
+ extern void irq_force_complete_move(int);
+ #endif
+diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
+index 22d0687e7fda..39100783cf26 100644
+--- a/arch/x86/kernel/irq.c
++++ b/arch/x86/kernel/irq.c
+@@ -262,6 +262,83 @@ __visible void smp_trace_x86_platform_ipi(struct pt_regs *regs)
+ EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
+
+ #ifdef CONFIG_HOTPLUG_CPU
+
++/* These two declarations are only used in check_irq_vectors_for_cpu_disable()
++ * below, which is protected by stop_machine(). Putting them on the stack
++ * results in a stack frame overflow. Dynamically allocating could result in a
++ * failure so declare these two cpumasks as global.
++ */
++static struct cpumask affinity_new, online_new;
++
++/*
++ * This cpu is going to be removed and its vectors migrated to the remaining
++ * online cpus. Check to see if there are enough vectors in the remaining cpus.
++ * This function is protected by stop_machine().
++ */
++int check_irq_vectors_for_cpu_disable(void)
++{
++ int irq, cpu;
++ unsigned int this_cpu, vector, this_count, count;
++ struct irq_desc *desc;
++ struct irq_data *data;
++
++ this_cpu = smp_processor_id();
++ cpumask_copy(&online_new, cpu_online_mask);
++ cpu_clear(this_cpu, online_new);
++
++ this_count = 0;
++ for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
++ irq = __this_cpu_read(vector_irq[vector]);
++ if (irq >= 0) {
++ desc = irq_to_desc(irq);
++ data = irq_desc_get_irq_data(desc);
++ cpumask_copy(&affinity_new, data->affinity);
++ cpu_clear(this_cpu, affinity_new);
++
++ /* Do not count inactive or per-cpu irqs. */
++ if (!irq_has_action(irq) || irqd_is_per_cpu(data))
++ continue;
++
++ /*
++ * A single irq may be mapped to multiple
++ * cpu's vector_irq[] (for example IOAPIC cluster
++ * mode). In this case we have two
++ * possibilities:
++ *
++ * 1) the resulting affinity mask is empty; that is
++ * this the down'd cpu is the last cpu in the irq's
++ * affinity mask, or
++ *
++ * 2) the resulting affinity mask is no longer
++ * a subset of the online cpus but the affinity
++ * mask is not zero; that is the down'd cpu is the
++ * last online cpu in a user set affinity mask.
++ */
++ if (cpumask_empty(&affinity_new) ||
++ !cpumask_subset(&affinity_new, &online_new))
++ this_count++;
++ }
++ }
++
++ count = 0;
++ for_each_online_cpu(cpu) {
++ if (cpu == this_cpu)
++ continue;
++ for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
++ vector++) {
++ if (per_cpu(vector_irq, cpu)[vector] < 0)
++ count++;
++ }
++ }
++
++ if (count < this_count) {
++ pr_warn("CPU %d disable failed: CPU has %u vectors assigned and there are only %u available.\n",
++ this_cpu, this_count, count);
++ return -ERANGE;
++ }
++ return 0;
++}
++
+ /* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
+ void fixup_irqs(void)
+ {
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index 42c26a485533..b17dfe212233 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -1317,6 +1317,12 @@ void cpu_disable_common(void)
+
+ int native_cpu_disable(void)
+ {
++ int ret;
++
++ ret = check_irq_vectors_for_cpu_disable();
++ if (ret)
++ return ret;
++
+ clear_local_APIC();
+
+ cpu_disable_common();
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index d8f80e733cf8..a573d4bd71d9 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -866,6 +866,13 @@ void blkcg_drain_queue(struct request_queue *q)
+ if (!q->root_blkg)
+ return;
+
++ /*
++ * @q could be exiting and already have destroyed all blkgs as
++ * indicated by NULL root_blkg. If so, don't confuse policies.
++ */
++ if (!q->root_blkg)
++ return;
++
+ blk_throtl_drain(q);
+ }
+
+diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
+index 1731c27c36a6..2cac1d1f3863 100644
+--- a/drivers/acpi/acpica/utcopy.c
++++ b/drivers/acpi/acpica/utcopy.c
+@@ -1001,5 +1001,11 @@ acpi_ut_copy_iobject_to_iobject(union acpi_operand_object *source_desc,
+ status = acpi_ut_copy_simple_object(source_desc, *dest_desc);
+ }
+
++ /* Delete the allocated object if copy failed */
++
++ if (ACPI_FAILURE(status)) {
++ acpi_ut_remove_reference(*dest_desc);
++ }
++
+ return_ACPI_STATUS(status);
+ }
+diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
+index c7414a545a4f..2a4ae32c4b97 100644
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -1099,9 +1099,9 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+
+ if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {
+
+- cpuidle_pause_and_lock();
+ /* Protect against cpu-hotplug */
+ get_online_cpus();
++ cpuidle_pause_and_lock();
+
+ /* Disable all cpuidle devices */
+ for_each_online_cpu(cpu) {
+@@ -1128,8 +1128,8 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+ cpuidle_enable_device(dev);
+ }
+ }
+- put_online_cpus();
+ cpuidle_resume_and_unlock();
++ put_online_cpus();
+ }
+
+ return 0;
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index efa328bf6724..a875de67fb7c 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -304,6 +304,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, 0x9c85), board_ahci }, /* Wildcat Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */
++ { PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */
++ { PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series AHCI */
++ { PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */
++ { PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series RAID */
++ { PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */
++ { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
++ { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
++ { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */
+
+ /* JMicron 360/1/3/5/6, match class to avoid IDE function */
+ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+@@ -441,6 +449,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a),
+ .driver_data = board_ahci_yes_fbs }, /* 88se9172 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172),
++ .driver_data = board_ahci_yes_fbs }, /* 88se9182 */
++ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9182),
+ .driver_data = board_ahci_yes_fbs }, /* 88se9172 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192),
+ .driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 0d9a2f674819..5d0bc51bafea 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4227,7 +4227,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+ { "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+ { "Micron_M550*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+- { "Crucial_CT???M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
++ { "Crucial_CT*M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+
+ /*
+ * Some WD SATA-I drives spin up and down erratically when the link
+diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
+index f35f15f4d83e..f7badaa39eb6 100644
+--- a/drivers/ata/pata_scc.c
++++ b/drivers/ata/pata_scc.c
+@@ -586,7 +586,7 @@ static int scc_wait_after_reset(struct ata_link *link, unsigned int devmask,
+ * Note: Original code is ata_bus_softreset().
+ */
+
+-static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
++static int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
+ unsigned long deadline)
+ {
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+@@ -600,9 +600,7 @@ static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
+ udelay(20);
+ out_be32(ioaddr->ctl_addr, ap->ctl);
+
+- scc_wait_after_reset(&ap->link, devmask, deadline);
+-
+- return 0;
++ return scc_wait_after_reset(&ap->link, devmask, deadline);
+ }
+
+ /**
+@@ -619,7 +617,8 @@ static int scc_softreset(struct ata_link *link, unsigned int *classes,
+ {
+ struct ata_port *ap = link->ap;
+ unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
+- unsigned int devmask = 0, err_mask;
++ unsigned int devmask = 0;
++ int rc;
+ u8 err;
+
+ DPRINTK("ENTER\n");
+@@ -635,9 +634,9 @@ static int scc_softreset(struct ata_link *link, unsigned int *classes,
+
+ /* issue bus reset */
+ DPRINTK("about to softreset, devmask=%x\n", devmask);
+- err_mask = scc_bus_softreset(ap, devmask, deadline);
+- if (err_mask) {
+- ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", err_mask);
++ rc = scc_bus_softreset(ap, devmask, deadline);
++ if (rc) {
++ ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", rc);
+ return -EIO;
+ }
+
+diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
+index e3c974a6c522..48138b311460 100644
+--- a/drivers/char/tpm/tpm.c
++++ b/drivers/char/tpm/tpm.c
+@@ -533,11 +533,10 @@ static int tpm_startup(struct tpm_chip *chip, __be16 startup_type)
+ int tpm_get_timeouts(struct tpm_chip *chip)
+ {
+ struct tpm_cmd_t tpm_cmd;
+- struct timeout_t *timeout_cap;
++ unsigned long new_timeout[4];
++ unsigned long old_timeout[4];
+ struct duration_t *duration_cap;
+ ssize_t rc;
+- u32 timeout;
+- unsigned int scale = 1;
+
+ tpm_cmd.header.in = tpm_getcap_header;
+ tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
+@@ -571,25 +570,46 @@ int tpm_get_timeouts(struct tpm_chip *chip)
+ != sizeof(tpm_cmd.header.out) + sizeof(u32) + 4 * sizeof(u32))
+ return -EINVAL;
+
+- timeout_cap = &tpm_cmd.params.getcap_out.cap.timeout;
+- /* Don't overwrite default if value is 0 */
+- timeout = be32_to_cpu(timeout_cap->a);
+- if (timeout && timeout < 1000) {
+- /* timeouts in msec rather usec */
+- scale = 1000;
+- chip->vendor.timeout_adjusted = true;
++ old_timeout[0] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.a);
++ old_timeout[1] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.b);
++ old_timeout[2] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.c);
++ old_timeout[3] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.d);
++ memcpy(new_timeout, old_timeout, sizeof(new_timeout));
++
++ /*
++ * Provide ability for vendor overrides of timeout values in case
++ * of misreporting.
++ */
++ if (chip->vendor.update_timeouts != NULL)
++ chip->vendor.timeout_adjusted =
++ chip->vendor.update_timeouts(chip, new_timeout);
++
++ if (!chip->vendor.timeout_adjusted) {
++ /* Don't overwrite default if value is 0 */
++ if (new_timeout[0] != 0 && new_timeout[0] < 1000) {
++ int i;
++
++ /* timeouts in msec rather usec */
++ for (i = 0; i != ARRAY_SIZE(new_timeout); i++)
++ new_timeout[i] *= 1000;
++ chip->vendor.timeout_adjusted = true;
++ }
+ }
+- if (timeout)
+- chip->vendor.timeout_a = usecs_to_jiffies(timeout * scale);
+- timeout = be32_to_cpu(timeout_cap->b);
+- if (timeout)
+- chip->vendor.timeout_b = usecs_to_jiffies(timeout * scale);
+- timeout = be32_to_cpu(timeout_cap->c);
+- if (timeout)
+- chip->vendor.timeout_c = usecs_to_jiffies(timeout * scale);
+- timeout = be32_to_cpu(timeout_cap->d);
+- if (timeout)
+- chip->vendor.timeout_d = usecs_to_jiffies(timeout * scale);
++
++ /* Report adjusted timeouts */
++ if (chip->vendor.timeout_adjusted) {
++ dev_info(chip->dev,
++ HW_ERR "Adjusting reported timeouts: A %lu->%luus B %lu->%luus C %lu->%luus D %lu->%luus\n",
++ old_timeout[0], new_timeout[0],
++ old_timeout[1], new_timeout[1],
++ old_timeout[2], new_timeout[2],
++ old_timeout[3], new_timeout[3]);
++ }
++
++ chip->vendor.timeout_a = usecs_to_jiffies(new_timeout[0]);
++ chip->vendor.timeout_b = usecs_to_jiffies(new_timeout[1]);
++ chip->vendor.timeout_c = usecs_to_jiffies(new_timeout[2]);
++ chip->vendor.timeout_d = usecs_to_jiffies(new_timeout[3]);
+
+ duration:
+ tpm_cmd.header.in = tpm_getcap_header;
+@@ -1423,13 +1443,13 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
+ int err, total = 0, retries = 5;
+ u8 *dest = out;
+
++ if (!out || !num_bytes || max > TPM_MAX_RNG_DATA)
++ return -EINVAL;
++
+ chip = tpm_chip_find_get(chip_num);
+ if (chip == NULL)
+ return -ENODEV;
+
+- if (!out || !num_bytes || max > TPM_MAX_RNG_DATA)
+- return -EINVAL;
+-
+ do {
+ tpm_cmd.header.in = tpm_getrandom_header;
+ tpm_cmd.params.getrandom_in.num_bytes = cpu_to_be32(num_bytes);
+@@ -1448,6 +1468,7 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
+ num_bytes -= recd;
+ } while (retries-- && total < max);
+
++ tpm_chip_put(chip);
+ return total ? total : -EIO;
+ }
+ EXPORT_SYMBOL_GPL(tpm_get_random);
+diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
+index a7bfc176ed43..b911d79fbd58 100644
+--- a/drivers/char/tpm/tpm.h
++++ b/drivers/char/tpm/tpm.h
+@@ -95,6 +95,9 @@ struct tpm_vendor_specific {
+ int (*send) (struct tpm_chip *, u8 *, size_t);
+ void (*cancel) (struct tpm_chip *);
+ u8 (*status) (struct tpm_chip *);
++ bool (*update_timeouts)(struct tpm_chip *chip,
++ unsigned long *timeout_cap);
++
+ void (*release) (struct device *);
+ struct miscdevice miscdev;
+ struct attribute_group *attr_group;
+diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
+index 5796d0157ce0..e7b1a0ae4300 100644
+--- a/drivers/char/tpm/tpm_tis.c
++++ b/drivers/char/tpm/tpm_tis.c
+@@ -373,6 +373,36 @@ out_err:
+ return rc;
+ }
+
++struct tis_vendor_timeout_override {
++ u32 did_vid;
++ unsigned long timeout_us[4];
++};
++
++static const struct tis_vendor_timeout_override vendor_timeout_overrides[] = {
++ /* Atmel 3204 */
++ { 0x32041114, { (TIS_SHORT_TIMEOUT*1000), (TIS_LONG_TIMEOUT*1000),
++ (TIS_SHORT_TIMEOUT*1000), (TIS_SHORT_TIMEOUT*1000) } },
++};
++
++static bool tpm_tis_update_timeouts(struct tpm_chip *chip,
++ unsigned long *timeout_cap)
++{
++ int i;
++ u32 did_vid;
++
++ did_vid = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
++
++ for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) {
++ if (vendor_timeout_overrides[i].did_vid != did_vid)
++ continue;
++ memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us,
++ sizeof(vendor_timeout_overrides[i].timeout_us));
++ return true;
++ }
++
++ return false;
++}
++
+ /*
+ * Early probing for iTPM with STS_DATA_EXPECT flaw.
+ * Try sending command without itpm flag set and if that
+@@ -475,6 +505,7 @@ static struct tpm_vendor_specific tpm_tis = {
+ .recv = tpm_tis_recv,
+ .send = tpm_tis_send,
+ .cancel = tpm_tis_ready,
++ .update_timeouts = tpm_tis_update_timeouts,
+ .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_canceled = tpm_tis_req_canceled,
1534 +diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
1535 +index b22659cccca4..e6125522860a 100644
1536 +--- a/drivers/firmware/efi/vars.c
1537 ++++ b/drivers/firmware/efi/vars.c
1538 +@@ -481,7 +481,7 @@ EXPORT_SYMBOL_GPL(efivar_entry_remove);
1539 + */
1540 + static void efivar_entry_list_del_unlock(struct efivar_entry *entry)
1541 + {
1542 +- WARN_ON(!spin_is_locked(&__efivars->lock));
1543 ++ lockdep_assert_held(&__efivars->lock);
1544 +
1545 + list_del(&entry->list);
1546 + spin_unlock_irq(&__efivars->lock);
1547 +@@ -507,7 +507,7 @@ int __efivar_entry_delete(struct efivar_entry *entry)
1548 + const struct efivar_operations *ops = __efivars->ops;
1549 + efi_status_t status;
1550 +
1551 +- WARN_ON(!spin_is_locked(&__efivars->lock));
1552 ++ lockdep_assert_held(&__efivars->lock);
1553 +
1554 + status = ops->set_variable(entry->var.VariableName,
1555 + &entry->var.VendorGuid,
1556 +@@ -667,7 +667,7 @@ struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
1557 + int strsize1, strsize2;
1558 + bool found = false;
1559 +
1560 +- WARN_ON(!spin_is_locked(&__efivars->lock));
1561 ++ lockdep_assert_held(&__efivars->lock);
1562 +
1563 + list_for_each_entry_safe(entry, n, head, list) {
1564 + strsize1 = ucs2_strsize(name, 1024);
1565 +@@ -739,7 +739,7 @@ int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
1566 + const struct efivar_operations *ops = __efivars->ops;
1567 + efi_status_t status;
1568 +
1569 +- WARN_ON(!spin_is_locked(&__efivars->lock));
1570 ++ lockdep_assert_held(&__efivars->lock);
1571 +
1572 + status = ops->get_variable(entry->var.VariableName,
1573 + &entry->var.VendorGuid,
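All four replacements above trade a check that can misfire for one that states intent: spin_is_locked() only says that somebody holds the lock, and on uniprocessor builds it is constantly false, so WARN_ON(!spin_is_locked(...)) fires spuriously there. lockdep_assert_held() instead verifies that the current context holds the lock when CONFIG_LOCKDEP is enabled and compiles away entirely otherwise. A minimal sketch with a lock of its own:

        #include <linux/lockdep.h>
        #include <linux/spinlock.h>

        static DEFINE_SPINLOCK(demo_lock);

        /* Must only be called with demo_lock held. */
        static void demo_locked_helper(void)
        {
                lockdep_assert_held(&demo_lock); /* no-op without lockdep */
                /* ... touch state protected by demo_lock ... */
        }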
1574 +diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
1575 +index 7507fe036b6e..1ceb95a3bbe0 100644
1576 +--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
1577 ++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
1578 +@@ -423,6 +423,9 @@ static int init_ring_common(struct intel_ring_buffer *ring)
1579 + }
1580 + }
1581 +
1582 ++ /* Enforce ordering by reading HEAD register back */
1583 ++ I915_READ_HEAD(ring);
1584 ++
1585 + /* Initialize the ring. This must happen _after_ we've cleared the ring
1586 + * registers with the above sequence (the readback of the HEAD registers
1587 + * also enforces ordering), otherwise the hw might lose the new ring
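The added I915_READ_HEAD() is a posting read: MMIO writes may be buffered on the way to the device, and reading any register back from the same device forces the earlier writes to complete first, which is exactly the ordering the surrounding comment relies on. The generic idiom, with the offset as an illustrative stand-in:

        #include <linux/io.h>

        #define DEMO_HEAD_OFF 0x34 /* illustrative register offset */

        static void demo_reset_head(void __iomem *regs)
        {
                iowrite32(0, regs + DEMO_HEAD_OFF);   /* posted write */
                (void)ioread32(regs + DEMO_HEAD_OFF); /* readback flushes it */
        }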
1588 +diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
1589 +index 85ef9ff42aa6..9d9770d201ae 100644
1590 +--- a/drivers/gpu/drm/radeon/cik.c
1591 ++++ b/drivers/gpu/drm/radeon/cik.c
1592 +@@ -4769,12 +4769,13 @@ static void cik_vm_decode_fault(struct radeon_device *rdev,
1593 + void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
1594 + {
1595 + struct radeon_ring *ring = &rdev->ring[ridx];
1596 ++ int usepfp = (ridx == RADEON_RING_TYPE_GFX_INDEX);
1597 +
1598 + if (vm == NULL)
1599 + return;
1600 +
1601 + radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
1602 +- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
1603 ++ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
1604 + WRITE_DATA_DST_SEL(0)));
1605 + if (vm->id < 8) {
1606 + radeon_ring_write(ring,
1607 +@@ -4833,7 +4834,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
1608 + radeon_ring_write(ring, 1 << vm->id);
1609 +
1610 + /* compute doesn't have PFP */
1611 +- if (ridx == RADEON_RING_TYPE_GFX_INDEX) {
1612 ++ if (usepfp) {
1613 + /* sync PFP to ME, otherwise we might get invalid PFP reads */
1614 + radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
1615 + radeon_ring_write(ring, 0x0);
1616 +diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
1617 +index dfa641277175..402d4630d13e 100644
1618 +--- a/drivers/gpu/drm/radeon/radeon_atombios.c
1619 ++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
1620 +@@ -1963,7 +1963,7 @@ static const char *thermal_controller_names[] = {
1621 + "adm1032",
1622 + "adm1030",
1623 + "max6649",
1624 +- "lm64",
1625 ++ "lm63", /* lm64 */
1626 + "f75375",
1627 + "asc7xxx",
1628 + };
1629 +@@ -1974,7 +1974,7 @@ static const char *pp_lib_thermal_controller_names[] = {
1630 + "adm1032",
1631 + "adm1030",
1632 + "max6649",
1633 +- "lm64",
1634 ++ "lm63", /* lm64 */
1635 + "f75375",
1636 + "RV6xx",
1637 + "RV770",
1638 +diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
1639 +index 4d41a0dc1796..53769e9cf595 100644
1640 +--- a/drivers/gpu/drm/radeon/si.c
1641 ++++ b/drivers/gpu/drm/radeon/si.c
1642 +@@ -4757,7 +4757,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
1643 +
1644 + /* write new base address */
1645 + radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
1646 +- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
1647 ++ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
1648 + WRITE_DATA_DST_SEL(0)));
1649 +
1650 + if (vm->id < 8) {
1651 +diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
1652 +index d700698a1f22..bf980ea2b593 100644
1653 +--- a/drivers/gpu/drm/radeon/trinity_dpm.c
1654 ++++ b/drivers/gpu/drm/radeon/trinity_dpm.c
1655 +@@ -1868,7 +1868,16 @@ int trinity_dpm_init(struct radeon_device *rdev)
1656 + for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
1657 + pi->at[i] = TRINITY_AT_DFLT;
1658 +
1659 +- pi->enable_bapm = false;
1660 ++ /* There are stability issues reported on some systems with
1661 ++ * bapm enabled when switching between AC and battery
1662 ++ * power. At the same time, some MSI boards hang
1663 ++ * if it's not enabled and dpm is enabled. Just enable
1664 ++ * it for MSI boards right now.
1665 ++ */
1666 ++ if (rdev->pdev->subsystem_vendor == 0x1462)
1667 ++ pi->enable_bapm = true;
1668 ++ else
1669 ++ pi->enable_bapm = false;
1670 + pi->enable_nbps_policy = true;
1671 + pi->enable_sclk_ds = true;
1672 + pi->enable_gfx_power_gating = true;
1673 +diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
1674 +index 116da199b942..af1b17a0db66 100644
1675 +--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
1676 ++++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
1677 +@@ -122,6 +122,7 @@ static int tilcdc_unload(struct drm_device *dev)
1678 + struct tilcdc_drm_private *priv = dev->dev_private;
1679 + struct tilcdc_module *mod, *cur;
1680 +
1681 ++ drm_fbdev_cma_fini(priv->fbdev);
1682 + drm_kms_helper_poll_fini(dev);
1683 + drm_mode_config_cleanup(dev);
1684 + drm_vblank_cleanup(dev);
1685 +@@ -628,10 +629,10 @@ static int __init tilcdc_drm_init(void)
1686 + static void __exit tilcdc_drm_fini(void)
1687 + {
1688 + DBG("fini");
1689 +- tilcdc_tfp410_fini();
1690 +- tilcdc_slave_fini();
1691 +- tilcdc_panel_fini();
1692 + platform_driver_unregister(&tilcdc_platform_driver);
1693 ++ tilcdc_panel_fini();
1694 ++ tilcdc_slave_fini();
1695 ++ tilcdc_tfp410_fini();
1696 + }
1697 +
1698 + late_initcall(tilcdc_drm_init);
1699 +diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
1700 +index 86c67329b605..b085dcc54fb5 100644
1701 +--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
1702 ++++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
1703 +@@ -151,6 +151,7 @@ struct panel_connector {
1704 + static void panel_connector_destroy(struct drm_connector *connector)
1705 + {
1706 + struct panel_connector *panel_connector = to_panel_connector(connector);
1707 ++ drm_sysfs_connector_remove(connector);
1708 + drm_connector_cleanup(connector);
1709 + kfree(panel_connector);
1710 + }
1711 +@@ -285,10 +286,8 @@ static void panel_destroy(struct tilcdc_module *mod)
1712 + {
1713 + struct panel_module *panel_mod = to_panel_module(mod);
1714 +
1715 +- if (panel_mod->timings) {
1716 ++ if (panel_mod->timings)
1717 + display_timings_release(panel_mod->timings);
1718 +- kfree(panel_mod->timings);
1719 +- }
1720 +
1721 + tilcdc_module_cleanup(mod);
1722 + kfree(panel_mod->info);
1723 +diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave.c b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
1724 +index 595068ba2d5e..2f83ffb7f37e 100644
1725 +--- a/drivers/gpu/drm/tilcdc/tilcdc_slave.c
1726 ++++ b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
1727 +@@ -166,6 +166,7 @@ struct slave_connector {
1728 + static void slave_connector_destroy(struct drm_connector *connector)
1729 + {
1730 + struct slave_connector *slave_connector = to_slave_connector(connector);
1731 ++ drm_sysfs_connector_remove(connector);
1732 + drm_connector_cleanup(connector);
1733 + kfree(slave_connector);
1734 + }
1735 +diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
1736 +index c38b56b268ac..ce75ac8de4f8 100644
1737 +--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
1738 ++++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
1739 +@@ -167,6 +167,7 @@ struct tfp410_connector {
1740 + static void tfp410_connector_destroy(struct drm_connector *connector)
1741 + {
1742 + struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
1743 ++ drm_sysfs_connector_remove(connector);
1744 + drm_connector_cleanup(connector);
1745 + kfree(tfp410_connector);
1746 + }
1747 +diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
1748 +index 863bef9f9234..cf4bad2c1d59 100644
1749 +--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
1750 ++++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
1751 +@@ -297,8 +297,10 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
1752 + *
1753 + * @pool: to free the pages from
1754 + * @free_all: If set to true will free all pages in pool
1755 ++ * @gfp: GFP flags.
1756 + **/
1757 +-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
1758 ++static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
1759 ++ gfp_t gfp)
1760 + {
1761 + unsigned long irq_flags;
1762 + struct page *p;
1763 +@@ -309,8 +311,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
1764 + if (NUM_PAGES_TO_ALLOC < nr_free)
1765 + npages_to_free = NUM_PAGES_TO_ALLOC;
1766 +
1767 +- pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
1768 +- GFP_KERNEL);
1769 ++ pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
1770 + if (!pages_to_free) {
1771 + pr_err("Failed to allocate memory for pool free operation\n");
1772 + return 0;
1773 +@@ -382,32 +383,35 @@ out:
1774 + *
1775 + * XXX: (dchinner) Deadlock warning!
1776 + *
1777 +- * ttm_page_pool_free() does memory allocation using GFP_KERNEL. that means
1778 +- * this can deadlock when called a sc->gfp_mask that is not equal to
1779 +- * GFP_KERNEL.
1780 ++ * We need to pass sc->gfp_mask to ttm_page_pool_free().
1781 + *
1782 + * This code is crying out for a shrinker per pool....
1783 + */
1784 + static unsigned long
1785 + ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1786 + {
1787 +- static atomic_t start_pool = ATOMIC_INIT(0);
1788 ++ static DEFINE_MUTEX(lock);
1789 ++ static unsigned start_pool;
1790 + unsigned i;
1791 +- unsigned pool_offset = atomic_add_return(1, &start_pool);
1792 ++ unsigned pool_offset;
1793 + struct ttm_page_pool *pool;
1794 + int shrink_pages = sc->nr_to_scan;
1795 + unsigned long freed = 0;
1796 +
1797 +- pool_offset = pool_offset % NUM_POOLS;
1798 ++ if (!mutex_trylock(&lock))
1799 ++ return SHRINK_STOP;
1800 ++ pool_offset = ++start_pool % NUM_POOLS;
1801 + /* select start pool in round robin fashion */
1802 + for (i = 0; i < NUM_POOLS; ++i) {
1803 + unsigned nr_free = shrink_pages;
1804 + if (shrink_pages == 0)
1805 + break;
1806 + pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
1807 +- shrink_pages = ttm_page_pool_free(pool, nr_free);
1808 ++ shrink_pages = ttm_page_pool_free(pool, nr_free,
1809 ++ sc->gfp_mask);
1810 + freed += nr_free - shrink_pages;
1811 + }
1812 ++ mutex_unlock(&lock);
1813 + return freed;
1814 + }
1815 +
1816 +@@ -706,7 +710,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
1817 + }
1818 + spin_unlock_irqrestore(&pool->lock, irq_flags);
1819 + if (npages)
1820 +- ttm_page_pool_free(pool, npages);
1821 ++ ttm_page_pool_free(pool, npages, GFP_KERNEL);
1822 + }
1823 +
1824 + /*
1825 +@@ -846,7 +850,8 @@ void ttm_page_alloc_fini(void)
1826 + ttm_pool_mm_shrink_fini(_manager);
1827 +
1828 + for (i = 0; i < NUM_POOLS; ++i)
1829 +- ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);
1830 ++ ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES,
1831 ++ GFP_KERNEL);
1832 +
1833 + kobject_put(&_manager->kobj);
1834 + _manager = NULL;
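Two reclaim-safety fixes land together in this file: ttm_page_pool_free() now takes the caller's GFP mask, so a shrink triggered from, say, a GFP_NOFS allocation no longer re-enters the allocator with GFP_KERNEL, and the shrinker serializes itself with mutex_trylock(), returning SHRINK_STOP instead of ever blocking inside reclaim. The resulting shape, reduced to a sketch (demo_pool_free() is hypothetical):

        #include <linux/mutex.h>
        #include <linux/shrinker.h>

        static DEFINE_MUTEX(demo_shrink_lock);

        static unsigned long demo_shrink_scan(struct shrinker *sh,
                                              struct shrink_control *sc)
        {
                unsigned long freed;

                if (!mutex_trylock(&demo_shrink_lock))
                        return SHRINK_STOP;  /* never block in reclaim */

                /* Propagate the reclaim context's GFP mask downward. */
                freed = demo_pool_free(sc->nr_to_scan, sc->gfp_mask);

                mutex_unlock(&demo_shrink_lock);
                return freed;
        }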
1835 +diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
1836 +index 7957beeeaf73..ae86e3513631 100644
1837 +--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
1838 ++++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
1839 +@@ -410,8 +410,10 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
1840 + *
1841 + * @pool: to free the pages from
1842 + * @nr_free: If set to true will free all pages in pool
1843 ++ * @gfp: GFP flags.
1844 + **/
1845 +-static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
1846 ++static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
1847 ++ gfp_t gfp)
1848 + {
1849 + unsigned long irq_flags;
1850 + struct dma_page *dma_p, *tmp;
1851 +@@ -429,8 +431,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
1852 + npages_to_free, nr_free);
1853 + }
1854 + #endif
1855 +- pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
1856 +- GFP_KERNEL);
1857 ++ pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
1858 +
1859 + if (!pages_to_free) {
1860 + pr_err("%s: Failed to allocate memory for pool free operation\n",
1861 +@@ -529,7 +530,7 @@ static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
1862 + if (pool->type != type)
1863 + continue;
1864 + /* Takes a spinlock.. */
1865 +- ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
1866 ++ ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, GFP_KERNEL);
1867 + WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
1868 + /* This code path is called after _all_ references to the
1869 + * struct device has been dropped - so nobody should be
1870 +@@ -982,7 +983,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
1871 +
1872 + /* shrink pool if necessary (only on !is_cached pools)*/
1873 + if (npages)
1874 +- ttm_dma_page_pool_free(pool, npages);
1875 ++ ttm_dma_page_pool_free(pool, npages, GFP_KERNEL);
1876 + ttm->state = tt_unpopulated;
1877 + }
1878 + EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
1879 +@@ -992,10 +993,7 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
1880 + *
1881 + * XXX: (dchinner) Deadlock warning!
1882 + *
1883 +- * ttm_dma_page_pool_free() does GFP_KERNEL memory allocation, and so attention
1884 +- * needs to be paid to sc->gfp_mask to determine if this can be done or not.
1885 +- * GFP_KERNEL memory allocation in a GFP_ATOMIC reclaim context woul dbe really
1886 +- * bad.
1887 ++ * We need to pass sc->gfp_mask to ttm_dma_page_pool_free().
1888 + *
1889 + * I'm getting sadder as I hear more pathetical whimpers about needing per-pool
1890 + * shrinkers
1891 +@@ -1003,9 +1001,9 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
1892 + static unsigned long
1893 + ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1894 + {
1895 +- static atomic_t start_pool = ATOMIC_INIT(0);
1896 ++ static unsigned start_pool;
1897 + unsigned idx = 0;
1898 +- unsigned pool_offset = atomic_add_return(1, &start_pool);
1899 ++ unsigned pool_offset;
1900 + unsigned shrink_pages = sc->nr_to_scan;
1901 + struct device_pools *p;
1902 + unsigned long freed = 0;
1903 +@@ -1013,8 +1011,11 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1904 + if (list_empty(&_manager->pools))
1905 + return SHRINK_STOP;
1906 +
1907 +- mutex_lock(&_manager->lock);
1908 +- pool_offset = pool_offset % _manager->npools;
1909 ++ if (!mutex_trylock(&_manager->lock))
1910 ++ return SHRINK_STOP;
1911 ++ if (!_manager->npools)
1912 ++ goto out;
1913 ++ pool_offset = ++start_pool % _manager->npools;
1914 + list_for_each_entry(p, &_manager->pools, pools) {
1915 + unsigned nr_free;
1916 +
1917 +@@ -1026,13 +1027,15 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1918 + if (++idx < pool_offset)
1919 + continue;
1920 + nr_free = shrink_pages;
1921 +- shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
1922 ++ shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free,
1923 ++ sc->gfp_mask);
1924 + freed += nr_free - shrink_pages;
1925 +
1926 + pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
1927 + p->pool->dev_name, p->pool->name, current->pid,
1928 + nr_free, shrink_pages);
1929 + }
1930 ++out:
1931 + mutex_unlock(&_manager->lock);
1932 + return freed;
1933 + }
1934 +@@ -1043,7 +1046,8 @@ ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1935 + struct device_pools *p;
1936 + unsigned long count = 0;
1937 +
1938 +- mutex_lock(&_manager->lock);
1939 ++ if (!mutex_trylock(&_manager->lock))
1940 ++ return 0;
1941 + list_for_each_entry(p, &_manager->pools, pools)
1942 + count += p->pool->npages_free;
1943 + mutex_unlock(&_manager->lock);
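The count callback gets the same treatment on the DMA-pool side: when the manager lock is contended it reports zero objects rather than sleeping, which merely defers the shrink to a later pass. Continuing the earlier sketch (demo_pool_page_count() is hypothetical):

        static unsigned long demo_shrink_count(struct shrinker *sh,
                                               struct shrink_control *sc)
        {
                unsigned long count = 0;

                if (!mutex_trylock(&demo_shrink_lock))
                        return 0; /* contended: claim nothing this pass */
                count = demo_pool_page_count();
                mutex_unlock(&demo_shrink_lock);
                return count;
        }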
1944 +diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
1945 +index c47c2034ca71..4293e89bbbdd 100644
1946 +--- a/drivers/infiniband/core/iwcm.c
1947 ++++ b/drivers/infiniband/core/iwcm.c
1948 +@@ -46,6 +46,7 @@
1949 + #include <linux/completion.h>
1950 + #include <linux/slab.h>
1951 + #include <linux/module.h>
1952 ++#include <linux/sysctl.h>
1953 +
1954 + #include <rdma/iw_cm.h>
1955 + #include <rdma/ib_addr.h>
1956 +@@ -65,6 +66,20 @@ struct iwcm_work {
1957 + struct list_head free_list;
1958 + };
1959 +
1960 ++static unsigned int default_backlog = 256;
1961 ++
1962 ++static struct ctl_table_header *iwcm_ctl_table_hdr;
1963 ++static struct ctl_table iwcm_ctl_table[] = {
1964 ++ {
1965 ++ .procname = "default_backlog",
1966 ++ .data = &default_backlog,
1967 ++ .maxlen = sizeof(default_backlog),
1968 ++ .mode = 0644,
1969 ++ .proc_handler = proc_dointvec,
1970 ++ },
1971 ++ { }
1972 ++};
1973 ++
1974 + /*
1975 + * The following services provide a mechanism for pre-allocating iwcm_work
1976 + * elements. The design pre-allocates them based on the cm_id type:
1977 +@@ -419,6 +434,9 @@ int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
1978 +
1979 + cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
1980 +
1981 ++ if (!backlog)
1982 ++ backlog = default_backlog;
1983 ++
1984 + ret = alloc_work_entries(cm_id_priv, backlog);
1985 + if (ret)
1986 + return ret;
1987 +@@ -1024,11 +1042,20 @@ static int __init iw_cm_init(void)
1988 + if (!iwcm_wq)
1989 + return -ENOMEM;
1990 +
1991 ++ iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm",
1992 ++ iwcm_ctl_table);
1993 ++ if (!iwcm_ctl_table_hdr) {
1994 ++ pr_err("iw_cm: couldn't register sysctl paths\n");
1995 ++ destroy_workqueue(iwcm_wq);
1996 ++ return -ENOMEM;
1997 ++ }
1998 ++
1999 + return 0;
2000 + }
2001 +
2002 + static void __exit iw_cm_cleanup(void)
2003 + {
2004 ++ unregister_net_sysctl_table(iwcm_ctl_table_hdr);
2005 + destroy_workqueue(iwcm_wq);
2006 + }
2007 +
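With this in place, a zero backlog passed to iw_cm_listen() falls back to a runtime tunable exposed as /proc/sys/net/iw_cm/default_backlog. Note that proc_dointvec puts no bounds on what an administrator may write; if limits were wanted, proc_dointvec_minmax is the usual defensive variant. A sketch under that assumption (the bounds are made up; default_backlog is the variable added above):

        static int backlog_min = 1;
        static int backlog_max = 65536; /* illustrative ceiling */

        static struct ctl_table iwcm_ctl_table_bounded[] = {
                {
                        .procname     = "default_backlog",
                        .data         = &default_backlog,
                        .maxlen       = sizeof(default_backlog),
                        .mode         = 0644,
                        .proc_handler = proc_dointvec_minmax,
                        .extra1       = &backlog_min,
                        .extra2       = &backlog_max,
                },
                { }
        };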
2008 +diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
2009 +index 024fa025a7ab..15984e1c0b61 100644
2010 +--- a/drivers/infiniband/ulp/srp/ib_srp.c
2011 ++++ b/drivers/infiniband/ulp/srp/ib_srp.c
2012 +@@ -93,6 +93,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
2013 + static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
2014 +
2015 + static struct scsi_transport_template *ib_srp_transport_template;
2016 ++static struct workqueue_struct *srp_remove_wq;
2017 +
2018 + static struct ib_client srp_client = {
2019 + .name = "srp",
2020 +@@ -458,7 +459,7 @@ static bool srp_queue_remove_work(struct srp_target_port *target)
2021 + spin_unlock_irq(&target->lock);
2022 +
2023 + if (changed)
2024 +- queue_work(system_long_wq, &target->remove_work);
2025 ++ queue_work(srp_remove_wq, &target->remove_work);
2026 +
2027 + return changed;
2028 + }
2029 +@@ -2602,9 +2603,10 @@ static void srp_remove_one(struct ib_device *device)
2030 + spin_unlock(&host->target_lock);
2031 +
2032 + /*
2033 +- * Wait for target port removal tasks.
2034 ++ * Wait for tl_err and target port removal tasks.
2035 + */
2036 + flush_workqueue(system_long_wq);
2037 ++ flush_workqueue(srp_remove_wq);
2038 +
2039 + kfree(host);
2040 + }
2041 +@@ -2649,16 +2651,22 @@ static int __init srp_init_module(void)
2042 + indirect_sg_entries = cmd_sg_entries;
2043 + }
2044 +
2045 ++ srp_remove_wq = create_workqueue("srp_remove");
2046 ++ if (IS_ERR(srp_remove_wq)) {
2047 ++ ret = PTR_ERR(srp_remove_wq);
2048 ++ goto out;
2049 ++ }
2050 ++
2051 ++ ret = -ENOMEM;
2052 + ib_srp_transport_template =
2053 + srp_attach_transport(&ib_srp_transport_functions);
2054 + if (!ib_srp_transport_template)
2055 +- return -ENOMEM;
2056 ++ goto destroy_wq;
2057 +
2058 + ret = class_register(&srp_class);
2059 + if (ret) {
2060 + pr_err("couldn't register class infiniband_srp\n");
2061 +- srp_release_transport(ib_srp_transport_template);
2062 +- return ret;
2063 ++ goto release_tr;
2064 + }
2065 +
2066 + ib_sa_register_client(&srp_sa_client);
2067 +@@ -2666,13 +2674,22 @@ static int __init srp_init_module(void)
2068 + ret = ib_register_client(&srp_client);
2069 + if (ret) {
2070 + pr_err("couldn't register IB client\n");
2071 +- srp_release_transport(ib_srp_transport_template);
2072 +- ib_sa_unregister_client(&srp_sa_client);
2073 +- class_unregister(&srp_class);
2074 +- return ret;
2075 ++ goto unreg_sa;
2076 + }
2077 +
2078 +- return 0;
2079 ++out:
2080 ++ return ret;
2081 ++
2082 ++unreg_sa:
2083 ++ ib_sa_unregister_client(&srp_sa_client);
2084 ++ class_unregister(&srp_class);
2085 ++
2086 ++release_tr:
2087 ++ srp_release_transport(ib_srp_transport_template);
2088 ++
2089 ++destroy_wq:
2090 ++ destroy_workqueue(srp_remove_wq);
2091 ++ goto out;
2092 + }
2093 +
2094 + static void __exit srp_cleanup_module(void)
2095 +@@ -2681,6 +2698,7 @@ static void __exit srp_cleanup_module(void)
2096 + ib_sa_unregister_client(&srp_sa_client);
2097 + class_unregister(&srp_class);
2098 + srp_release_transport(ib_srp_transport_template);
2099 ++ destroy_workqueue(srp_remove_wq);
2100 + }
2101 +
2102 + module_init(srp_init_module);
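The rewrite turns four ad-hoc error returns into the kernel's standard goto-unwind ladder: resources acquired in order, released in exact reverse order, one label per resource. (One hedge: create_workqueue() returns NULL on failure rather than an ERR_PTR, so the IS_ERR() test above looks like it can never trigger; a plain NULL check setting -ENOMEM would be the conventional guard.) The ladder's general shape, with setup_*/teardown_* as hypothetical stand-ins:

        static int __init demo_init(void)
        {
                int ret;

                ret = setup_a();
                if (ret)
                        goto out;
                ret = setup_b();
                if (ret)
                        goto undo_a;
                ret = setup_c();
                if (ret)
                        goto undo_b;
                return 0;

        undo_b:
                teardown_b();
        undo_a:
                teardown_a();
        out:
                return ret;
        }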
2103 +diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
2104 +index 22f656e125dd..67644e960592 100644
2105 +--- a/drivers/iommu/amd_iommu.c
2106 ++++ b/drivers/iommu/amd_iommu.c
2107 +@@ -3227,14 +3227,16 @@ free_domains:
2108 +
2109 + static void cleanup_domain(struct protection_domain *domain)
2110 + {
2111 +- struct iommu_dev_data *dev_data, *next;
2112 ++ struct iommu_dev_data *entry;
2113 + unsigned long flags;
2114 +
2115 + write_lock_irqsave(&amd_iommu_devtable_lock, flags);
2116 +
2117 +- list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
2118 +- __detach_device(dev_data);
2119 +- atomic_set(&dev_data->bind, 0);
2120 ++ while (!list_empty(&domain->dev_list)) {
2121 ++ entry = list_first_entry(&domain->dev_list,
2122 ++ struct iommu_dev_data, list);
2123 ++ __detach_device(entry);
2124 ++ atomic_set(&entry->bind, 0);
2125 + }
2126 +
2127 + write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
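list_for_each_entry_safe() only tolerates removal of the entry currently being visited, but __detach_device() can unlink other entries as well (aliased devices share dev_data), so the cached next pointer may already be off the list. Repeatedly taking the first entry sidesteps the problem. The general pattern, with struct item and destroy_item() as illustrative stand-ins:

        #include <linux/list.h>

        /* Safe even when destroy_item() unlinks entries other than 'it'. */
        static void drain_list(struct list_head *head)
        {
                struct item *it;

                while (!list_empty(head)) {
                        it = list_first_entry(head, struct item, list);
                        destroy_item(it); /* may remove several entries */
                }
        }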
2128 +diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
2129 +index 66c4aee20c72..9b582c9444f2 100644
2130 +--- a/drivers/md/raid1.c
2131 ++++ b/drivers/md/raid1.c
2132 +@@ -1406,12 +1406,12 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
2133 + mddev->degraded++;
2134 + set_bit(Faulty, &rdev->flags);
2135 + spin_unlock_irqrestore(&conf->device_lock, flags);
2136 +- /*
2137 +- * if recovery is running, make sure it aborts.
2138 +- */
2139 +- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2140 + } else
2141 + set_bit(Faulty, &rdev->flags);
2142 ++ /*
2143 ++ * if recovery is running, make sure it aborts.
2144 ++ */
2145 ++ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2146 + set_bit(MD_CHANGE_DEVS, &mddev->flags);
2147 + printk(KERN_ALERT
2148 + "md/raid1:%s: Disk failure on %s, disabling device.\n"
2149 +diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
2150 +index 308575d23550..9ccb107c982e 100644
2151 +--- a/drivers/md/raid10.c
2152 ++++ b/drivers/md/raid10.c
2153 +@@ -1698,13 +1698,12 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
2154 + spin_unlock_irqrestore(&conf->device_lock, flags);
2155 + return;
2156 + }
2157 +- if (test_and_clear_bit(In_sync, &rdev->flags)) {
2158 ++ if (test_and_clear_bit(In_sync, &rdev->flags))
2159 + mddev->degraded++;
2160 +- /*
2161 +- * if recovery is running, make sure it aborts.
2162 +- */
2163 +- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2164 +- }
2165 ++ /*
2166 ++ * If recovery is running, make sure it aborts.
2167 ++ */
2168 ++ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2169 + set_bit(Blocked, &rdev->flags);
2170 + set_bit(Faulty, &rdev->flags);
2171 + set_bit(MD_CHANGE_DEVS, &mddev->flags);
2172 +@@ -2970,6 +2969,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
2173 + */
2174 + if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
2175 + end_reshape(conf);
2176 ++ close_sync(conf);
2177 + return 0;
2178 + }
2179 +
2180 +@@ -4420,7 +4420,7 @@ read_more:
2181 + read_bio->bi_private = r10_bio;
2182 + read_bio->bi_end_io = end_sync_read;
2183 + read_bio->bi_rw = READ;
2184 +- read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
2185 ++ read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
2186 + read_bio->bi_flags |= 1 << BIO_UPTODATE;
2187 + read_bio->bi_vcnt = 0;
2188 + read_bio->bi_size = 0;
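The new mask clears exactly the per-bio state bits below BIO_RESET_BITS while preserving everything stored above them (notably the bio pool index), where the old BIO_POOL_MASK-derived expression wiped a different span. A userspace illustration with made-up constants (the real ones live in include/linux/blk_types.h):

        #include <stdio.h>

        #define DEMO_RESET_BITS 13 /* illustrative, not the kernel value */
        #define DEMO_UPTODATE   3

        int main(void)
        {
                unsigned long flags = 0xabcdef12UL;

                flags &= (~0UL << DEMO_RESET_BITS); /* clear resettable bits */
                flags |= 1UL << DEMO_UPTODATE;      /* then mark UPTODATE */
                printf("flags = %#lx\n", flags);
                return 0;
        }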
2189 +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
2190 +index 3ecfb063ec0b..42510e40c23c 100644
2191 +--- a/drivers/md/raid5.c
2192 ++++ b/drivers/md/raid5.c
2193 +@@ -3672,6 +3672,8 @@ static void handle_stripe(struct stripe_head *sh)
2194 + set_bit(R5_Wantwrite, &dev->flags);
2195 + if (prexor)
2196 + continue;
2197 ++ if (s.failed > 1)
2198 ++ continue;
2199 + if (!test_bit(R5_Insync, &dev->flags) ||
2200 + ((i == sh->pd_idx || i == sh->qd_idx) &&
2201 + s.failed == 0))
2202 +diff --git a/drivers/media/common/siano/Kconfig b/drivers/media/common/siano/Kconfig
2203 +index f953d33ee151..4bfbd5f463d1 100644
2204 +--- a/drivers/media/common/siano/Kconfig
2205 ++++ b/drivers/media/common/siano/Kconfig
2206 +@@ -22,8 +22,7 @@ config SMS_SIANO_DEBUGFS
2207 + bool "Enable debugfs for smsdvb"
2208 + depends on SMS_SIANO_MDTV
2209 + depends on DEBUG_FS
2210 +- depends on SMS_USB_DRV
2211 +- depends on CONFIG_SMS_USB_DRV = CONFIG_SMS_SDIO_DRV
2212 ++ depends on SMS_USB_DRV = SMS_SDIO_DRV
2213 +
2214 + ---help---
2215 + Choose Y to enable visualizing a dump of the frontend
2216 +diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
2217 +index 703560fa5e73..88c1606fd555 100644
2218 +--- a/drivers/media/media-device.c
2219 ++++ b/drivers/media/media-device.c
2220 +@@ -106,8 +106,6 @@ static long media_device_enum_entities(struct media_device *mdev,
2221 + if (ent->name) {
2222 + strncpy(u_ent.name, ent->name, sizeof(u_ent.name));
2223 + u_ent.name[sizeof(u_ent.name) - 1] = '\0';
2224 +- } else {
2225 +- memset(u_ent.name, 0, sizeof(u_ent.name));
2226 + }
2227 + u_ent.type = ent->type;
2228 + u_ent.revision = ent->revision;
2229 +diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
2230 +index 714c53ef6c11..2960ff1637d1 100644
2231 +--- a/drivers/media/platform/vsp1/vsp1_video.c
2232 ++++ b/drivers/media/platform/vsp1/vsp1_video.c
2233 +@@ -622,8 +622,6 @@ static int vsp1_video_buffer_prepare(struct vb2_buffer *vb)
2234 + if (vb->num_planes < format->num_planes)
2235 + return -EINVAL;
2236 +
2237 +- buf->video = video;
2238 +-
2239 + for (i = 0; i < vb->num_planes; ++i) {
2240 + buf->addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
2241 + buf->length[i] = vb2_plane_size(vb, i);
2242 +diff --git a/drivers/media/platform/vsp1/vsp1_video.h b/drivers/media/platform/vsp1/vsp1_video.h
2243 +index d8612a378345..47b7a8ab5e2f 100644
2244 +--- a/drivers/media/platform/vsp1/vsp1_video.h
2245 ++++ b/drivers/media/platform/vsp1/vsp1_video.h
2246 +@@ -89,7 +89,6 @@ static inline struct vsp1_pipeline *to_vsp1_pipeline(struct media_entity *e)
2247 + }
2248 +
2249 + struct vsp1_video_buffer {
2250 +- struct vsp1_video *video;
2251 + struct vb2_buffer buf;
2252 + struct list_head queue;
2253 +
2254 +diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c
2255 +index 2018befabb5a..e71decbfd0af 100644
2256 +--- a/drivers/media/tuners/xc4000.c
2257 ++++ b/drivers/media/tuners/xc4000.c
2258 +@@ -93,7 +93,7 @@ struct xc4000_priv {
2259 + struct firmware_description *firm;
2260 + int firm_size;
2261 + u32 if_khz;
2262 +- u32 freq_hz;
2263 ++ u32 freq_hz, freq_offset;
2264 + u32 bandwidth;
2265 + u8 video_standard;
2266 + u8 rf_mode;
2267 +@@ -1157,14 +1157,14 @@ static int xc4000_set_params(struct dvb_frontend *fe)
2268 + case SYS_ATSC:
2269 + dprintk(1, "%s() VSB modulation\n", __func__);
2270 + priv->rf_mode = XC_RF_MODE_AIR;
2271 +- priv->freq_hz = c->frequency - 1750000;
2272 ++ priv->freq_offset = 1750000;
2273 + priv->video_standard = XC4000_DTV6;
2274 + type = DTV6;
2275 + break;
2276 + case SYS_DVBC_ANNEX_B:
2277 + dprintk(1, "%s() QAM modulation\n", __func__);
2278 + priv->rf_mode = XC_RF_MODE_CABLE;
2279 +- priv->freq_hz = c->frequency - 1750000;
2280 ++ priv->freq_offset = 1750000;
2281 + priv->video_standard = XC4000_DTV6;
2282 + type = DTV6;
2283 + break;
2284 +@@ -1173,23 +1173,23 @@ static int xc4000_set_params(struct dvb_frontend *fe)
2285 + dprintk(1, "%s() OFDM\n", __func__);
2286 + if (bw == 0) {
2287 + if (c->frequency < 400000000) {
2288 +- priv->freq_hz = c->frequency - 2250000;
2289 ++ priv->freq_offset = 2250000;
2290 + } else {
2291 +- priv->freq_hz = c->frequency - 2750000;
2292 ++ priv->freq_offset = 2750000;
2293 + }
2294 + priv->video_standard = XC4000_DTV7_8;
2295 + type = DTV78;
2296 + } else if (bw <= 6000000) {
2297 + priv->video_standard = XC4000_DTV6;
2298 +- priv->freq_hz = c->frequency - 1750000;
2299 ++ priv->freq_offset = 1750000;
2300 + type = DTV6;
2301 + } else if (bw <= 7000000) {
2302 + priv->video_standard = XC4000_DTV7;
2303 +- priv->freq_hz = c->frequency - 2250000;
2304 ++ priv->freq_offset = 2250000;
2305 + type = DTV7;
2306 + } else {
2307 + priv->video_standard = XC4000_DTV8;
2308 +- priv->freq_hz = c->frequency - 2750000;
2309 ++ priv->freq_offset = 2750000;
2310 + type = DTV8;
2311 + }
2312 + priv->rf_mode = XC_RF_MODE_AIR;
2313 +@@ -1200,6 +1200,8 @@ static int xc4000_set_params(struct dvb_frontend *fe)
2314 + goto fail;
2315 + }
2316 +
2317 ++ priv->freq_hz = c->frequency - priv->freq_offset;
2318 ++
2319 + dprintk(1, "%s() frequency=%d (compensated)\n",
2320 + __func__, priv->freq_hz);
2321 +
2322 +@@ -1520,7 +1522,7 @@ static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq)
2323 + {
2324 + struct xc4000_priv *priv = fe->tuner_priv;
2325 +
2326 +- *freq = priv->freq_hz;
2327 ++ *freq = priv->freq_hz + priv->freq_offset;
2328 +
2329 + if (debug) {
2330 + mutex_lock(&priv->lock);
2331 +diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c
2332 +index 5cd09a681b6a..b2d9e9cb97f7 100644
2333 +--- a/drivers/media/tuners/xc5000.c
2334 ++++ b/drivers/media/tuners/xc5000.c
2335 +@@ -55,7 +55,7 @@ struct xc5000_priv {
2336 +
2337 + u32 if_khz;
2338 + u16 xtal_khz;
2339 +- u32 freq_hz;
2340 ++ u32 freq_hz, freq_offset;
2341 + u32 bandwidth;
2342 + u8 video_standard;
2343 + u8 rf_mode;
2344 +@@ -755,13 +755,13 @@ static int xc5000_set_params(struct dvb_frontend *fe)
2345 + case SYS_ATSC:
2346 + dprintk(1, "%s() VSB modulation\n", __func__);
2347 + priv->rf_mode = XC_RF_MODE_AIR;
2348 +- priv->freq_hz = freq - 1750000;
2349 ++ priv->freq_offset = 1750000;
2350 + priv->video_standard = DTV6;
2351 + break;
2352 + case SYS_DVBC_ANNEX_B:
2353 + dprintk(1, "%s() QAM modulation\n", __func__);
2354 + priv->rf_mode = XC_RF_MODE_CABLE;
2355 +- priv->freq_hz = freq - 1750000;
2356 ++ priv->freq_offset = 1750000;
2357 + priv->video_standard = DTV6;
2358 + break;
2359 + case SYS_ISDBT:
2360 +@@ -776,15 +776,15 @@ static int xc5000_set_params(struct dvb_frontend *fe)
2361 + switch (bw) {
2362 + case 6000000:
2363 + priv->video_standard = DTV6;
2364 +- priv->freq_hz = freq - 1750000;
2365 ++ priv->freq_offset = 1750000;
2366 + break;
2367 + case 7000000:
2368 + priv->video_standard = DTV7;
2369 +- priv->freq_hz = freq - 2250000;
2370 ++ priv->freq_offset = 2250000;
2371 + break;
2372 + case 8000000:
2373 + priv->video_standard = DTV8;
2374 +- priv->freq_hz = freq - 2750000;
2375 ++ priv->freq_offset = 2750000;
2376 + break;
2377 + default:
2378 + printk(KERN_ERR "xc5000 bandwidth not set!\n");
2379 +@@ -798,15 +798,15 @@ static int xc5000_set_params(struct dvb_frontend *fe)
2380 + priv->rf_mode = XC_RF_MODE_CABLE;
2381 + if (bw <= 6000000) {
2382 + priv->video_standard = DTV6;
2383 +- priv->freq_hz = freq - 1750000;
2384 ++ priv->freq_offset = 1750000;
2385 + b = 6;
2386 + } else if (bw <= 7000000) {
2387 + priv->video_standard = DTV7;
2388 +- priv->freq_hz = freq - 2250000;
2389 ++ priv->freq_offset = 2250000;
2390 + b = 7;
2391 + } else {
2392 + priv->video_standard = DTV7_8;
2393 +- priv->freq_hz = freq - 2750000;
2394 ++ priv->freq_offset = 2750000;
2395 + b = 8;
2396 + }
2397 + dprintk(1, "%s() Bandwidth %dMHz (%d)\n", __func__,
2398 +@@ -817,6 +817,8 @@ static int xc5000_set_params(struct dvb_frontend *fe)
2399 + return -EINVAL;
2400 + }
2401 +
2402 ++ priv->freq_hz = freq - priv->freq_offset;
2403 ++
2404 + dprintk(1, "%s() frequency=%d (compensated to %d)\n",
2405 + __func__, freq, priv->freq_hz);
2406 +
2407 +@@ -1067,7 +1069,7 @@ static int xc5000_get_frequency(struct dvb_frontend *fe, u32 *freq)
2408 + {
2409 + struct xc5000_priv *priv = fe->tuner_priv;
2410 + dprintk(1, "%s()\n", __func__);
2411 +- *freq = priv->freq_hz;
2412 ++ *freq = priv->freq_hz + priv->freq_offset;
2413 + return 0;
2414 + }
2415 +
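Both tuners previously stored only the IF-compensated frequency, so get_frequency() reported a value 1.75 to 2.75 MHz below what the frontend had asked for. Recording the offset separately restores the round trip. The invariant, reduced to a pair of hypothetical helpers:

        struct demo_tuner {
                unsigned int freq_hz;     /* what the hardware is tuned to */
                unsigned int freq_offset; /* standard-dependent compensation */
        };

        static void demo_set(struct demo_tuner *t, unsigned int requested_hz,
                             unsigned int offset_hz)
        {
                t->freq_offset = offset_hz;
                t->freq_hz = requested_hz - offset_hz; /* programmed value */
        }

        static unsigned int demo_get(const struct demo_tuner *t)
        {
                return t->freq_hz + t->freq_offset; /* report the request */
        }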
2416 +diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
2417 +index f6154546b5c0..7ed75efa1c36 100644
2418 +--- a/drivers/media/usb/au0828/au0828-video.c
2419 ++++ b/drivers/media/usb/au0828/au0828-video.c
2420 +@@ -787,11 +787,27 @@ static int au0828_i2s_init(struct au0828_dev *dev)
2421 +
2422 + /*
2423 + * Auvitek au0828 analog stream enable
2424 +- * Please set interface0 to AS5 before enable the stream
2425 + */
2426 + static int au0828_analog_stream_enable(struct au0828_dev *d)
2427 + {
2428 ++ struct usb_interface *iface;
2429 ++ int ret;
2430 ++
2431 + dprintk(1, "au0828_analog_stream_enable called\n");
2432 ++
2433 ++ iface = usb_ifnum_to_if(d->usbdev, 0);
2434 ++ if (iface && iface->cur_altsetting->desc.bAlternateSetting != 5) {
2435 ++ dprintk(1, "Changing intf#0 to alt 5\n");
2436 ++ /* set au0828 interface0 to AS5 here again */
2437 ++ ret = usb_set_interface(d->usbdev, 0, 5);
2438 ++ if (ret < 0) {
2439 ++ printk(KERN_INFO "Au0828 can't set alt setting to 5!\n");
2440 ++ return -EBUSY;
2441 ++ }
2442 ++ }
2443 ++
2444 ++ /* FIXME: size should be calculated using d->width, d->height */
2445 ++
2446 + au0828_writereg(d, AU0828_SENSORCTRL_VBI_103, 0x00);
2447 + au0828_writereg(d, 0x106, 0x00);
2448 + /* set x position */
2449 +@@ -1002,15 +1018,6 @@ static int au0828_v4l2_open(struct file *filp)
2450 + return -ERESTARTSYS;
2451 + }
2452 + if (dev->users == 0) {
2453 +- /* set au0828 interface0 to AS5 here again */
2454 +- ret = usb_set_interface(dev->usbdev, 0, 5);
2455 +- if (ret < 0) {
2456 +- mutex_unlock(&dev->lock);
2457 +- printk(KERN_INFO "Au0828 can't set alternate to 5!\n");
2458 +- kfree(fh);
2459 +- return -EBUSY;
2460 +- }
2461 +-
2462 + au0828_analog_stream_enable(dev);
2463 + au0828_analog_stream_reset(dev);
2464 +
2465 +@@ -1252,13 +1259,6 @@ static int au0828_set_format(struct au0828_dev *dev, unsigned int cmd,
2466 + }
2467 + }
2468 +
2469 +- /* set au0828 interface0 to AS5 here again */
2470 +- ret = usb_set_interface(dev->usbdev, 0, 5);
2471 +- if (ret < 0) {
2472 +- printk(KERN_INFO "Au0828 can't set alt setting to 5!\n");
2473 +- return -EBUSY;
2474 +- }
2475 +-
2476 + au0828_analog_stream_enable(dev);
2477 +
2478 + return 0;
2479 +diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
2480 +index 29ee54d68512..5dd653f9b094 100644
2481 +--- a/drivers/mfd/omap-usb-host.c
2482 ++++ b/drivers/mfd/omap-usb-host.c
2483 +@@ -445,7 +445,7 @@ static unsigned omap_usbhs_rev1_hostconfig(struct usbhs_hcd_omap *omap,
2484 +
2485 + for (i = 0; i < omap->nports; i++) {
2486 + if (is_ehci_phy_mode(pdata->port_mode[i])) {
2487 +- reg &= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
2488 ++ reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
2489 + break;
2490 + }
2491 + }
2492 +diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c
2493 +index 994ca4aff1a3..4b7ea3fb143c 100644
2494 +--- a/drivers/misc/mei/nfc.c
2495 ++++ b/drivers/misc/mei/nfc.c
2496 +@@ -342,9 +342,10 @@ static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
2497 + ndev = (struct mei_nfc_dev *) cldev->priv_data;
2498 + dev = ndev->cl->dev;
2499 +
2500 ++ err = -ENOMEM;
2501 + mei_buf = kzalloc(length + MEI_NFC_HEADER_SIZE, GFP_KERNEL);
2502 + if (!mei_buf)
2503 +- return -ENOMEM;
2504 ++ goto out;
2505 +
2506 + hdr = (struct mei_nfc_hci_hdr *) mei_buf;
2507 + hdr->cmd = MEI_NFC_CMD_HCI_SEND;
2508 +@@ -354,12 +355,9 @@ static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
2509 + hdr->data_size = length;
2510 +
2511 + memcpy(mei_buf + MEI_NFC_HEADER_SIZE, buf, length);
2512 +-
2513 + err = __mei_cl_send(ndev->cl, mei_buf, length + MEI_NFC_HEADER_SIZE);
2514 + if (err < 0)
2515 +- return err;
2516 +-
2517 +- kfree(mei_buf);
2518 ++ goto out;
2519 +
2520 + if (!wait_event_interruptible_timeout(ndev->send_wq,
2521 + ndev->recv_req_id == ndev->req_id, HZ)) {
2522 +@@ -368,7 +366,8 @@ static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
2523 + } else {
2524 + ndev->req_id++;
2525 + }
2526 +-
2527 ++out:
2528 ++ kfree(mei_buf);
2529 + return err;
2530 + }
2531 +
2532 +diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
2533 +index 19d637266fcd..71e4f6ccae2f 100644
2534 +--- a/drivers/mtd/ftl.c
2535 ++++ b/drivers/mtd/ftl.c
2536 +@@ -1075,7 +1075,6 @@ static void ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
2537 + return;
2538 + }
2539 +
2540 +- ftl_freepart(partition);
2541 + kfree(partition);
2542 + }
2543 +
2544 +diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
2545 +index 0332d0b2d73a..854662826272 100644
2546 +--- a/drivers/mtd/nand/omap2.c
2547 ++++ b/drivers/mtd/nand/omap2.c
2548 +@@ -948,7 +948,7 @@ static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
2549 + u32 val;
2550 +
2551 + val = readl(info->reg.gpmc_ecc_config);
2552 +- if (((val >> ECC_CONFIG_CS_SHIFT) & ~CS_MASK) != info->gpmc_cs)
2553 ++ if (((val >> ECC_CONFIG_CS_SHIFT) & CS_MASK) != info->gpmc_cs)
2554 + return -EINVAL;
2555 +
2556 + /* read ecc result */
2557 +diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
2558 +index 6c0fd8e0f9bf..895b086ec261 100644
2559 +--- a/drivers/net/ethernet/ibm/ibmveth.c
2560 ++++ b/drivers/net/ethernet/ibm/ibmveth.c
2561 +@@ -293,6 +293,18 @@ failure:
2562 + atomic_add(buffers_added, &(pool->available));
2563 + }
2564 +
2565 ++/*
2566 ++ * The final 8 bytes of the buffer list is a counter of frames dropped
2567 ++ * because there was not a buffer in the buffer list capable of holding
2568 ++ * the frame.
2569 ++ */
2570 ++static void ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter)
2571 ++{
2572 ++ __be64 *p = adapter->buffer_list_addr + 4096 - 8;
2573 ++
2574 ++ adapter->rx_no_buffer = be64_to_cpup(p);
2575 ++}
2576 ++
2577 + /* replenish routine */
2578 + static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
2579 + {
2580 +@@ -308,8 +320,7 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
2581 + ibmveth_replenish_buffer_pool(adapter, pool);
2582 + }
2583 +
2584 +- adapter->rx_no_buffer = *(u64 *)(((char*)adapter->buffer_list_addr) +
2585 +- 4096 - 8);
2586 ++ ibmveth_update_rx_no_buffer(adapter);
2587 + }
2588 +
2589 + /* empty and free a buffer pool - also used to do cleanup in error paths */
2590 +@@ -699,8 +710,7 @@ static int ibmveth_close(struct net_device *netdev)
2591 +
2592 + free_irq(netdev->irq, netdev);
2593 +
2594 +- adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
2595 +- 4096 - 8);
2596 ++ ibmveth_update_rx_no_buffer(adapter);
2597 +
2598 + ibmveth_cleanup(adapter);
2599 +
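Besides removing duplication, the helper fixes an endianness bug: the hypervisor stores the dropped-frame counter big-endian, so the old raw u64 load was only correct on big-endian CPUs, while be64_to_cpup() decodes it everywhere. A userspace illustration of the bug class:

        #include <stdio.h>
        #include <stdint.h>
        #include <string.h>

        int main(void)
        {
                /* Big-endian encoding of the value 0x0102. */
                uint8_t buf[8] = { 0, 0, 0, 0, 0, 0, 0x01, 0x02 };
                uint64_t raw, val = 0;
                int i;

                memcpy(&raw, buf, sizeof(raw)); /* old-style raw load */
                for (i = 0; i < 8; i++)         /* explicit BE decode */
                        val = (val << 8) | buf[i];

                /* On little-endian hosts raw != val; the decode is right. */
                printf("raw=%#llx decoded=%#llx\n",
                       (unsigned long long)raw, (unsigned long long)val);
                return 0;
        }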
2600 +diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
2601 +index 8596aba34f96..237d0cda1bcb 100644
2602 +--- a/drivers/net/wireless/ath/carl9170/carl9170.h
2603 ++++ b/drivers/net/wireless/ath/carl9170/carl9170.h
2604 +@@ -256,6 +256,7 @@ struct ar9170 {
2605 + atomic_t rx_work_urbs;
2606 + atomic_t rx_pool_urbs;
2607 + kernel_ulong_t features;
2608 ++ bool usb_ep_cmd_is_bulk;
2609 +
2610 + /* firmware settings */
2611 + struct completion fw_load_wait;
2612 +diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
2613 +index 307bc0ddff99..83d20c8b2ad7 100644
2614 +--- a/drivers/net/wireless/ath/carl9170/usb.c
2615 ++++ b/drivers/net/wireless/ath/carl9170/usb.c
2616 +@@ -621,9 +621,16 @@ int __carl9170_exec_cmd(struct ar9170 *ar, struct carl9170_cmd *cmd,
2617 + goto err_free;
2618 + }
2619 +
2620 +- usb_fill_int_urb(urb, ar->udev, usb_sndintpipe(ar->udev,
2621 +- AR9170_USB_EP_CMD), cmd, cmd->hdr.len + 4,
2622 +- carl9170_usb_cmd_complete, ar, 1);
2623 ++ if (ar->usb_ep_cmd_is_bulk)
2624 ++ usb_fill_bulk_urb(urb, ar->udev,
2625 ++ usb_sndbulkpipe(ar->udev, AR9170_USB_EP_CMD),
2626 ++ cmd, cmd->hdr.len + 4,
2627 ++ carl9170_usb_cmd_complete, ar);
2628 ++ else
2629 ++ usb_fill_int_urb(urb, ar->udev,
2630 ++ usb_sndintpipe(ar->udev, AR9170_USB_EP_CMD),
2631 ++ cmd, cmd->hdr.len + 4,
2632 ++ carl9170_usb_cmd_complete, ar, 1);
2633 +
2634 + if (free_buf)
2635 + urb->transfer_flags |= URB_FREE_BUFFER;
2636 +@@ -1032,9 +1039,10 @@ static void carl9170_usb_firmware_step2(const struct firmware *fw,
2637 + static int carl9170_usb_probe(struct usb_interface *intf,
2638 + const struct usb_device_id *id)
2639 + {
2640 ++ struct usb_endpoint_descriptor *ep;
2641 + struct ar9170 *ar;
2642 + struct usb_device *udev;
2643 +- int err;
2644 ++ int i, err;
2645 +
2646 + err = usb_reset_device(interface_to_usbdev(intf));
2647 + if (err)
2648 +@@ -1050,6 +1058,21 @@ static int carl9170_usb_probe(struct usb_interface *intf,
2649 + ar->intf = intf;
2650 + ar->features = id->driver_info;
2651 +
2652 ++ /* We need to remember the type of endpoint 4 because it differs
2653 ++ * between high- and full-speed configuration. The high-speed
2654 ++ * configuration specifies it as an interrupt endpoint and the
2655 ++ * full-speed configuration as a bulk endpoint. This information
2656 ++ * is required later when sending urbs to that endpoint.
2657 ++ */
2658 ++ for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; ++i) {
2659 ++ ep = &intf->cur_altsetting->endpoint[i].desc;
2660 ++
2661 ++ if (usb_endpoint_num(ep) == AR9170_USB_EP_CMD &&
2662 ++ usb_endpoint_dir_out(ep) &&
2663 ++ usb_endpoint_type(ep) == USB_ENDPOINT_XFER_BULK)
2664 ++ ar->usb_ep_cmd_is_bulk = true;
2665 ++ }
2666 ++
2667 + usb_set_intfdata(intf, ar);
2668 + SET_IEEE80211_DEV(ar->hw, &intf->dev);
2669 +
2670 +diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c
2671 +index 7917bb2fa834..406e50ef5152 100644
2672 +--- a/drivers/regulator/arizona-ldo1.c
2673 ++++ b/drivers/regulator/arizona-ldo1.c
2674 +@@ -141,8 +141,6 @@ static struct regulator_ops arizona_ldo1_ops = {
2675 + .map_voltage = regulator_map_voltage_linear,
2676 + .get_voltage_sel = regulator_get_voltage_sel_regmap,
2677 + .set_voltage_sel = regulator_set_voltage_sel_regmap,
2678 +- .get_bypass = regulator_get_bypass_regmap,
2679 +- .set_bypass = regulator_set_bypass_regmap,
2680 + };
2681 +
2682 + static const struct regulator_desc arizona_ldo1 = {
2683 +diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
2684 +index 90814fe85ac1..d5b3f66f0ebd 100644
2685 +--- a/drivers/scsi/bfa/bfa_ioc.h
2686 ++++ b/drivers/scsi/bfa/bfa_ioc.h
2687 +@@ -72,7 +72,7 @@ struct bfa_sge_s {
2688 + } while (0)
2689 +
2690 + #define bfa_swap_words(_x) ( \
2691 +- ((_x) << 32) | ((_x) >> 32))
2692 ++ ((u64)(_x) << 32) | ((u64)(_x) >> 32))
2693 +
2694 + #ifdef __BIG_ENDIAN
2695 + #define bfa_sge_to_be(_x)
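Without the casts, handing the macro a 32-bit value shifts it by its full width, which is undefined behaviour in C, and the intended widening never happens. Casting to u64 first makes the word swap well defined for any integer argument. A userspace check mirroring the fixed macro:

        #include <stdio.h>
        #include <stdint.h>

        #define SWAP_WORDS(_x) \
                (((uint64_t)(_x) << 32) | ((uint64_t)(_x) >> 32))

        int main(void)
        {
                uint32_t narrow = 0x11223344u;
                uint64_t wide   = 0x1122334455667788ull;

                /* The cast widens even a 32-bit argument before shifting. */
                printf("%#llx\n", (unsigned long long)SWAP_WORDS(narrow));
                printf("%#llx\n", (unsigned long long)SWAP_WORDS(wide));
                return 0;
        }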
2696 +diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
2697 +index f969aca0b54e..49014a143c6a 100644
2698 +--- a/drivers/scsi/scsi_devinfo.c
2699 ++++ b/drivers/scsi/scsi_devinfo.c
2700 +@@ -222,6 +222,7 @@ static struct {
2701 + {"PIONEER", "CD-ROM DRM-602X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
2702 + {"PIONEER", "CD-ROM DRM-604X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
2703 + {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
2704 ++ {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC},
2705 + {"Promise", "", NULL, BLIST_SPARSELUN},
2706 + {"QUANTUM", "XP34301", "1071", BLIST_NOTQ},
2707 + {"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN},
2708 +diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
2709 +index 4109530e92a0..054ec2c412a4 100644
2710 +--- a/drivers/scsi/scsi_scan.c
2711 ++++ b/drivers/scsi/scsi_scan.c
2712 +@@ -922,6 +922,12 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
2713 + if (*bflags & BLIST_USE_10_BYTE_MS)
2714 + sdev->use_10_for_ms = 1;
2715 +
2716 ++ /* some devices don't like REPORT SUPPORTED OPERATION CODES
2717 ++ * and will simply time out, causing sd_mod init to take a very,
2718 ++ * very long time */
2719 ++ if (*bflags & BLIST_NO_RSOC)
2720 ++ sdev->no_report_opcodes = 1;
2721 ++
2722 + /* set the device running here so that slave configure
2723 + * may do I/O */
2724 + ret = scsi_device_set_state(sdev, SDEV_RUNNING);
2725 +@@ -950,7 +956,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
2726 +
2727 + sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
2728 +
2729 +- if (*bflags & BLIST_SKIP_VPD_PAGES)
2730 ++ if (*bflags & BLIST_TRY_VPD_PAGES)
2731 ++ sdev->try_vpd_pages = 1;
2732 ++ else if (*bflags & BLIST_SKIP_VPD_PAGES)
2733 + sdev->skip_vpd_pages = 1;
2734 +
2735 + transport_configure_device(&sdev->sdev_gendev);
2736 +@@ -1236,6 +1244,12 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
2737 + max_dev_lun = min(8U, max_dev_lun);
2738 +
2739 + /*
2740 ++ * Stop scanning at 255 unless BLIST_SCSI3LUN
2741 ++ */
2742 ++ if (!(bflags & BLIST_SCSI3LUN))
2743 ++ max_dev_lun = min(256U, max_dev_lun);
2744 ++
2745 ++ /*
2746 + * We have already scanned LUN 0, so start at LUN 1. Keep scanning
2747 + * until we reach the max, or no LUN is found and we are not
2748 + * sparse_lun.
2749 +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
2750 +index dbc024bd4adf..69d2a7060fde 100644
2751 +--- a/drivers/scsi/sd.c
2752 ++++ b/drivers/scsi/sd.c
2753 +@@ -2667,6 +2667,11 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
2754 +
2755 + static int sd_try_extended_inquiry(struct scsi_device *sdp)
2756 + {
2757 ++ /* Attempt VPD inquiry if the device blacklist explicitly calls
2758 ++ * for it.
2759 ++ */
2760 ++ if (sdp->try_vpd_pages)
2761 ++ return 1;
2762 + /*
2763 + * Although VPD inquiries can go to SCSI-2 type devices,
2764 + * some USB ones crash on receiving them, and the pages
2765 +diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
2766 +index 9969fa1ef7c4..ed0f899e8aa5 100644
2767 +--- a/drivers/scsi/storvsc_drv.c
2768 ++++ b/drivers/scsi/storvsc_drv.c
2769 +@@ -33,6 +33,7 @@
2770 + #include <linux/device.h>
2771 + #include <linux/hyperv.h>
2772 + #include <linux/mempool.h>
2773 ++#include <linux/blkdev.h>
2774 + #include <scsi/scsi.h>
2775 + #include <scsi/scsi_cmnd.h>
2776 + #include <scsi/scsi_host.h>
2777 +@@ -330,17 +331,17 @@ static int storvsc_timeout = 180;
2778 +
2779 + static void storvsc_on_channel_callback(void *context);
2780 +
2781 +-/*
2782 +- * In Hyper-V, each port/path/target maps to 1 scsi host adapter. In
2783 +- * reality, the path/target is not used (ie always set to 0) so our
2784 +- * scsi host adapter essentially has 1 bus with 1 target that contains
2785 +- * up to 256 luns.
2786 +- */
2787 +-#define STORVSC_MAX_LUNS_PER_TARGET 64
2788 +-#define STORVSC_MAX_TARGETS 1
2789 +-#define STORVSC_MAX_CHANNELS 1
2790 ++#define STORVSC_MAX_LUNS_PER_TARGET 255
2791 ++#define STORVSC_MAX_TARGETS 2
2792 ++#define STORVSC_MAX_CHANNELS 8
2793 +
2794 ++#define STORVSC_FC_MAX_LUNS_PER_TARGET 255
2795 ++#define STORVSC_FC_MAX_TARGETS 128
2796 ++#define STORVSC_FC_MAX_CHANNELS 8
2797 +
2798 ++#define STORVSC_IDE_MAX_LUNS_PER_TARGET 64
2799 ++#define STORVSC_IDE_MAX_TARGETS 1
2800 ++#define STORVSC_IDE_MAX_CHANNELS 1
2801 +
2802 + struct storvsc_cmd_request {
2803 + struct list_head entry;
2804 +@@ -1017,6 +1018,13 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
2805 + case ATA_12:
2806 + set_host_byte(scmnd, DID_PASSTHROUGH);
2807 + break;
2808 ++ /*
2809 ++ * On some Windows hosts the TEST_UNIT_READY command can return
2810 ++ * SRB_STATUS_ERROR; let the upper level code deal with it
2811 ++ * based on the sense information.
2812 ++ */
2813 ++ case TEST_UNIT_READY:
2814 ++ break;
2815 + default:
2816 + set_host_byte(scmnd, DID_TARGET_FAILURE);
2817 + }
2818 +@@ -1518,6 +1526,16 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
2819 + return SUCCESS;
2820 + }
2821 +
2822 ++/*
2823 ++ * The host guarantees to respond to each command, although I/O latencies might
2824 ++ * be unbounded on Azure. Reset the timer unconditionally to give the host a
2825 ++ * chance to perform EH.
2826 ++ */
2827 ++static enum blk_eh_timer_return storvsc_eh_timed_out(struct scsi_cmnd *scmnd)
2828 ++{
2829 ++ return BLK_EH_RESET_TIMER;
2830 ++}
2831 ++
2832 + static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
2833 + {
2834 + bool allowed = true;
2835 +@@ -1553,9 +1571,19 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
2836 + struct vmscsi_request *vm_srb;
2837 + struct stor_mem_pools *memp = scmnd->device->hostdata;
2838 +
2839 +- if (!storvsc_scsi_cmd_ok(scmnd)) {
2840 +- scmnd->scsi_done(scmnd);
2841 +- return 0;
2842 ++ if (vmstor_current_major <= VMSTOR_WIN8_MAJOR) {
2843 ++ /*
2844 ++ * On legacy hosts filter unimplemented commands.
2845 ++ * Future hosts are expected to correctly handle
2846 ++ * unsupported commands. Furthermore, it is
2847 ++ * possible that some of the currently
2848 ++ * unsupported commands may be supported in
2849 ++ * future versions of the host.
2850 ++ */
2851 ++ if (!storvsc_scsi_cmd_ok(scmnd)) {
2852 ++ scmnd->scsi_done(scmnd);
2853 ++ return 0;
2854 ++ }
2855 + }
2856 +
2857 + request_size = sizeof(struct storvsc_cmd_request);
2858 +@@ -1580,26 +1608,24 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
2859 + vm_srb = &cmd_request->vstor_packet.vm_srb;
2860 + vm_srb->win8_extension.time_out_value = 60;
2861 +
2862 ++ vm_srb->win8_extension.srb_flags |=
2863 ++ (SRB_FLAGS_QUEUE_ACTION_ENABLE |
2864 ++ SRB_FLAGS_DISABLE_SYNCH_TRANSFER);
2865 +
2866 + /* Build the SRB */
2867 + switch (scmnd->sc_data_direction) {
2868 + case DMA_TO_DEVICE:
2869 + vm_srb->data_in = WRITE_TYPE;
2870 + vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_OUT;
2871 +- vm_srb->win8_extension.srb_flags |=
2872 +- (SRB_FLAGS_QUEUE_ACTION_ENABLE |
2873 +- SRB_FLAGS_DISABLE_SYNCH_TRANSFER);
2874 + break;
2875 + case DMA_FROM_DEVICE:
2876 + vm_srb->data_in = READ_TYPE;
2877 + vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_IN;
2878 +- vm_srb->win8_extension.srb_flags |=
2879 +- (SRB_FLAGS_QUEUE_ACTION_ENABLE |
2880 +- SRB_FLAGS_DISABLE_SYNCH_TRANSFER);
2881 + break;
2882 + default:
2883 + vm_srb->data_in = UNKNOWN_TYPE;
2884 +- vm_srb->win8_extension.srb_flags = 0;
2885 ++ vm_srb->win8_extension.srb_flags |= (SRB_FLAGS_DATA_IN |
2886 ++ SRB_FLAGS_DATA_OUT);
2887 + break;
2888 + }
2889 +
2890 +@@ -1687,11 +1713,11 @@ static struct scsi_host_template scsi_driver = {
2891 + .bios_param = storvsc_get_chs,
2892 + .queuecommand = storvsc_queuecommand,
2893 + .eh_host_reset_handler = storvsc_host_reset_handler,
2894 ++ .eh_timed_out = storvsc_eh_timed_out,
2895 + .slave_alloc = storvsc_device_alloc,
2896 + .slave_destroy = storvsc_device_destroy,
2897 + .slave_configure = storvsc_device_configure,
2898 +- .cmd_per_lun = 1,
2899 +- /* 64 max_queue * 1 target */
2900 ++ .cmd_per_lun = 255,
2901 + .can_queue = STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS,
2902 + .this_id = -1,
2903 + /* no use setting to 0 since ll_blk_rw reset it to 1 */
2904 +@@ -1743,19 +1769,25 @@ static int storvsc_probe(struct hv_device *device,
2905 + * set state to properly communicate with the host.
2906 + */
2907 +
2908 +- if (vmbus_proto_version == VERSION_WIN8) {
2909 +- sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE;
2910 +- vmscsi_size_delta = 0;
2911 +- vmstor_current_major = VMSTOR_WIN8_MAJOR;
2912 +- vmstor_current_minor = VMSTOR_WIN8_MINOR;
2913 +- } else {
2914 ++ switch (vmbus_proto_version) {
2915 ++ case VERSION_WS2008:
2916 ++ case VERSION_WIN7:
2917 + sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE;
2918 + vmscsi_size_delta = sizeof(struct vmscsi_win8_extension);
2919 + vmstor_current_major = VMSTOR_WIN7_MAJOR;
2920 + vmstor_current_minor = VMSTOR_WIN7_MINOR;
2921 ++ break;
2922 ++ default:
2923 ++ sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE;
2924 ++ vmscsi_size_delta = 0;
2925 ++ vmstor_current_major = VMSTOR_WIN8_MAJOR;
2926 ++ vmstor_current_minor = VMSTOR_WIN8_MINOR;
2927 ++ break;
2928 + }
2929 +
2930 +-
2931 ++ if (dev_id->driver_data == SFC_GUID)
2932 ++ scsi_driver.can_queue = (STORVSC_MAX_IO_REQUESTS *
2933 ++ STORVSC_FC_MAX_TARGETS);
2934 + host = scsi_host_alloc(&scsi_driver,
2935 + sizeof(struct hv_host_device));
2936 + if (!host)
2937 +@@ -1789,12 +1821,25 @@ static int storvsc_probe(struct hv_device *device,
2938 + host_dev->path = stor_device->path_id;
2939 + host_dev->target = stor_device->target_id;
2940 +
2941 +- /* max # of devices per target */
2942 +- host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
2943 +- /* max # of targets per channel */
2944 +- host->max_id = STORVSC_MAX_TARGETS;
2945 +- /* max # of channels */
2946 +- host->max_channel = STORVSC_MAX_CHANNELS - 1;
2947 ++ switch (dev_id->driver_data) {
2948 ++ case SFC_GUID:
2949 ++ host->max_lun = STORVSC_FC_MAX_LUNS_PER_TARGET;
2950 ++ host->max_id = STORVSC_FC_MAX_TARGETS;
2951 ++ host->max_channel = STORVSC_FC_MAX_CHANNELS - 1;
2952 ++ break;
2953 ++
2954 ++ case SCSI_GUID:
2955 ++ host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
2956 ++ host->max_id = STORVSC_MAX_TARGETS;
2957 ++ host->max_channel = STORVSC_MAX_CHANNELS - 1;
2958 ++ break;
2959 ++
2960 ++ default:
2961 ++ host->max_lun = STORVSC_IDE_MAX_LUNS_PER_TARGET;
2962 ++ host->max_id = STORVSC_IDE_MAX_TARGETS;
2963 ++ host->max_channel = STORVSC_IDE_MAX_CHANNELS - 1;
2964 ++ break;
2965 ++ }
2966 + /* max cmd length */
2967 + host->max_cmd_len = STORVSC_MAX_CMD_LEN;
2968 +
2969 +diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
2970 +index 1d1d321d90c4..72006e63d513 100644
2971 +--- a/drivers/spi/spi-orion.c
2972 ++++ b/drivers/spi/spi-orion.c
2973 +@@ -404,8 +404,6 @@ static int orion_spi_probe(struct platform_device *pdev)
2974 + struct resource *r;
2975 + unsigned long tclk_hz;
2976 + int status = 0;
2977 +- const u32 *iprop;
2978 +- int size;
2979 +
2980 + master = spi_alloc_master(&pdev->dev, sizeof *spi);
2981 + if (master == NULL) {
2982 +@@ -416,10 +414,10 @@ static int orion_spi_probe(struct platform_device *pdev)
2983 + if (pdev->id != -1)
2984 + master->bus_num = pdev->id;
2985 + if (pdev->dev.of_node) {
2986 +- iprop = of_get_property(pdev->dev.of_node, "cell-index",
2987 +- &size);
2988 +- if (iprop && size == sizeof(*iprop))
2989 +- master->bus_num = *iprop;
2990 ++ u32 cell_index;
2991 ++ if (!of_property_read_u32(pdev->dev.of_node, "cell-index",
2992 ++ &cell_index))
2993 ++ master->bus_num = cell_index;
2994 + }
2995 +
2996 + /* we support only mode 0, and no options */
2997 +diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
2998 +index 7b69e93d8448..fa28c75c6d04 100644
2999 +--- a/drivers/spi/spi-pxa2xx.c
3000 ++++ b/drivers/spi/spi-pxa2xx.c
3001 +@@ -1082,6 +1082,7 @@ static struct acpi_device_id pxa2xx_spi_acpi_match[] = {
3002 + { "INT3430", 0 },
3003 + { "INT3431", 0 },
3004 + { "80860F0E", 0 },
3005 ++ { "8086228E", 0 },
3006 + { },
3007 + };
3008 + MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
3009 +diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
3010 +index 85f692ddd992..d1eea2d426bd 100644
3011 +--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
3012 ++++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
3013 +@@ -53,9 +53,11 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
3014 + {USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */
3015 + /*=== Customer ID ===*/
3016 + /****** 8188EUS ********/
3017 ++ {USB_DEVICE(0x056e, 0x4008)}, /* Elecom WDC-150SU2M */
3018 + {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */
3019 + {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
3020 + {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
3021 ++ {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
3022 + {} /* Terminating entry */
3023 + };
3024 +
3025 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
3026 +index 557e8a9fe58a..721de375c543 100644
3027 +--- a/drivers/usb/core/hub.c
3028 ++++ b/drivers/usb/core/hub.c
3029 +@@ -1704,8 +1704,14 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
3030 + * - Change autosuspend delay of hub can avoid unnecessary auto
3031 + * suspend timer for hub, also may decrease power consumption
3032 + * of USB bus.
3033 ++ *
3034 ++	 * - If the user has requested that autosuspend be prevented by
3035 ++	 *   passing usbcore.autosuspend = -1, then keep autosuspend disabled.
3036 + */
3037 +- pm_runtime_set_autosuspend_delay(&hdev->dev, 0);
3038 ++#ifdef CONFIG_PM_RUNTIME
3039 ++ if (hdev->dev.power.autosuspend_delay >= 0)
3040 ++ pm_runtime_set_autosuspend_delay(&hdev->dev, 0);
3041 ++#endif
3042 +
3043 + /*
3044 + * Hubs have proper suspend/resume support, except for root hubs
3045 +diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
3046 +index 1bb85bee2625..7ba861543d03 100644
3047 +--- a/drivers/usb/host/ehci-hub.c
3048 ++++ b/drivers/usb/host/ehci-hub.c
3049 +@@ -1241,7 +1241,7 @@ static int ehci_hub_control (
3050 + if (selector == EHSET_TEST_SINGLE_STEP_SET_FEATURE) {
3051 + spin_unlock_irqrestore(&ehci->lock, flags);
3052 + retval = ehset_single_step_set_feature(hcd,
3053 +- wIndex);
3054 ++ wIndex + 1);
3055 + spin_lock_irqsave(&ehci->lock, flags);
3056 + break;
3057 + }
3058 +diff --git a/drivers/usb/host/ohci-spear.c b/drivers/usb/host/ohci-spear.c
3059 +index cc9dd9e4f05e..45f87735b449 100644
3060 +--- a/drivers/usb/host/ohci-spear.c
3061 ++++ b/drivers/usb/host/ohci-spear.c
3062 +@@ -53,7 +53,7 @@ static int ohci_spear_start(struct usb_hcd *hcd)
3063 + create_debug_files(ohci);
3064 +
3065 + #ifdef DEBUG
3066 +- ohci_dump(ohci, 1);
3067 ++ ohci_dump(ohci);
3068 + #endif
3069 + return 0;
3070 + }
3071 +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
3072 +index f34b42e4c391..1cfe0c743092 100644
3073 +--- a/drivers/usb/host/xhci-pci.c
3074 ++++ b/drivers/usb/host/xhci-pci.c
3075 +@@ -101,6 +101,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
3076 + /* AMD PLL quirk */
3077 + if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
3078 + xhci->quirks |= XHCI_AMD_PLL_FIX;
3079 ++
3080 ++ if (pdev->vendor == PCI_VENDOR_ID_AMD)
3081 ++ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
3082 ++
3083 + if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
3084 + xhci->quirks |= XHCI_LPM_SUPPORT;
3085 + xhci->quirks |= XHCI_INTEL_HOST;
3086 +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
3087 +index 6118e292d5df..46ad9f3f589d 100644
3088 +--- a/drivers/usb/host/xhci-ring.c
3089 ++++ b/drivers/usb/host/xhci-ring.c
3090 +@@ -2579,7 +2579,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
3091 + * last TRB of the previous TD. The command completion handle
3092 + * will take care the rest.
3093 + */
3094 +- if (!event_seg && trb_comp_code == COMP_STOP_INVAL) {
3095 ++ if (!event_seg && (trb_comp_code == COMP_STOP ||
3096 ++ trb_comp_code == COMP_STOP_INVAL)) {
3097 + ret = 0;
3098 + goto cleanup;
3099 + }
3100 +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
3101 +index bac979402ce3..bb68ed5cd3bc 100644
3102 +--- a/drivers/usb/serial/ftdi_sio.c
3103 ++++ b/drivers/usb/serial/ftdi_sio.c
3104 +@@ -152,6 +152,7 @@ static struct usb_device_id id_table_combined [] = {
3105 + { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) },
3106 + { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
3107 + { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
3108 ++ { USB_DEVICE(FTDI_VID, FTDI_BM_ATOM_NANO_PID) },
3109 + { USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) },
3110 + { USB_DEVICE(FTDI_VID, FTDI_EV3CON_PID) },
3111 + { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) },
3112 +@@ -948,6 +949,8 @@ static struct usb_device_id id_table_combined [] = {
3113 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) },
3114 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_3_PID) },
3115 + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_4_PID) },
3116 ++ /* ekey Devices */
3117 ++ { USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) },
3118 + /* Infineon Devices */
3119 + { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
3120 + { } /* Terminating entry */
3121 +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
3122 +index 1e58d90a0b6c..70b0b1d88ae9 100644
3123 +--- a/drivers/usb/serial/ftdi_sio_ids.h
3124 ++++ b/drivers/usb/serial/ftdi_sio_ids.h
3125 +@@ -42,6 +42,8 @@
3126 + /* www.candapter.com Ewert Energy Systems CANdapter device */
3127 + #define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */
3128 +
3129 ++#define FTDI_BM_ATOM_NANO_PID 0xa559 /* Basic Micro ATOM Nano USB2Serial */
3130 ++
3131 + /*
3132 + * Texas Instruments XDS100v2 JTAG / BeagleBone A3
3133 + * http://processors.wiki.ti.com/index.php/XDS100
3134 +@@ -1378,3 +1380,8 @@
3135 + #define BRAINBOXES_US_160_6_PID 0x9006 /* US-160 16xRS232 1Mbaud Port 11 and 12 */
3136 + #define BRAINBOXES_US_160_7_PID 0x9007 /* US-160 16xRS232 1Mbaud Port 13 and 14 */
3137 + #define BRAINBOXES_US_160_8_PID 0x9008 /* US-160 16xRS232 1Mbaud Port 15 and 16 */
3138 ++
3139 ++/*
3140 ++ * ekey biometric systems GmbH (http://ekey.net/)
3141 ++ */
3142 ++#define FTDI_EKEY_CONV_USB_PID 0xCB08 /* Converter USB */
3143 +diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
3144 +index 36a7740e827c..cc5a430dc357 100644
3145 +--- a/drivers/usb/serial/whiteheat.c
3146 ++++ b/drivers/usb/serial/whiteheat.c
3147 +@@ -521,6 +521,10 @@ static void command_port_read_callback(struct urb *urb)
3148 + dev_dbg(&urb->dev->dev, "%s - command_info is NULL, exiting.\n", __func__);
3149 + return;
3150 + }
3151 ++ if (!urb->actual_length) {
3152 ++ dev_dbg(&urb->dev->dev, "%s - empty response, exiting.\n", __func__);
3153 ++ return;
3154 ++ }
3155 + if (status) {
3156 + dev_dbg(&urb->dev->dev, "%s - nonzero urb status: %d\n", __func__, status);
3157 + if (status != -ENOENT)
3158 +@@ -541,7 +545,8 @@ static void command_port_read_callback(struct urb *urb)
3159 + /* These are unsolicited reports from the firmware, hence no
3160 + waiting command to wakeup */
3161 + dev_dbg(&urb->dev->dev, "%s - event received\n", __func__);
3162 +- } else if (data[0] == WHITEHEAT_GET_DTR_RTS) {
3163 ++ } else if ((data[0] == WHITEHEAT_GET_DTR_RTS) &&
3164 ++ (urb->actual_length - 1 <= sizeof(command_info->result_buffer))) {
3165 + memcpy(command_info->result_buffer, &data[1],
3166 + urb->actual_length - 1);
3167 + command_info->command_finished = WHITEHEAT_CMD_COMPLETE;
3168 +diff --git a/fs/aio.c b/fs/aio.c
3169 +index 6d68e01dc7ca..b732a9c32042 100644
3170 +--- a/fs/aio.c
3171 ++++ b/fs/aio.c
3172 +@@ -1065,6 +1065,12 @@ static long aio_read_events_ring(struct kioctx *ctx,
3173 + tail = ring->tail;
3174 + kunmap_atomic(ring);
3175 +
3176 ++ /*
3177 ++	 * Ensure that once we've read the current tail pointer, we
3178 ++	 * also see the events that were stored up to the tail.
3179 ++ */
3180 ++ smp_rmb();
3181 ++
3182 + pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);
3183 +
3184 + if (head == tail)
3185 +diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
3186 +index 1f4ce7ac144d..53039de1495d 100644
3187 +--- a/fs/btrfs/backref.c
3188 ++++ b/fs/btrfs/backref.c
3189 +@@ -263,9 +263,8 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
3190 + }
3191 + if (ret > 0)
3192 + goto next;
3193 +- ret = ulist_add_merge(parents, eb->start,
3194 +- (uintptr_t)eie,
3195 +- (u64 *)&old, GFP_NOFS);
3196 ++ ret = ulist_add_merge_ptr(parents, eb->start,
3197 ++ eie, (void **)&old, GFP_NOFS);
3198 + if (ret < 0)
3199 + break;
3200 + if (!ret && extent_item_pos) {
3201 +@@ -955,16 +954,19 @@ again:
3202 + ret = -EIO;
3203 + goto out;
3204 + }
3205 ++ btrfs_tree_read_lock(eb);
3206 ++ btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
3207 + ret = find_extent_in_eb(eb, bytenr,
3208 + *extent_item_pos, &eie);
3209 ++ btrfs_tree_read_unlock_blocking(eb);
3210 + free_extent_buffer(eb);
3211 + if (ret < 0)
3212 + goto out;
3213 + ref->inode_list = eie;
3214 + }
3215 +- ret = ulist_add_merge(refs, ref->parent,
3216 +- (uintptr_t)ref->inode_list,
3217 +- (u64 *)&eie, GFP_NOFS);
3218 ++ ret = ulist_add_merge_ptr(refs, ref->parent,
3219 ++ ref->inode_list,
3220 ++ (void **)&eie, GFP_NOFS);
3221 + if (ret < 0)
3222 + goto out;
3223 + if (!ret && extent_item_pos) {
3224 +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
3225 +index b395791dd923..594bbfd4996e 100644
3226 +--- a/fs/btrfs/extent_io.c
3227 ++++ b/fs/btrfs/extent_io.c
3228 +@@ -2485,6 +2485,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
3229 + test_bit(BIO_UPTODATE, &bio->bi_flags);
3230 + if (err)
3231 + uptodate = 0;
3232 ++ offset += len;
3233 + continue;
3234 + }
3235 + }
3236 +diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
3237 +index 4f53159bdb9d..d4731e9808ea 100644
3238 +--- a/fs/btrfs/file-item.c
3239 ++++ b/fs/btrfs/file-item.c
3240 +@@ -752,7 +752,7 @@ again:
3241 + found_next = 1;
3242 + if (ret != 0)
3243 + goto insert;
3244 +- slot = 0;
3245 ++ slot = path->slots[0];
3246 + }
3247 + btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
3248 + if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
3249 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
3250 +index fa8010c1b628..7e6758d075ad 100644
3251 +--- a/fs/btrfs/inode.c
3252 ++++ b/fs/btrfs/inode.c
3253 +@@ -683,6 +683,18 @@ retry:
3254 + unlock_extent(io_tree, async_extent->start,
3255 + async_extent->start +
3256 + async_extent->ram_size - 1);
3257 ++
3258 ++ /*
3259 ++ * we need to redirty the pages if we decide to
3260 ++			 * fall back to uncompressed IO, otherwise we
3261 ++ * will not submit these pages down to lower
3262 ++ * layers.
3263 ++ */
3264 ++ extent_range_redirty_for_io(inode,
3265 ++ async_extent->start,
3266 ++ async_extent->start +
3267 ++ async_extent->ram_size - 1);
3268 ++
3269 + goto retry;
3270 + }
3271 + goto out_free;
3272 +diff --git a/fs/btrfs/ulist.h b/fs/btrfs/ulist.h
3273 +index fb36731074b5..3e62b57be6b5 100644
3274 +--- a/fs/btrfs/ulist.h
3275 ++++ b/fs/btrfs/ulist.h
3276 +@@ -74,6 +74,21 @@ void ulist_free(struct ulist *ulist);
3277 + int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask);
3278 + int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
3279 + u64 *old_aux, gfp_t gfp_mask);
3280 ++
3281 ++/* just like ulist_add_merge() but takes a pointer for the aux data */
3282 ++static inline int ulist_add_merge_ptr(struct ulist *ulist, u64 val, void *aux,
3283 ++ void **old_aux, gfp_t gfp_mask)
3284 ++{
3285 ++#if BITS_PER_LONG == 32
3286 ++ u64 old64 = (uintptr_t)*old_aux;
3287 ++ int ret = ulist_add_merge(ulist, val, (uintptr_t)aux, &old64, gfp_mask);
3288 ++ *old_aux = (void *)((uintptr_t)old64);
3289 ++ return ret;
3290 ++#else
3291 ++ return ulist_add_merge(ulist, val, (u64)aux, (u64 *)old_aux, gfp_mask);
3292 ++#endif
3293 ++}
3294 ++
3295 + struct ulist_node *ulist_next(struct ulist *ulist,
3296 + struct ulist_iterator *uiter);
3297 +
3298 +diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
3299 +index 465b65488b27..d13f77ea0034 100644
3300 +--- a/fs/cifs/cifsglob.h
3301 ++++ b/fs/cifs/cifsglob.h
3302 +@@ -70,11 +70,6 @@
3303 + #define SERVER_NAME_LENGTH 40
3304 + #define SERVER_NAME_LEN_WITH_NULL (SERVER_NAME_LENGTH + 1)
3305 +
3306 +-/* used to define string lengths for reversing unicode strings */
3307 +-/* (256+1)*2 = 514 */
3308 +-/* (max path length + 1 for null) * 2 for unicode */
3309 +-#define MAX_NAME 514
3310 +-
3311 + /* SMB echo "timeout" -- FIXME: tunable? */
3312 + #define SMB_ECHO_INTERVAL (60 * HZ)
3313 +
3314 +diff --git a/fs/cifs/file.c b/fs/cifs/file.c
3315 +index 643a18491bed..892a1e947b5a 100644
3316 +--- a/fs/cifs/file.c
3317 ++++ b/fs/cifs/file.c
3318 +@@ -2847,7 +2847,7 @@ cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
3319 + total_read += result;
3320 + }
3321 +
3322 +- return total_read > 0 ? total_read : result;
3323 ++ return total_read > 0 && result != -EAGAIN ? total_read : result;
3324 + }
3325 +
3326 + static ssize_t
3327 +@@ -3270,7 +3270,7 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
3328 + total_read += result;
3329 + }
3330 +
3331 +- return total_read > 0 ? total_read : result;
3332 ++ return total_read > 0 && result != -EAGAIN ? total_read : result;
3333 + }
3334 +
3335 + static int cifs_readpages(struct file *file, struct address_space *mapping,
3336 +diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
3337 +index 5f8bdff3a758..2a93255c0150 100644
3338 +--- a/fs/cifs/inode.c
3339 ++++ b/fs/cifs/inode.c
3340 +@@ -1682,13 +1682,22 @@ cifs_rename(struct inode *source_dir, struct dentry *source_dentry,
3341 + unlink_target:
3342 + /* Try unlinking the target dentry if it's not negative */
3343 + if (target_dentry->d_inode && (rc == -EACCES || rc == -EEXIST)) {
3344 +- tmprc = cifs_unlink(target_dir, target_dentry);
3345 ++ if (S_ISDIR(target_dentry->d_inode->i_mode))
3346 ++ tmprc = cifs_rmdir(target_dir, target_dentry);
3347 ++ else
3348 ++ tmprc = cifs_unlink(target_dir, target_dentry);
3349 + if (tmprc)
3350 + goto cifs_rename_exit;
3351 + rc = cifs_do_rename(xid, source_dentry, from_name,
3352 + target_dentry, to_name);
3353 + }
3354 +
3355 ++ /* force revalidate to go get info when needed */
3356 ++ CIFS_I(source_dir)->time = CIFS_I(target_dir)->time = 0;
3357 ++
3358 ++ source_dir->i_ctime = source_dir->i_mtime = target_dir->i_ctime =
3359 ++ target_dir->i_mtime = current_fs_time(source_dir->i_sb);
3360 ++
3361 + cifs_rename_exit:
3362 + kfree(info_buf_source);
3363 + kfree(from_name);
3364 +diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
3365 +index 5940ecabbe6a..59edb8fd33aa 100644
3366 +--- a/fs/cifs/readdir.c
3367 ++++ b/fs/cifs/readdir.c
3368 +@@ -596,8 +596,8 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
3369 + if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
3370 + cfile->invalidHandle = true;
3371 + spin_unlock(&cifs_file_list_lock);
3372 +- if (server->ops->close)
3373 +- server->ops->close(xid, tcon, &cfile->fid);
3374 ++ if (server->ops->close_dir)
3375 ++ server->ops->close_dir(xid, tcon, &cfile->fid);
3376 + } else
3377 + spin_unlock(&cifs_file_list_lock);
3378 + if (cfile->srch_inf.ntwrk_buf_start) {
3379 +diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
3380 +index 3f17b4550831..45992944e238 100644
3381 +--- a/fs/cifs/smb2file.c
3382 ++++ b/fs/cifs/smb2file.c
3383 +@@ -50,7 +50,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
3384 + goto out;
3385 + }
3386 +
3387 +- smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
3388 ++ smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
3389 + GFP_KERNEL);
3390 + if (smb2_data == NULL) {
3391 + rc = -ENOMEM;
3392 +diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
3393 +index 84c012a6aba0..215f8d3e3e53 100644
3394 +--- a/fs/cifs/smb2inode.c
3395 ++++ b/fs/cifs/smb2inode.c
3396 +@@ -131,7 +131,7 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
3397 + *adjust_tz = false;
3398 + *symlink = false;
3399 +
3400 +- smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
3401 ++ smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
3402 + GFP_KERNEL);
3403 + if (smb2_data == NULL)
3404 + return -ENOMEM;
3405 +diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
3406 +index 7c2f45c06fc2..824696fb24db 100644
3407 +--- a/fs/cifs/smb2maperror.c
3408 ++++ b/fs/cifs/smb2maperror.c
3409 +@@ -605,7 +605,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
3410 + {STATUS_MAPPED_FILE_SIZE_ZERO, -EIO, "STATUS_MAPPED_FILE_SIZE_ZERO"},
3411 + {STATUS_TOO_MANY_OPENED_FILES, -EMFILE, "STATUS_TOO_MANY_OPENED_FILES"},
3412 + {STATUS_CANCELLED, -EIO, "STATUS_CANCELLED"},
3413 +- {STATUS_CANNOT_DELETE, -EIO, "STATUS_CANNOT_DELETE"},
3414 ++ {STATUS_CANNOT_DELETE, -EACCES, "STATUS_CANNOT_DELETE"},
3415 + {STATUS_INVALID_COMPUTER_NAME, -EIO, "STATUS_INVALID_COMPUTER_NAME"},
3416 + {STATUS_FILE_DELETED, -EIO, "STATUS_FILE_DELETED"},
3417 + {STATUS_SPECIAL_ACCOUNT, -EIO, "STATUS_SPECIAL_ACCOUNT"},
3418 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
3419 +index 4ac88f89a5e5..8956cf67299b 100644
3420 +--- a/fs/cifs/smb2ops.c
3421 ++++ b/fs/cifs/smb2ops.c
3422 +@@ -251,7 +251,7 @@ smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
3423 + int rc;
3424 + struct smb2_file_all_info *smb2_data;
3425 +
3426 +- smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
3427 ++ smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
3428 + GFP_KERNEL);
3429 + if (smb2_data == NULL)
3430 + return -ENOMEM;
3431 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
3432 +index 829ad35f98d4..fb0c67372a90 100644
3433 +--- a/fs/cifs/smb2pdu.c
3434 ++++ b/fs/cifs/smb2pdu.c
3435 +@@ -912,7 +912,8 @@ tcon_exit:
3436 + tcon_error_exit:
3437 + if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) {
3438 + cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
3439 +- tcon->bad_network_name = true;
3440 ++ if (tcon)
3441 ++ tcon->bad_network_name = true;
3442 + }
3443 + goto tcon_exit;
3444 + }
3445 +@@ -1488,7 +1489,7 @@ SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
3446 + {
3447 + return query_info(xid, tcon, persistent_fid, volatile_fid,
3448 + FILE_ALL_INFORMATION,
3449 +- sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
3450 ++ sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
3451 + sizeof(struct smb2_file_all_info), data);
3452 + }
3453 +
3454 +diff --git a/fs/dcache.c b/fs/dcache.c
3455 +index 8ef74f3d8fe5..87b70fe7eccc 100644
3456 +--- a/fs/dcache.c
3457 ++++ b/fs/dcache.c
3458 +@@ -125,8 +125,6 @@ static inline void done_seqretry(seqlock_t *lock, int seq)
3459 + * This hash-function tries to avoid losing too many bits of hash
3460 + * information, yet avoid using a prime hash-size or similar.
3461 + */
3462 +-#define D_HASHBITS d_hash_shift
3463 +-#define D_HASHMASK d_hash_mask
3464 +
3465 + static unsigned int d_hash_mask __read_mostly;
3466 + static unsigned int d_hash_shift __read_mostly;
3467 +@@ -137,8 +135,7 @@ static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
3468 + unsigned int hash)
3469 + {
3470 + hash += (unsigned long) parent / L1_CACHE_BYTES;
3471 +- hash = hash + (hash >> D_HASHBITS);
3472 +- return dentry_hashtable + (hash & D_HASHMASK);
3473 ++ return dentry_hashtable + hash_32(hash, d_hash_shift);
3474 + }
3475 +
3476 + /* Statistics gathering. */
3477 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
3478 +index e5d9908c0bc3..d65a6260ad61 100644
3479 +--- a/fs/ext4/inode.c
3480 ++++ b/fs/ext4/inode.c
3481 +@@ -2192,6 +2192,7 @@ static int mpage_map_and_submit_extent(handle_t *handle,
3482 + struct ext4_map_blocks *map = &mpd->map;
3483 + int err;
3484 + loff_t disksize;
3485 ++ int progress = 0;
3486 +
3487 + mpd->io_submit.io_end->offset =
3488 + ((loff_t)map->m_lblk) << inode->i_blkbits;
3489 +@@ -2208,8 +2209,11 @@ static int mpage_map_and_submit_extent(handle_t *handle,
3490 + * is non-zero, a commit should free up blocks.
3491 + */
3492 + if ((err == -ENOMEM) ||
3493 +- (err == -ENOSPC && ext4_count_free_clusters(sb)))
3494 ++ (err == -ENOSPC && ext4_count_free_clusters(sb))) {
3495 ++ if (progress)
3496 ++ goto update_disksize;
3497 + return err;
3498 ++ }
3499 + ext4_msg(sb, KERN_CRIT,
3500 + "Delayed block allocation failed for "
3501 + "inode %lu at logical offset %llu with"
3502 +@@ -2226,15 +2230,17 @@ static int mpage_map_and_submit_extent(handle_t *handle,
3503 + *give_up_on_write = true;
3504 + return err;
3505 + }
3506 ++ progress = 1;
3507 + /*
3508 + * Update buffer state, submit mapped pages, and get us new
3509 + * extent to map
3510 + */
3511 + err = mpage_map_and_submit_buffers(mpd);
3512 + if (err < 0)
3513 +- return err;
3514 ++ goto update_disksize;
3515 + } while (map->m_len);
3516 +
3517 ++update_disksize:
3518 + /*
3519 + * Update on-disk size after IO is submitted. Races with
3520 + * truncate are avoided by checking i_size under i_data_sem.
3521 +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
3522 +index 795d5afc1479..242226a87be7 100644
3523 +--- a/fs/ext4/mballoc.c
3524 ++++ b/fs/ext4/mballoc.c
3525 +@@ -1398,6 +1398,8 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
3526 + int last = first + count - 1;
3527 + struct super_block *sb = e4b->bd_sb;
3528 +
3529 ++ if (WARN_ON(count == 0))
3530 ++ return;
3531 + BUG_ON(last >= (sb->s_blocksize << 3));
3532 + assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
3533 + /* Don't bother if the block group is corrupt. */
3534 +@@ -3200,6 +3202,8 @@ static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
3535 + int err;
3536 +
3537 + if (pa == NULL) {
3538 ++ if (ac->ac_f_ex.fe_len == 0)
3539 ++ return;
3540 + err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
3541 + if (err) {
3542 + /*
3543 +@@ -3214,6 +3218,7 @@ static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
3544 + mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
3545 + ac->ac_f_ex.fe_len);
3546 + ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
3547 ++ ext4_mb_unload_buddy(&e4b);
3548 + return;
3549 + }
3550 + if (pa->pa_type == MB_INODE_PA)
3551 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
3552 +index 9afc4ba21611..b52a34bc7600 100644
3553 +--- a/fs/ext4/super.c
3554 ++++ b/fs/ext4/super.c
3555 +@@ -3141,9 +3141,9 @@ static int set_journal_csum_feature_set(struct super_block *sb)
3556 +
3557 + if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
3558 + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
3559 +- /* journal checksum v2 */
3560 ++ /* journal checksum v3 */
3561 + compat = 0;
3562 +- incompat = JBD2_FEATURE_INCOMPAT_CSUM_V2;
3563 ++ incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
3564 + } else {
3565 + /* journal checksum v1 */
3566 + compat = JBD2_FEATURE_COMPAT_CHECKSUM;
3567 +@@ -3165,6 +3165,7 @@ static int set_journal_csum_feature_set(struct super_block *sb)
3568 + jbd2_journal_clear_features(sbi->s_journal,
3569 + JBD2_FEATURE_COMPAT_CHECKSUM, 0,
3570 + JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
3571 ++ JBD2_FEATURE_INCOMPAT_CSUM_V3 |
3572 + JBD2_FEATURE_INCOMPAT_CSUM_V2);
3573 + }
3574 +
3575 +diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
3576 +index cf2fc0594063..9181c2b22b3c 100644
3577 +--- a/fs/jbd2/commit.c
3578 ++++ b/fs/jbd2/commit.c
3579 +@@ -97,7 +97,7 @@ static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh)
3580 + struct commit_header *h;
3581 + __u32 csum;
3582 +
3583 +- if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
3584 ++ if (!jbd2_journal_has_csum_v2or3(j))
3585 + return;
3586 +
3587 + h = (struct commit_header *)(bh->b_data);
3588 +@@ -313,11 +313,11 @@ static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
3589 + return checksum;
3590 + }
3591 +
3592 +-static void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
3593 ++static void write_tag_block(journal_t *j, journal_block_tag_t *tag,
3594 + unsigned long long block)
3595 + {
3596 + tag->t_blocknr = cpu_to_be32(block & (u32)~0);
3597 +- if (tag_bytes > JBD2_TAG_SIZE32)
3598 ++ if (JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_64BIT))
3599 + tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
3600 + }
3601 +
3602 +@@ -327,7 +327,7 @@ static void jbd2_descr_block_csum_set(journal_t *j,
3603 + struct jbd2_journal_block_tail *tail;
3604 + __u32 csum;
3605 +
3606 +- if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
3607 ++ if (!jbd2_journal_has_csum_v2or3(j))
3608 + return;
3609 +
3610 + tail = (struct jbd2_journal_block_tail *)(bh->b_data + j->j_blocksize -
3611 +@@ -340,12 +340,13 @@ static void jbd2_descr_block_csum_set(journal_t *j,
3612 + static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
3613 + struct buffer_head *bh, __u32 sequence)
3614 + {
3615 ++ journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
3616 + struct page *page = bh->b_page;
3617 + __u8 *addr;
3618 + __u32 csum32;
3619 + __be32 seq;
3620 +
3621 +- if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
3622 ++ if (!jbd2_journal_has_csum_v2or3(j))
3623 + return;
3624 +
3625 + seq = cpu_to_be32(sequence);
3626 +@@ -355,8 +356,10 @@ static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
3627 + bh->b_size);
3628 + kunmap_atomic(addr);
3629 +
3630 +- /* We only have space to store the lower 16 bits of the crc32c. */
3631 +- tag->t_checksum = cpu_to_be16(csum32);
3632 ++ if (JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V3))
3633 ++ tag3->t_checksum = cpu_to_be32(csum32);
3634 ++ else
3635 ++ tag->t_checksum = cpu_to_be16(csum32);
3636 + }
3637 + /*
3638 + * jbd2_journal_commit_transaction
3639 +@@ -396,7 +399,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
3640 + LIST_HEAD(io_bufs);
3641 + LIST_HEAD(log_bufs);
3642 +
3643 +- if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
3644 ++ if (jbd2_journal_has_csum_v2or3(journal))
3645 + csum_size = sizeof(struct jbd2_journal_block_tail);
3646 +
3647 + /*
3648 +@@ -692,7 +695,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
3649 + tag_flag |= JBD2_FLAG_SAME_UUID;
3650 +
3651 + tag = (journal_block_tag_t *) tagp;
3652 +- write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr);
3653 ++ write_tag_block(journal, tag, jh2bh(jh)->b_blocknr);
3654 + tag->t_flags = cpu_to_be16(tag_flag);
3655 + jbd2_block_tag_csum_set(journal, tag, wbuf[bufs],
3656 + commit_transaction->t_tid);
3657 +diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
3658 +index 52032647dd4a..e72faacaf578 100644
3659 +--- a/fs/jbd2/journal.c
3660 ++++ b/fs/jbd2/journal.c
3661 +@@ -124,7 +124,7 @@ EXPORT_SYMBOL(__jbd2_debug);
3662 + /* Checksumming functions */
3663 + int jbd2_verify_csum_type(journal_t *j, journal_superblock_t *sb)
3664 + {
3665 +- if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
3666 ++ if (!jbd2_journal_has_csum_v2or3(j))
3667 + return 1;
3668 +
3669 + return sb->s_checksum_type == JBD2_CRC32C_CHKSUM;
3670 +@@ -145,7 +145,7 @@ static __be32 jbd2_superblock_csum(journal_t *j, journal_superblock_t *sb)
3671 +
3672 + int jbd2_superblock_csum_verify(journal_t *j, journal_superblock_t *sb)
3673 + {
3674 +- if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
3675 ++ if (!jbd2_journal_has_csum_v2or3(j))
3676 + return 1;
3677 +
3678 + return sb->s_checksum == jbd2_superblock_csum(j, sb);
3679 +@@ -153,7 +153,7 @@ int jbd2_superblock_csum_verify(journal_t *j, journal_superblock_t *sb)
3680 +
3681 + void jbd2_superblock_csum_set(journal_t *j, journal_superblock_t *sb)
3682 + {
3683 +- if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
3684 ++ if (!jbd2_journal_has_csum_v2or3(j))
3685 + return;
3686 +
3687 + sb->s_checksum = jbd2_superblock_csum(j, sb);
3688 +@@ -1524,21 +1524,29 @@ static int journal_get_superblock(journal_t *journal)
3689 + goto out;
3690 + }
3691 +
3692 +- if (JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM) &&
3693 +- JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
3694 ++ if (jbd2_journal_has_csum_v2or3(journal) &&
3695 ++ JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM)) {
3696 + /* Can't have checksum v1 and v2 on at the same time! */
3697 + printk(KERN_ERR "JBD: Can't enable checksumming v1 and v2 "
3698 + "at the same time!\n");
3699 + goto out;
3700 + }
3701 +
3702 ++ if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2) &&
3703 ++ JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3)) {
3704 ++ /* Can't have checksum v2 and v3 at the same time! */
3705 ++ printk(KERN_ERR "JBD: Can't enable checksumming v2 and v3 "
3706 ++ "at the same time!\n");
3707 ++ goto out;
3708 ++ }
3709 ++
3710 + if (!jbd2_verify_csum_type(journal, sb)) {
3711 + printk(KERN_ERR "JBD: Unknown checksum type\n");
3712 + goto out;
3713 + }
3714 +
3715 + /* Load the checksum driver */
3716 +- if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
3717 ++ if (jbd2_journal_has_csum_v2or3(journal)) {
3718 + journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
3719 + if (IS_ERR(journal->j_chksum_driver)) {
3720 + printk(KERN_ERR "JBD: Cannot load crc32c driver.\n");
3721 +@@ -1555,7 +1563,7 @@ static int journal_get_superblock(journal_t *journal)
3722 + }
3723 +
3724 + /* Precompute checksum seed for all metadata */
3725 +- if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
3726 ++ if (jbd2_journal_has_csum_v2or3(journal))
3727 + journal->j_csum_seed = jbd2_chksum(journal, ~0, sb->s_uuid,
3728 + sizeof(sb->s_uuid));
3729 +
3730 +@@ -1815,8 +1823,14 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
3731 + if (!jbd2_journal_check_available_features(journal, compat, ro, incompat))
3732 + return 0;
3733 +
3734 +- /* Asking for checksumming v2 and v1? Only give them v2. */
3735 +- if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V2 &&
3736 ++ /* If enabling v2 checksums, turn on v3 instead */
3737 ++ if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V2) {
3738 ++ incompat &= ~JBD2_FEATURE_INCOMPAT_CSUM_V2;
3739 ++ incompat |= JBD2_FEATURE_INCOMPAT_CSUM_V3;
3740 ++ }
3741 ++
3742 ++ /* Asking for checksumming v3 and v1? Only give them v3. */
3743 ++ if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V3 &&
3744 + compat & JBD2_FEATURE_COMPAT_CHECKSUM)
3745 + compat &= ~JBD2_FEATURE_COMPAT_CHECKSUM;
3746 +
3747 +@@ -1825,8 +1839,8 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
3748 +
3749 + sb = journal->j_superblock;
3750 +
3751 +- /* If enabling v2 checksums, update superblock */
3752 +- if (INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
3753 ++ /* If enabling v3 checksums, update superblock */
3754 ++ if (INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V3)) {
3755 + sb->s_checksum_type = JBD2_CRC32C_CHKSUM;
3756 + sb->s_feature_compat &=
3757 + ~cpu_to_be32(JBD2_FEATURE_COMPAT_CHECKSUM);
3758 +@@ -1844,8 +1858,7 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
3759 + }
3760 +
3761 + /* Precompute checksum seed for all metadata */
3762 +- if (JBD2_HAS_INCOMPAT_FEATURE(journal,
3763 +- JBD2_FEATURE_INCOMPAT_CSUM_V2))
3764 ++ if (jbd2_journal_has_csum_v2or3(journal))
3765 + journal->j_csum_seed = jbd2_chksum(journal, ~0,
3766 + sb->s_uuid,
3767 + sizeof(sb->s_uuid));
3768 +@@ -1854,7 +1867,8 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
3769 + /* If enabling v1 checksums, downgrade superblock */
3770 + if (COMPAT_FEATURE_ON(JBD2_FEATURE_COMPAT_CHECKSUM))
3771 + sb->s_feature_incompat &=
3772 +- ~cpu_to_be32(JBD2_FEATURE_INCOMPAT_CSUM_V2);
3773 ++ ~cpu_to_be32(JBD2_FEATURE_INCOMPAT_CSUM_V2 |
3774 ++ JBD2_FEATURE_INCOMPAT_CSUM_V3);
3775 +
3776 + sb->s_feature_compat |= cpu_to_be32(compat);
3777 + sb->s_feature_ro_compat |= cpu_to_be32(ro);
3778 +@@ -2167,16 +2181,20 @@ int jbd2_journal_blocks_per_page(struct inode *inode)
3779 + */
3780 + size_t journal_tag_bytes(journal_t *journal)
3781 + {
3782 +- journal_block_tag_t tag;
3783 +- size_t x = 0;
3784 ++ size_t sz;
3785 ++
3786 ++ if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3))
3787 ++ return sizeof(journal_block_tag3_t);
3788 ++
3789 ++ sz = sizeof(journal_block_tag_t);
3790 +
3791 + if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
3792 +- x += sizeof(tag.t_checksum);
3793 ++ sz += sizeof(__u16);
3794 +
3795 + if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
3796 +- return x + JBD2_TAG_SIZE64;
3797 ++ return sz;
3798 + else
3799 +- return x + JBD2_TAG_SIZE32;
3800 ++ return sz - sizeof(__u32);
3801 + }
3802 +
3803 + /*
3804 +diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
3805 +index 3929c50428b1..20dbfabbf874 100644
3806 +--- a/fs/jbd2/recovery.c
3807 ++++ b/fs/jbd2/recovery.c
3808 +@@ -181,7 +181,7 @@ static int jbd2_descr_block_csum_verify(journal_t *j,
3809 + __be32 provided;
3810 + __u32 calculated;
3811 +
3812 +- if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
3813 ++ if (!jbd2_journal_has_csum_v2or3(j))
3814 + return 1;
3815 +
3816 + tail = (struct jbd2_journal_block_tail *)(buf + j->j_blocksize -
3817 +@@ -205,7 +205,7 @@ static int count_tags(journal_t *journal, struct buffer_head *bh)
3818 + int nr = 0, size = journal->j_blocksize;
3819 + int tag_bytes = journal_tag_bytes(journal);
3820 +
3821 +- if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
3822 ++ if (jbd2_journal_has_csum_v2or3(journal))
3823 + size -= sizeof(struct jbd2_journal_block_tail);
3824 +
3825 + tagp = &bh->b_data[sizeof(journal_header_t)];
3826 +@@ -338,10 +338,11 @@ int jbd2_journal_skip_recovery(journal_t *journal)
3827 + return err;
3828 + }
3829 +
3830 +-static inline unsigned long long read_tag_block(int tag_bytes, journal_block_tag_t *tag)
3831 ++static inline unsigned long long read_tag_block(journal_t *journal,
3832 ++ journal_block_tag_t *tag)
3833 + {
3834 + unsigned long long block = be32_to_cpu(tag->t_blocknr);
3835 +- if (tag_bytes > JBD2_TAG_SIZE32)
3836 ++ if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
3837 + block |= (u64)be32_to_cpu(tag->t_blocknr_high) << 32;
3838 + return block;
3839 + }
3840 +@@ -384,7 +385,7 @@ static int jbd2_commit_block_csum_verify(journal_t *j, void *buf)
3841 + __be32 provided;
3842 + __u32 calculated;
3843 +
3844 +- if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
3845 ++ if (!jbd2_journal_has_csum_v2or3(j))
3846 + return 1;
3847 +
3848 + h = buf;
3849 +@@ -399,17 +400,21 @@ static int jbd2_commit_block_csum_verify(journal_t *j, void *buf)
3850 + static int jbd2_block_tag_csum_verify(journal_t *j, journal_block_tag_t *tag,
3851 + void *buf, __u32 sequence)
3852 + {
3853 ++ journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
3854 + __u32 csum32;
3855 + __be32 seq;
3856 +
3857 +- if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
3858 ++ if (!jbd2_journal_has_csum_v2or3(j))
3859 + return 1;
3860 +
3861 + seq = cpu_to_be32(sequence);
3862 + csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
3863 + csum32 = jbd2_chksum(j, csum32, buf, j->j_blocksize);
3864 +
3865 +- return tag->t_checksum == cpu_to_be16(csum32);
3866 ++ if (JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V3))
3867 ++ return tag3->t_checksum == cpu_to_be32(csum32);
3868 ++ else
3869 ++ return tag->t_checksum == cpu_to_be16(csum32);
3870 + }
3871 +
3872 + static int do_one_pass(journal_t *journal,
3873 +@@ -426,6 +431,7 @@ static int do_one_pass(journal_t *journal,
3874 + int tag_bytes = journal_tag_bytes(journal);
3875 + __u32 crc32_sum = ~0; /* Transactional Checksums */
3876 + int descr_csum_size = 0;
3877 ++ int block_error = 0;
3878 +
3879 + /*
3880 + * First thing is to establish what we expect to find in the log
3881 +@@ -512,8 +518,7 @@ static int do_one_pass(journal_t *journal,
3882 + switch(blocktype) {
3883 + case JBD2_DESCRIPTOR_BLOCK:
3884 + /* Verify checksum first */
3885 +- if (JBD2_HAS_INCOMPAT_FEATURE(journal,
3886 +- JBD2_FEATURE_INCOMPAT_CSUM_V2))
3887 ++ if (jbd2_journal_has_csum_v2or3(journal))
3888 + descr_csum_size =
3889 + sizeof(struct jbd2_journal_block_tail);
3890 + if (descr_csum_size > 0 &&
3891 +@@ -574,7 +579,7 @@ static int do_one_pass(journal_t *journal,
3892 + unsigned long long blocknr;
3893 +
3894 + J_ASSERT(obh != NULL);
3895 +- blocknr = read_tag_block(tag_bytes,
3896 ++ blocknr = read_tag_block(journal,
3897 + tag);
3898 +
3899 + /* If the block has been
3900 +@@ -598,7 +603,8 @@ static int do_one_pass(journal_t *journal,
3901 + "checksum recovering "
3902 + "block %llu in log\n",
3903 + blocknr);
3904 +- continue;
3905 ++ block_error = 1;
3906 ++ goto skip_write;
3907 + }
3908 +
3909 + /* Find a buffer for the new
3910 +@@ -797,7 +803,8 @@ static int do_one_pass(journal_t *journal,
3911 + success = -EIO;
3912 + }
3913 + }
3914 +-
3915 ++ if (block_error && success == 0)
3916 ++ success = -EIO;
3917 + return success;
3918 +
3919 + failed:
3920 +@@ -811,7 +818,7 @@ static int jbd2_revoke_block_csum_verify(journal_t *j,
3921 + __be32 provided;
3922 + __u32 calculated;
3923 +
3924 +- if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
3925 ++ if (!jbd2_journal_has_csum_v2or3(j))
3926 + return 1;
3927 +
3928 + tail = (struct jbd2_journal_revoke_tail *)(buf + j->j_blocksize -
3929 +diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
3930 +index 198c9c10276d..d5e95a175c92 100644
3931 +--- a/fs/jbd2/revoke.c
3932 ++++ b/fs/jbd2/revoke.c
3933 +@@ -91,8 +91,8 @@
3934 + #include <linux/list.h>
3935 + #include <linux/init.h>
3936 + #include <linux/bio.h>
3937 +-#endif
3938 + #include <linux/log2.h>
3939 ++#endif
3940 +
3941 + static struct kmem_cache *jbd2_revoke_record_cache;
3942 + static struct kmem_cache *jbd2_revoke_table_cache;
3943 +@@ -597,7 +597,7 @@ static void write_one_revoke_record(journal_t *journal,
3944 + offset = *offsetp;
3945 +
3946 + /* Do we need to leave space at the end for a checksum? */
3947 +- if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
3948 ++ if (jbd2_journal_has_csum_v2or3(journal))
3949 + csum_size = sizeof(struct jbd2_journal_revoke_tail);
3950 +
3951 + /* Make sure we have a descriptor with space left for the record */
3952 +@@ -644,7 +644,7 @@ static void jbd2_revoke_csum_set(journal_t *j, struct buffer_head *bh)
3953 + struct jbd2_journal_revoke_tail *tail;
3954 + __u32 csum;
3955 +
3956 +- if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
3957 ++ if (!jbd2_journal_has_csum_v2or3(j))
3958 + return;
3959 +
3960 + tail = (struct jbd2_journal_revoke_tail *)(bh->b_data + j->j_blocksize -
3961 +diff --git a/fs/namei.c b/fs/namei.c
3962 +index e3249d565c95..227c78ae70b4 100644
3963 +--- a/fs/namei.c
3964 ++++ b/fs/namei.c
3965 +@@ -34,6 +34,7 @@
3966 + #include <linux/device_cgroup.h>
3967 + #include <linux/fs_struct.h>
3968 + #include <linux/posix_acl.h>
3969 ++#include <linux/hash.h>
3970 + #include <asm/uaccess.h>
3971 +
3972 + #include "internal.h"
3973 +@@ -1661,8 +1662,7 @@ static inline int can_lookup(struct inode *inode)
3974 +
3975 + static inline unsigned int fold_hash(unsigned long hash)
3976 + {
3977 +- hash += hash >> (8*sizeof(int));
3978 +- return hash;
3979 ++ return hash_64(hash, 32);
3980 + }
3981 +
3982 + #else /* 32-bit case */
3983 +diff --git a/fs/namespace.c b/fs/namespace.c
3984 +index 7c67de88f3f1..4ea2b7378d8c 100644
3985 +--- a/fs/namespace.c
3986 ++++ b/fs/namespace.c
3987 +@@ -2391,6 +2391,14 @@ long do_mount(const char *dev_name, const char *dir_name,
3988 + if (flags & MS_RDONLY)
3989 + mnt_flags |= MNT_READONLY;
3990 +
3991 ++ /* The default atime for remount is preservation */
3992 ++ if ((flags & MS_REMOUNT) &&
3993 ++ ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME |
3994 ++ MS_STRICTATIME)) == 0)) {
3995 ++ mnt_flags &= ~MNT_ATIME_MASK;
3996 ++ mnt_flags |= path.mnt->mnt_flags & MNT_ATIME_MASK;
3997 ++ }
3998 ++
3999 + flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN |
4000 + MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
4001 + MS_STRICTATIME);
4002 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
4003 +index 40062e42c955..067d8c90eb1a 100644
4004 +--- a/fs/nfs/nfs4proc.c
4005 ++++ b/fs/nfs/nfs4proc.c
4006 +@@ -2532,6 +2532,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
4007 + struct nfs4_closedata *calldata = data;
4008 + struct nfs4_state *state = calldata->state;
4009 + struct inode *inode = calldata->inode;
4010 ++ bool is_rdonly, is_wronly, is_rdwr;
4011 + int call_close = 0;
4012 +
4013 + dprintk("%s: begin!\n", __func__);
4014 +@@ -2539,18 +2540,24 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
4015 + goto out_wait;
4016 +
4017 + task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
4018 +- calldata->arg.fmode = FMODE_READ|FMODE_WRITE;
4019 + spin_lock(&state->owner->so_lock);
4020 ++ is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
4021 ++ is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
4022 ++ is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
4023 ++ /* Calculate the current open share mode */
4024 ++ calldata->arg.fmode = 0;
4025 ++ if (is_rdonly || is_rdwr)
4026 ++ calldata->arg.fmode |= FMODE_READ;
4027 ++ if (is_wronly || is_rdwr)
4028 ++ calldata->arg.fmode |= FMODE_WRITE;
4029 + /* Calculate the change in open mode */
4030 + if (state->n_rdwr == 0) {
4031 + if (state->n_rdonly == 0) {
4032 +- call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
4033 +- call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
4034 ++ call_close |= is_rdonly || is_rdwr;
4035 + calldata->arg.fmode &= ~FMODE_READ;
4036 + }
4037 + if (state->n_wronly == 0) {
4038 +- call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
4039 +- call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
4040 ++ call_close |= is_wronly || is_rdwr;
4041 + calldata->arg.fmode &= ~FMODE_WRITE;
4042 + }
4043 + }
4044 +diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
4045 +index 3eaa6e30a2dc..cc8c5b32043c 100644
4046 +--- a/fs/nfsd/nfs4callback.c
4047 ++++ b/fs/nfsd/nfs4callback.c
4048 +@@ -672,7 +672,8 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
4049 + clp->cl_cb_session = ses;
4050 + args.bc_xprt = conn->cb_xprt;
4051 + args.prognumber = clp->cl_cb_session->se_cb_prog;
4052 +- args.protocol = XPRT_TRANSPORT_BC_TCP;
4053 ++ args.protocol = conn->cb_xprt->xpt_class->xcl_ident |
4054 ++ XPRT_TRANSPORT_BC;
4055 + args.authflavor = ses->se_cb_sec.flavor;
4056 + }
4057 + /* Create RPC client */
4058 +diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
4059 +index 760c85a6f534..4942f4370f60 100644
4060 +--- a/fs/nfsd/nfssvc.c
4061 ++++ b/fs/nfsd/nfssvc.c
4062 +@@ -221,7 +221,8 @@ static int nfsd_startup_generic(int nrservs)
4063 + */
4064 + ret = nfsd_racache_init(2*nrservs);
4065 + if (ret)
4066 +- return ret;
4067 ++ goto dec_users;
4068 ++
4069 + ret = nfs4_state_start();
4070 + if (ret)
4071 + goto out_racache;
4072 +@@ -229,6 +230,8 @@ static int nfsd_startup_generic(int nrservs)
4073 +
4074 + out_racache:
4075 + nfsd_racache_shutdown();
4076 ++dec_users:
4077 ++ nfsd_users--;
4078 + return ret;
4079 + }
4080 +
4081 +diff --git a/fs/proc/array.c b/fs/proc/array.c
4082 +index cbd0f1b324b9..09f0d9c374a3 100644
4083 +--- a/fs/proc/array.c
4084 ++++ b/fs/proc/array.c
4085 +@@ -304,15 +304,11 @@ static void render_cap_t(struct seq_file *m, const char *header,
4086 + seq_puts(m, header);
4087 + CAP_FOR_EACH_U32(__capi) {
4088 + seq_printf(m, "%08x",
4089 +- a->cap[(_KERNEL_CAPABILITY_U32S-1) - __capi]);
4090 ++ a->cap[CAP_LAST_U32 - __capi]);
4091 + }
4092 + seq_putc(m, '\n');
4093 + }
4094 +
4095 +-/* Remove non-existent capabilities */
4096 +-#define NORM_CAPS(v) (v.cap[CAP_TO_INDEX(CAP_LAST_CAP)] &= \
4097 +- CAP_TO_MASK(CAP_LAST_CAP + 1) - 1)
4098 +-
4099 + static inline void task_cap(struct seq_file *m, struct task_struct *p)
4100 + {
4101 + const struct cred *cred;
4102 +@@ -326,11 +322,6 @@ static inline void task_cap(struct seq_file *m, struct task_struct *p)
4103 + cap_bset = cred->cap_bset;
4104 + rcu_read_unlock();
4105 +
4106 +- NORM_CAPS(cap_inheritable);
4107 +- NORM_CAPS(cap_permitted);
4108 +- NORM_CAPS(cap_effective);
4109 +- NORM_CAPS(cap_bset);
4110 +-
4111 + render_cap_t(m, "CapInh:\t", &cap_inheritable);
4112 + render_cap_t(m, "CapPrm:\t", &cap_permitted);
4113 + render_cap_t(m, "CapEff:\t", &cap_effective);
4114 +diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
4115 +index be9a1fa2721b..0415a628b2ab 100644
4116 +--- a/fs/xfs/xfs_aops.c
4117 ++++ b/fs/xfs/xfs_aops.c
4118 +@@ -1657,11 +1657,72 @@ xfs_vm_readpages(
4119 + return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
4120 + }
4121 +
4122 ++/*
4123 ++ * This is basically a copy of __set_page_dirty_buffers() with one
4124 ++ * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
4125 ++ * dirty, we'll never be able to clean them because we don't write buffers
4126 ++ * beyond EOF, and that means we can't invalidate pages that span EOF
4127 ++ * that have been marked dirty. Further, the dirty state can leak into
4128 ++ * the file interior if the file is extended, resulting in all sorts of
4129 ++ * bad things happening as the state does not match the underlying data.
4130 ++ *
4131 ++ * XXX: this really indicates that bufferheads in XFS need to die. Warts like
4132 ++ * this only exist because of bufferheads and how the generic code manages them.
4133 ++ */
4134 ++STATIC int
4135 ++xfs_vm_set_page_dirty(
4136 ++ struct page *page)
4137 ++{
4138 ++ struct address_space *mapping = page->mapping;
4139 ++ struct inode *inode = mapping->host;
4140 ++ loff_t end_offset;
4141 ++ loff_t offset;
4142 ++ int newly_dirty;
4143 ++
4144 ++ if (unlikely(!mapping))
4145 ++ return !TestSetPageDirty(page);
4146 ++
4147 ++ end_offset = i_size_read(inode);
4148 ++ offset = page_offset(page);
4149 ++
4150 ++ spin_lock(&mapping->private_lock);
4151 ++ if (page_has_buffers(page)) {
4152 ++ struct buffer_head *head = page_buffers(page);
4153 ++ struct buffer_head *bh = head;
4154 ++
4155 ++ do {
4156 ++ if (offset < end_offset)
4157 ++ set_buffer_dirty(bh);
4158 ++ bh = bh->b_this_page;
4159 ++ offset += 1 << inode->i_blkbits;
4160 ++ } while (bh != head);
4161 ++ }
4162 ++ newly_dirty = !TestSetPageDirty(page);
4163 ++ spin_unlock(&mapping->private_lock);
4164 ++
4165 ++ if (newly_dirty) {
4166 ++ /* sigh - __set_page_dirty() is static, so copy it here, too */
4167 ++ unsigned long flags;
4168 ++
4169 ++ spin_lock_irqsave(&mapping->tree_lock, flags);
4170 ++ if (page->mapping) { /* Race with truncate? */
4171 ++ WARN_ON_ONCE(!PageUptodate(page));
4172 ++ account_page_dirtied(page, mapping);
4173 ++ radix_tree_tag_set(&mapping->page_tree,
4174 ++ page_index(page), PAGECACHE_TAG_DIRTY);
4175 ++ }
4176 ++ spin_unlock_irqrestore(&mapping->tree_lock, flags);
4177 ++ __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
4178 ++ }
4179 ++ return newly_dirty;
4180 ++}
4181 ++
4182 + const struct address_space_operations xfs_address_space_operations = {
4183 + .readpage = xfs_vm_readpage,
4184 + .readpages = xfs_vm_readpages,
4185 + .writepage = xfs_vm_writepage,
4186 + .writepages = xfs_vm_writepages,
4187 ++ .set_page_dirty = xfs_vm_set_page_dirty,
4188 + .releasepage = xfs_vm_releasepage,
4189 + .invalidatepage = xfs_vm_invalidatepage,
4190 + .write_begin = xfs_vm_write_begin,
4191 +diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
4192 +index 1ee776d477c3..895db7a88412 100644
4193 +--- a/fs/xfs/xfs_dquot.c
4194 ++++ b/fs/xfs/xfs_dquot.c
4195 +@@ -1121,7 +1121,8 @@ xfs_qm_dqflush(
4196 + * Get the buffer containing the on-disk dquot
4197 + */
4198 + error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
4199 +- mp->m_quotainfo->qi_dqchunklen, 0, &bp, NULL);
4200 ++ mp->m_quotainfo->qi_dqchunklen, 0, &bp,
4201 ++ &xfs_dquot_buf_ops);
4202 + if (error)
4203 + goto out_unlock;
4204 +
4205 +diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
4206 +index 4c749ab543d0..d56b136e68fe 100644
4207 +--- a/fs/xfs/xfs_file.c
4208 ++++ b/fs/xfs/xfs_file.c
4209 +@@ -299,7 +299,16 @@ xfs_file_aio_read(
4210 + xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
4211 + return ret;
4212 + }
4213 +- truncate_pagecache_range(VFS_I(ip), pos, -1);
4214 ++
4215 ++ /*
4216 ++ * Invalidate whole pages. This can return an error if
4217 ++ * we fail to invalidate a page, but this should never
4218 ++ * happen on XFS. Warn if it does fail.
4219 ++ */
4220 ++ ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
4221 ++ pos >> PAGE_CACHE_SHIFT, -1);
4222 ++ WARN_ON_ONCE(ret);
4223 ++ ret = 0;
4224 + }
4225 + xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
4226 + }
4227 +@@ -678,7 +687,15 @@ xfs_file_dio_aio_write(
4228 + pos, -1);
4229 + if (ret)
4230 + goto out;
4231 +- truncate_pagecache_range(VFS_I(ip), pos, -1);
4232 ++ /*
4233 ++ * Invalidate whole pages. This can return an error if
4234 ++ * we fail to invalidate a page, but this should never
4235 ++ * happen on XFS. Warn if it does fail.
4236 ++ */
4237 ++ ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
4238 ++ pos >> PAGE_CACHE_SHIFT, -1);
4239 ++ WARN_ON_ONCE(ret);
4240 ++ ret = 0;
4241 + }
4242 +
4243 + /*
4244 +diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
4245 +index 39797490a1f1..5b166a07d55e 100644
4246 +--- a/fs/xfs/xfs_log_recover.c
4247 ++++ b/fs/xfs/xfs_log_recover.c
4248 +@@ -2121,6 +2121,17 @@ xlog_recover_validate_buf_type(
4249 + __uint16_t magic16;
4250 + __uint16_t magicda;
4251 +
4252 ++ /*
4253 ++	 * We can only do post-recovery validation on items on CRC-enabled
4254 ++	 * filesystems, as we need to know when the buffer was written to be
4255 ++	 * able to determine if we should have replayed the item. If we replay
4256 ++	 * old metadata over a newer buffer, then it will enter a temporarily
4257 ++	 * inconsistent state resulting in verification failures. Hence for now
4258 ++	 * just avoid the verification stage for non-CRC filesystems.
4259 ++ */
4260 ++ if (!xfs_sb_version_hascrc(&mp->m_sb))
4261 ++ return;
4262 ++
4263 + magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
4264 + magic16 = be16_to_cpu(*(__be16*)bp->b_addr);
4265 + magicda = be16_to_cpu(info->magic);
4266 +@@ -2156,8 +2167,6 @@ xlog_recover_validate_buf_type(
4267 + bp->b_ops = &xfs_agf_buf_ops;
4268 + break;
4269 + case XFS_BLFT_AGFL_BUF:
4270 +- if (!xfs_sb_version_hascrc(&mp->m_sb))
4271 +- break;
4272 + if (magic32 != XFS_AGFL_MAGIC) {
4273 + xfs_warn(mp, "Bad AGFL block magic!");
4274 + ASSERT(0);
4275 +@@ -2190,10 +2199,6 @@ xlog_recover_validate_buf_type(
4276 + #endif
4277 + break;
4278 + case XFS_BLFT_DINO_BUF:
4279 +- /*
4280 +- * we get here with inode allocation buffers, not buffers that
4281 +- * track unlinked list changes.
4282 +- */
4283 + if (magic16 != XFS_DINODE_MAGIC) {
4284 + xfs_warn(mp, "Bad INODE block magic!");
4285 + ASSERT(0);
4286 +@@ -2273,8 +2278,6 @@ xlog_recover_validate_buf_type(
4287 + bp->b_ops = &xfs_attr3_leaf_buf_ops;
4288 + break;
4289 + case XFS_BLFT_ATTR_RMT_BUF:
4290 +- if (!xfs_sb_version_hascrc(&mp->m_sb))
4291 +- break;
4292 + if (magic32 != XFS_ATTR3_RMT_MAGIC) {
4293 + xfs_warn(mp, "Bad attr remote magic!");
4294 + ASSERT(0);
4295 +@@ -2381,16 +2384,7 @@ xlog_recover_do_reg_buffer(
4296 + /* Shouldn't be any more regions */
4297 + ASSERT(i == item->ri_total);
4298 +
4299 +- /*
4300 +- * We can only do post recovery validation on items on CRC enabled
4301 +- * fielsystems as we need to know when the buffer was written to be able
4302 +- * to determine if we should have replayed the item. If we replay old
4303 +- * metadata over a newer buffer, then it will enter a temporarily
4304 +- * inconsistent state resulting in verification failures. Hence for now
4305 +- * just avoid the verification stage for non-crc filesystems
4306 +- */
4307 +- if (xfs_sb_version_hascrc(&mp->m_sb))
4308 +- xlog_recover_validate_buf_type(mp, bp, buf_f);
4309 ++ xlog_recover_validate_buf_type(mp, bp, buf_f);
4310 + }
4311 +
4312 + /*
4313 +@@ -2625,12 +2619,29 @@ xlog_recover_buffer_pass2(
4314 + }
4315 +
4316 + /*
4317 +- * recover the buffer only if we get an LSN from it and it's less than
4318 ++ * Recover the buffer only if we get an LSN from it and it's less than
4319 + * the lsn of the transaction we are replaying.
4320 ++ *
4321 ++ * Note that we have to be extremely careful of readahead here.
4322 ++	 * Readahead does not attach verifiers to the buffers, so if we don't
4323 ++	 * actually do any replay after readahead because the LSN found in the
4324 ++	 * buffer is more recent than the current transaction, then we need to
4325 ++	 * attach the verifier directly. Failure to do so means future recovery
4326 ++	 * actions (e.g. EFI and unlinked list recovery) can operate on the
4327 ++	 * buffers without the verifier attached. This
4328 ++ * can lead to blocks on disk having the correct content but a stale
4329 ++ * CRC.
4330 ++ *
4331 ++ * It is safe to assume these clean buffers are currently up to date.
4332 ++ * If the buffer is dirtied by a later transaction being replayed, then
4333 ++ * the verifier will be reset to match whatever recover turns that
4334 ++ * buffer into.
4335 + */
4336 + lsn = xlog_recover_get_buf_lsn(mp, bp);
4337 +- if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0)
4338 ++ if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
4339 ++ xlog_recover_validate_buf_type(mp, bp, buf_f);
4340 + goto out_release;
4341 ++ }
4342 +
4343 + if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
4344 + error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
4345 +diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
4346 +index 4688a622b373..794aa2fb9c69 100644
4347 +--- a/fs/xfs/xfs_qm.c
4348 ++++ b/fs/xfs/xfs_qm.c
4349 +@@ -1193,6 +1193,12 @@ xfs_qm_dqiter_bufs(
4350 + if (error)
4351 + break;
4352 +
4353 ++ /*
4354 ++ * A corrupt buffer might not have a verifier attached, so
4355 ++ * make sure we have the correct one attached before writeback
4356 ++ * occurs.
4357 ++ */
4358 ++ bp->b_ops = &xfs_dquot_buf_ops;
4359 + xfs_qm_reset_dqcounts(mp, bp, firstid, type);
4360 + xfs_buf_delwri_queue(bp, buffer_list);
4361 + xfs_buf_relse(bp);
4362 +@@ -1276,7 +1282,7 @@ xfs_qm_dqiterate(
4363 + xfs_buf_readahead(mp->m_ddev_targp,
4364 + XFS_FSB_TO_DADDR(mp, rablkno),
4365 + mp->m_quotainfo->qi_dqchunklen,
4366 +- NULL);
4367 ++ &xfs_dquot_buf_ops);
4368 + rablkno++;
4369 + }
4370 + }
4371 +diff --git a/include/linux/capability.h b/include/linux/capability.h
4372 +index 84b13ad67c1c..aa93e5ef594c 100644
4373 +--- a/include/linux/capability.h
4374 ++++ b/include/linux/capability.h
4375 +@@ -78,8 +78,11 @@ extern const kernel_cap_t __cap_init_eff_set;
4376 + # error Fix up hand-coded capability macro initializers
4377 + #else /* HAND-CODED capability initializers */
4378 +
4379 ++#define CAP_LAST_U32 ((_KERNEL_CAPABILITY_U32S) - 1)
4380 ++#define CAP_LAST_U32_VALID_MASK (CAP_TO_MASK(CAP_LAST_CAP + 1) -1)
4381 ++
4382 + # define CAP_EMPTY_SET ((kernel_cap_t){{ 0, 0 }})
4383 +-# define CAP_FULL_SET ((kernel_cap_t){{ ~0, ~0 }})
4384 ++# define CAP_FULL_SET ((kernel_cap_t){{ ~0, CAP_LAST_U32_VALID_MASK }})
4385 + # define CAP_FS_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \
4386 + | CAP_TO_MASK(CAP_LINUX_IMMUTABLE), \
4387 + CAP_FS_MASK_B1 } })
4388 +diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
4389 +index d5b50a19463c..0dae71e9971c 100644
4390 +--- a/include/linux/jbd2.h
4391 ++++ b/include/linux/jbd2.h
4392 +@@ -159,7 +159,11 @@ typedef struct journal_header_s
4393 + * journal_block_tag (in the descriptor). The other h_chksum* fields are
4394 + * not used.
4395 + *
4396 +- * Checksum v1 and v2 are mutually exclusive features.
4397 ++ * If FEATURE_INCOMPAT_CSUM_V3 is set, the descriptor block uses
4398 ++ * journal_block_tag3_t to store a full 32-bit checksum. Everything else
4399 ++ * is the same as v2.
4400 ++ *
4401 ++ * Checksum v1, v2, and v3 are mutually exclusive features.
4402 + */
4403 + struct commit_header {
4404 + __be32 h_magic;
4405 +@@ -179,6 +183,14 @@ struct commit_header {
4406 + * raw struct shouldn't be used for pointer math or sizeof() - use
4407 + * journal_tag_bytes(journal) instead to compute this.
4408 + */
4409 ++typedef struct journal_block_tag3_s
4410 ++{
4411 ++ __be32 t_blocknr; /* The on-disk block number */
4412 ++ __be32 t_flags; /* See below */
4413 ++ __be32 t_blocknr_high; /* most-significant high 32bits. */
4414 ++ __be32 t_checksum; /* crc32c(uuid+seq+block) */
4415 ++} journal_block_tag3_t;
4416 ++
4417 + typedef struct journal_block_tag_s
4418 + {
4419 + __be32 t_blocknr; /* The on-disk block number */
4420 +@@ -187,9 +199,6 @@ typedef struct journal_block_tag_s
4421 + __be32 t_blocknr_high; /* most-significant high 32bits. */
4422 + } journal_block_tag_t;
4423 +
4424 +-#define JBD2_TAG_SIZE32 (offsetof(journal_block_tag_t, t_blocknr_high))
4425 +-#define JBD2_TAG_SIZE64 (sizeof(journal_block_tag_t))
4426 +-
4427 + /* Tail of descriptor block, for checksumming */
4428 + struct jbd2_journal_block_tail {
4429 + __be32 t_checksum; /* crc32c(uuid+descr_block) */
4430 +@@ -284,6 +293,7 @@ typedef struct journal_superblock_s
4431 + #define JBD2_FEATURE_INCOMPAT_64BIT 0x00000002
4432 + #define JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT 0x00000004
4433 + #define JBD2_FEATURE_INCOMPAT_CSUM_V2 0x00000008
4434 ++#define JBD2_FEATURE_INCOMPAT_CSUM_V3 0x00000010
4435 +
4436 + /* Features known to this kernel version: */
4437 + #define JBD2_KNOWN_COMPAT_FEATURES JBD2_FEATURE_COMPAT_CHECKSUM
4438 +@@ -291,7 +301,8 @@ typedef struct journal_superblock_s
4439 + #define JBD2_KNOWN_INCOMPAT_FEATURES (JBD2_FEATURE_INCOMPAT_REVOKE | \
4440 + JBD2_FEATURE_INCOMPAT_64BIT | \
4441 + JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | \
4442 +- JBD2_FEATURE_INCOMPAT_CSUM_V2)
4443 ++ JBD2_FEATURE_INCOMPAT_CSUM_V2 | \
4444 ++ JBD2_FEATURE_INCOMPAT_CSUM_V3)
4445 +
4446 + #ifdef __KERNEL__
4447 +
4448 +@@ -1296,6 +1307,15 @@ static inline int tid_geq(tid_t x, tid_t y)
4449 + extern int jbd2_journal_blocks_per_page(struct inode *inode);
4450 + extern size_t journal_tag_bytes(journal_t *journal);
4451 +
4452 ++static inline int jbd2_journal_has_csum_v2or3(journal_t *journal)
4453 ++{
4454 ++ if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2) ||
4455 ++ JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3))
4456 ++ return 1;
4457 ++
4458 ++ return 0;
4459 ++}
4460 ++
4461 + /*
4462 + * We reserve t_outstanding_credits >> JBD2_CONTROL_BLOCKS_SHIFT for
4463 + * transaction control blocks.
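
With INCOMPAT_CSUM_V3, descriptor tags grow a dedicated 32-bit t_checksum field instead of reusing the old tag layout, and the JBD2_TAG_SIZE32/64 macros move out of the header. A hedged sketch of the size selection this implies; the middle fields of the old tag are an assumption here, since the hunk above elides them:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint32_t t_blocknr;
    uint32_t t_flags;
    uint32_t t_blocknr_high;
    uint32_t t_checksum;
} tag3_t;                                  /* mirrors journal_block_tag3_t */

typedef struct {
    uint32_t t_blocknr;
    uint16_t t_checksum;                   /* assumed: 16-bit csum in v2 tags */
    uint16_t t_flags;
    uint32_t t_blocknr_high;
} tag_t;                                   /* mirrors journal_block_tag_t */

static size_t tag_bytes(int csum_v3, int has_64bit)
{
    if (csum_v3)
        return sizeof(tag3_t);                      /* full 32-bit checksum */
    if (has_64bit)
        return sizeof(tag_t);                       /* includes t_blocknr_high */
    return offsetof(tag_t, t_blocknr_high);         /* 32-bit block numbers */
}

int main(void)
{
    printf("v3=%zu v2+64bit=%zu v2=%zu\n",
           tag_bytes(1, 1), tag_bytes(0, 1), tag_bytes(0, 0));
    return 0;
}
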
4464 +diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
4465 +index b05963f09ebf..f5bfb1a80abe 100644
4466 +--- a/include/linux/sunrpc/svc_xprt.h
4467 ++++ b/include/linux/sunrpc/svc_xprt.h
4468 +@@ -32,6 +32,7 @@ struct svc_xprt_class {
4469 + struct svc_xprt_ops *xcl_ops;
4470 + struct list_head xcl_list;
4471 + u32 xcl_max_payload;
4472 ++ int xcl_ident;
4473 + };
4474 +
4475 + /*
4476 +diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
4477 +index b4f1effc9216..409fafb63f63 100644
4478 +--- a/include/scsi/scsi_device.h
4479 ++++ b/include/scsi/scsi_device.h
4480 +@@ -149,6 +149,7 @@ struct scsi_device {
4481 + unsigned skip_ms_page_8:1; /* do not use MODE SENSE page 0x08 */
4482 + unsigned skip_ms_page_3f:1; /* do not use MODE SENSE page 0x3f */
4483 + unsigned skip_vpd_pages:1; /* do not read VPD pages */
4484 ++ unsigned try_vpd_pages:1; /* attempt to read VPD pages */
4485 + unsigned use_192_bytes_for_3f:1; /* ask for 192 bytes from page 0x3f */
4486 + unsigned no_start_on_add:1; /* do not issue start on add */
4487 + unsigned allow_restart:1; /* issue START_UNIT in error handler */
4488 +diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
4489 +index 447d2d7466fc..183eaab7c380 100644
4490 +--- a/include/scsi/scsi_devinfo.h
4491 ++++ b/include/scsi/scsi_devinfo.h
4492 +@@ -32,4 +32,9 @@
4493 + #define BLIST_ATTACH_PQ3 0x1000000 /* Scan: Attach to PQ3 devices */
4494 + #define BLIST_NO_DIF 0x2000000 /* Disable T10 PI (DIF) */
4495 + #define BLIST_SKIP_VPD_PAGES 0x4000000 /* Ignore SBC-3 VPD pages */
4496 ++#define BLIST_SCSI3LUN 0x8000000 /* Scan more than 256 LUNs
4497 ++ for sequential scan */
4498 ++#define BLIST_TRY_VPD_PAGES 0x10000000 /* Attempt to read VPD pages */
4499 ++#define BLIST_NO_RSOC 0x20000000 /* don't try to issue RSOC */
4500 ++
4501 + #endif
4502 +diff --git a/include/uapi/rdma/rdma_user_cm.h b/include/uapi/rdma/rdma_user_cm.h
4503 +index 99b80abf360a..3066718eb120 100644
4504 +--- a/include/uapi/rdma/rdma_user_cm.h
4505 ++++ b/include/uapi/rdma/rdma_user_cm.h
4506 +@@ -34,6 +34,7 @@
4507 + #define RDMA_USER_CM_H
4508 +
4509 + #include <linux/types.h>
4510 ++#include <linux/socket.h>
4511 + #include <linux/in6.h>
4512 + #include <rdma/ib_user_verbs.h>
4513 + #include <rdma/ib_user_sa.h>
4514 +diff --git a/kernel/audit.c b/kernel/audit.c
4515 +index 197a496587a6..4059e949beb2 100644
4516 +--- a/kernel/audit.c
4517 ++++ b/kernel/audit.c
4518 +@@ -1412,7 +1412,7 @@ void audit_log_cap(struct audit_buffer *ab, char *prefix, kernel_cap_t *cap)
4519 + audit_log_format(ab, " %s=", prefix);
4520 + CAP_FOR_EACH_U32(i) {
4521 + audit_log_format(ab, "%08x",
4522 +- cap->cap[(_KERNEL_CAPABILITY_U32S-1) - i]);
4523 ++ cap->cap[CAP_LAST_U32 - i]);
4524 + }
4525 + }
4526 +
4527 +diff --git a/kernel/capability.c b/kernel/capability.c
4528 +index 788653b97430..50fb74b136db 100644
4529 +--- a/kernel/capability.c
4530 ++++ b/kernel/capability.c
4531 +@@ -268,6 +268,10 @@ SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data)
4532 + i++;
4533 + }
4534 +
4535 ++ effective.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
4536 ++ permitted.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
4537 ++ inheritable.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
4538 ++
4539 + new = prepare_creds();
4540 + if (!new)
4541 + return -ENOMEM;
4542 +diff --git a/kernel/futex.c b/kernel/futex.c
4543 +index f94695c9d38b..e4b9b60e25b1 100644
4544 +--- a/kernel/futex.c
4545 ++++ b/kernel/futex.c
4546 +@@ -2465,6 +2465,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
4547 + * shared futexes. We need to compare the keys:
4548 + */
4549 + if (match_futex(&q.key, &key2)) {
4550 ++ queue_unlock(&q, hb);
4551 + ret = -EINVAL;
4552 + goto out_put_keys;
4553 + }
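
The one-line futex fix above releases the hash-bucket queue lock before bailing out with -EINVAL when a task would requeue a futex onto itself; previously that early return leaked the lock. A userspace pthread analogue of the corrected error path, with illustrative names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hb_lock = PTHREAD_MUTEX_INITIALIZER;

static int requeue_check(int keys_match)
{
    pthread_mutex_lock(&hb_lock);
    if (keys_match) {                    /* futex would requeue to itself */
        pthread_mutex_unlock(&hb_lock);  /* the unlock the patch adds */
        return -22;                      /* -EINVAL */
    }
    /* ... normal path also drops the lock before returning ... */
    pthread_mutex_unlock(&hb_lock);
    return 0;
}

int main(void)
{
    printf("%d\n", requeue_check(1));
    return 0;
}
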
4554 +diff --git a/kernel/smp.c b/kernel/smp.c
4555 +index 0564571dcdf7..7d1187c0c2b6 100644
4556 +--- a/kernel/smp.c
4557 ++++ b/kernel/smp.c
4558 +@@ -650,7 +650,7 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
4559 + if (cond_func(cpu, info)) {
4560 + ret = smp_call_function_single(cpu, func,
4561 + info, wait);
4562 +- WARN_ON_ONCE(!ret);
4563 ++ WARN_ON_ONCE(ret);
4564 + }
4565 + preempt_enable();
4566 + }
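
smp_call_function_single() returns 0 on success, so the old WARN_ON_ONCE(!ret) fired on every successful call and stayed silent on failure. A small demonstration of the convention, using a stand-in WARN_ON_ONCE macro rather than the kernel's:

#include <stdio.h>

#define WARN_ON_ONCE(cond) do { \
        static int warned; \
        if ((cond) && !warned) { \
                warned = 1; \
                fprintf(stderr, "warning hit: %s\n", #cond); \
        } \
} while (0)

int main(void)
{
    int ret = 0;            /* 0 == success, kernel return convention */
    WARN_ON_ONCE(ret);      /* corrected check: fires only on failure */
    WARN_ON_ONCE(!ret);     /* the old, inverted check: fires on success */
    return 0;
}
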
4567 +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
4568 +index a758ec217bc0..65da8249bae6 100644
4569 +--- a/kernel/trace/ring_buffer.c
4570 ++++ b/kernel/trace/ring_buffer.c
4571 +@@ -1981,7 +1981,7 @@ rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
4572 +
4573 + /**
4574 + * rb_update_event - update event type and data
4575 +- * @event: the even to update
4576 ++ * @event: the event to update
4577 + * @type: the type of event
4578 + * @length: the size of the event field in the ring buffer
4579 + *
4580 +@@ -3354,21 +3354,16 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
4581 + struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4582 +
4583 + /* Iterator usage is expected to have record disabled */
4584 +- if (list_empty(&cpu_buffer->reader_page->list)) {
4585 +- iter->head_page = rb_set_head_page(cpu_buffer);
4586 +- if (unlikely(!iter->head_page))
4587 +- return;
4588 +- iter->head = iter->head_page->read;
4589 +- } else {
4590 +- iter->head_page = cpu_buffer->reader_page;
4591 +- iter->head = cpu_buffer->reader_page->read;
4592 +- }
4593 ++ iter->head_page = cpu_buffer->reader_page;
4594 ++ iter->head = cpu_buffer->reader_page->read;
4595 ++
4596 ++ iter->cache_reader_page = iter->head_page;
4597 ++ iter->cache_read = iter->head;
4598 ++
4599 + if (iter->head)
4600 + iter->read_stamp = cpu_buffer->read_stamp;
4601 + else
4602 + iter->read_stamp = iter->head_page->page->time_stamp;
4603 +- iter->cache_reader_page = cpu_buffer->reader_page;
4604 +- iter->cache_read = cpu_buffer->read;
4605 + }
4606 +
4607 + /**
4608 +@@ -3761,12 +3756,14 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
4609 + return NULL;
4610 +
4611 + /*
4612 +- * We repeat when a time extend is encountered.
4613 +- * Since the time extend is always attached to a data event,
4614 +- * we should never loop more than once.
4615 +- * (We never hit the following condition more than twice).
4616 ++ * We repeat when a time extend is encountered or we hit
4617 ++ * the end of the page. Since the time extend is always attached
4618 ++ * to a data event, we should never loop more than three times.
4619 ++ * Once for going to next page, once on time extend, and
4620 ++ * finally once to get the event.
4621 ++ * (We never hit the following condition more than thrice).
4622 + */
4623 +- if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
4624 ++ if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3))
4625 + return NULL;
4626 +
4627 + if (rb_per_cpu_empty(cpu_buffer))
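
The iterator rework above also raises the peek loop budget from 2 to 3, because hopping to the next page is now a legitimate extra pass alongside a time-extend record before the real event is reached. A generic rendering of such a bounded retry loop, purely illustrative:

#include <stdio.h>

static int peek_bounded(int page_hops, int time_extends)
{
    int nr_loops = 0;
    int passes = page_hops + time_extends + 1;    /* +1 for the event itself */

    while (passes--) {
        if (++nr_loops > 3) {                     /* RB_WARN_ON in-kernel */
            fprintf(stderr, "iterator looped more than thrice\n");
            return -1;
        }
    }
    return 0;
}

int main(void)
{
    return peek_bounded(1, 1);    /* page hop + time extend + event: within budget */
}
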
4628 +diff --git a/mm/util.c b/mm/util.c
4629 +index 96da2d7c076c..de943ec0a4c8 100644
4630 +--- a/mm/util.c
4631 ++++ b/mm/util.c
4632 +@@ -272,17 +272,14 @@ pid_t vm_is_stack(struct task_struct *task,
4633 +
4634 + if (in_group) {
4635 + struct task_struct *t;
4636 +- rcu_read_lock();
4637 +- if (!pid_alive(task))
4638 +- goto done;
4639 +
4640 +- t = task;
4641 +- do {
4642 ++ rcu_read_lock();
4643 ++ for_each_thread(task, t) {
4644 + if (vm_is_stack_for_task(t, vma)) {
4645 + ret = t->pid;
4646 + goto done;
4647 + }
4648 +- } while_each_thread(task, t);
4649 ++ }
4650 + done:
4651 + rcu_read_unlock();
4652 + }
4653 +diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
4654 +index a3a81d96314b..2710e850b74c 100644
4655 +--- a/net/bluetooth/l2cap_sock.c
4656 ++++ b/net/bluetooth/l2cap_sock.c
4657 +@@ -882,7 +882,8 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
4658 + l2cap_chan_close(chan, 0);
4659 + lock_sock(sk);
4660 +
4661 +- if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
4662 ++ if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
4663 ++ !(current->flags & PF_EXITING))
4664 + err = bt_sock_wait_state(sk, BT_CLOSED,
4665 + sk->sk_lingertime);
4666 + }
4667 +diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
4668 +index ca957d34b0c8..19ba192e9dbf 100644
4669 +--- a/net/bluetooth/rfcomm/core.c
4670 ++++ b/net/bluetooth/rfcomm/core.c
4671 +@@ -1857,10 +1857,13 @@ static struct rfcomm_session *rfcomm_process_rx(struct rfcomm_session *s)
4672 + /* Get data directly from socket receive queue without copying it. */
4673 + while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
4674 + skb_orphan(skb);
4675 +- if (!skb_linearize(skb))
4676 ++ if (!skb_linearize(skb)) {
4677 + s = rfcomm_recv_frame(s, skb);
4678 +- else
4679 ++ if (!s)
4680 ++ break;
4681 ++ } else {
4682 + kfree_skb(skb);
4683 ++ }
4684 + }
4685 +
4686 + if (s && (sk->sk_state == BT_CLOSED))
4687 +diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
4688 +index c1c6028e389a..7ca014daa5ab 100644
4689 +--- a/net/bluetooth/rfcomm/sock.c
4690 ++++ b/net/bluetooth/rfcomm/sock.c
4691 +@@ -887,7 +887,8 @@ static int rfcomm_sock_shutdown(struct socket *sock, int how)
4692 + sk->sk_shutdown = SHUTDOWN_MASK;
4693 + __rfcomm_sock_close(sk);
4694 +
4695 +- if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
4696 ++ if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
4697 ++ !(current->flags & PF_EXITING))
4698 + err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
4699 + }
4700 + release_sock(sk);
4701 +diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
4702 +index d021e441b6e6..4f5f01b779b5 100644
4703 +--- a/net/bluetooth/sco.c
4704 ++++ b/net/bluetooth/sco.c
4705 +@@ -913,7 +913,8 @@ static int sco_sock_shutdown(struct socket *sock, int how)
4706 + sco_sock_clear_timer(sk);
4707 + __sco_sock_close(sk);
4708 +
4709 +- if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
4710 ++ if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
4711 ++ !(current->flags & PF_EXITING))
4712 + err = bt_sock_wait_state(sk, BT_CLOSED,
4713 + sk->sk_lingertime);
4714 + }
4715 +@@ -933,7 +934,8 @@ static int sco_sock_release(struct socket *sock)
4716 +
4717 + sco_sock_close(sk);
4718 +
4719 +- if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) {
4720 ++ if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
4721 ++ !(current->flags & PF_EXITING)) {
4722 + lock_sock(sk);
4723 + err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
4724 + release_sock(sk);
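
The three shutdown paths above gain the same guard: a task with PF_EXITING set must not be parked in bt_sock_wait_state() for the full SO_LINGER timeout while it is already tearing down. The condition, condensed into a plain C predicate for illustration:

#include <stdio.h>

/* pf_exiting stands in for (current->flags & PF_EXITING) */
static int should_wait_for_close(int so_linger, long lingertime, int pf_exiting)
{
    return so_linger && lingertime && !pf_exiting;
}

int main(void)
{
    printf("exiting task waits? %d\n", should_wait_for_close(1, 5, 1));
    return 0;
}
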
4725 +diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
4726 +index 96238ba95f2b..de6662b14e1f 100644
4727 +--- a/net/ceph/auth_x.c
4728 ++++ b/net/ceph/auth_x.c
4729 +@@ -13,8 +13,6 @@
4730 + #include "auth_x.h"
4731 + #include "auth_x_protocol.h"
4732 +
4733 +-#define TEMP_TICKET_BUF_LEN 256
4734 +-
4735 + static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed);
4736 +
4737 + static int ceph_x_is_authenticated(struct ceph_auth_client *ac)
4738 +@@ -64,7 +62,7 @@ static int ceph_x_encrypt(struct ceph_crypto_key *secret,
4739 + }
4740 +
4741 + static int ceph_x_decrypt(struct ceph_crypto_key *secret,
4742 +- void **p, void *end, void *obuf, size_t olen)
4743 ++ void **p, void *end, void **obuf, size_t olen)
4744 + {
4745 + struct ceph_x_encrypt_header head;
4746 + size_t head_len = sizeof(head);
4747 +@@ -75,8 +73,14 @@ static int ceph_x_decrypt(struct ceph_crypto_key *secret,
4748 + return -EINVAL;
4749 +
4750 + dout("ceph_x_decrypt len %d\n", len);
4751 +- ret = ceph_decrypt2(secret, &head, &head_len, obuf, &olen,
4752 +- *p, len);
4753 ++ if (*obuf == NULL) {
4754 ++ *obuf = kmalloc(len, GFP_NOFS);
4755 ++ if (!*obuf)
4756 ++ return -ENOMEM;
4757 ++ olen = len;
4758 ++ }
4759 ++
4760 ++ ret = ceph_decrypt2(secret, &head, &head_len, *obuf, &olen, *p, len);
4761 + if (ret)
4762 + return ret;
4763 + if (head.struct_v != 1 || le64_to_cpu(head.magic) != CEPHX_ENC_MAGIC)
4764 +@@ -129,139 +133,120 @@ static void remove_ticket_handler(struct ceph_auth_client *ac,
4765 + kfree(th);
4766 + }
4767 +
4768 +-static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
4769 +- struct ceph_crypto_key *secret,
4770 +- void *buf, void *end)
4771 ++static int process_one_ticket(struct ceph_auth_client *ac,
4772 ++ struct ceph_crypto_key *secret,
4773 ++ void **p, void *end)
4774 + {
4775 + struct ceph_x_info *xi = ac->private;
4776 +- int num;
4777 +- void *p = buf;
4778 ++ int type;
4779 ++ u8 tkt_struct_v, blob_struct_v;
4780 ++ struct ceph_x_ticket_handler *th;
4781 ++ void *dbuf = NULL;
4782 ++ void *dp, *dend;
4783 ++ int dlen;
4784 ++ char is_enc;
4785 ++ struct timespec validity;
4786 ++ struct ceph_crypto_key old_key;
4787 ++ void *ticket_buf = NULL;
4788 ++ void *tp, *tpend;
4789 ++ struct ceph_timespec new_validity;
4790 ++ struct ceph_crypto_key new_session_key;
4791 ++ struct ceph_buffer *new_ticket_blob;
4792 ++ unsigned long new_expires, new_renew_after;
4793 ++ u64 new_secret_id;
4794 + int ret;
4795 +- char *dbuf;
4796 +- char *ticket_buf;
4797 +- u8 reply_struct_v;
4798 +
4799 +- dbuf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
4800 +- if (!dbuf)
4801 +- return -ENOMEM;
4802 ++ ceph_decode_need(p, end, sizeof(u32) + 1, bad);
4803 +
4804 +- ret = -ENOMEM;
4805 +- ticket_buf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
4806 +- if (!ticket_buf)
4807 +- goto out_dbuf;
4808 ++ type = ceph_decode_32(p);
4809 ++ dout(" ticket type %d %s\n", type, ceph_entity_type_name(type));
4810 +
4811 +- ceph_decode_need(&p, end, 1 + sizeof(u32), bad);
4812 +- reply_struct_v = ceph_decode_8(&p);
4813 +- if (reply_struct_v != 1)
4814 ++ tkt_struct_v = ceph_decode_8(p);
4815 ++ if (tkt_struct_v != 1)
4816 + goto bad;
4817 +- num = ceph_decode_32(&p);
4818 +- dout("%d tickets\n", num);
4819 +- while (num--) {
4820 +- int type;
4821 +- u8 tkt_struct_v, blob_struct_v;
4822 +- struct ceph_x_ticket_handler *th;
4823 +- void *dp, *dend;
4824 +- int dlen;
4825 +- char is_enc;
4826 +- struct timespec validity;
4827 +- struct ceph_crypto_key old_key;
4828 +- void *tp, *tpend;
4829 +- struct ceph_timespec new_validity;
4830 +- struct ceph_crypto_key new_session_key;
4831 +- struct ceph_buffer *new_ticket_blob;
4832 +- unsigned long new_expires, new_renew_after;
4833 +- u64 new_secret_id;
4834 +-
4835 +- ceph_decode_need(&p, end, sizeof(u32) + 1, bad);
4836 +-
4837 +- type = ceph_decode_32(&p);
4838 +- dout(" ticket type %d %s\n", type, ceph_entity_type_name(type));
4839 +-
4840 +- tkt_struct_v = ceph_decode_8(&p);
4841 +- if (tkt_struct_v != 1)
4842 +- goto bad;
4843 +-
4844 +- th = get_ticket_handler(ac, type);
4845 +- if (IS_ERR(th)) {
4846 +- ret = PTR_ERR(th);
4847 +- goto out;
4848 +- }
4849 +
4850 +- /* blob for me */
4851 +- dlen = ceph_x_decrypt(secret, &p, end, dbuf,
4852 +- TEMP_TICKET_BUF_LEN);
4853 +- if (dlen <= 0) {
4854 +- ret = dlen;
4855 +- goto out;
4856 +- }
4857 +- dout(" decrypted %d bytes\n", dlen);
4858 +- dend = dbuf + dlen;
4859 +- dp = dbuf;
4860 ++ th = get_ticket_handler(ac, type);
4861 ++ if (IS_ERR(th)) {
4862 ++ ret = PTR_ERR(th);
4863 ++ goto out;
4864 ++ }
4865 +
4866 +- tkt_struct_v = ceph_decode_8(&dp);
4867 +- if (tkt_struct_v != 1)
4868 +- goto bad;
4869 ++ /* blob for me */
4870 ++ dlen = ceph_x_decrypt(secret, p, end, &dbuf, 0);
4871 ++ if (dlen <= 0) {
4872 ++ ret = dlen;
4873 ++ goto out;
4874 ++ }
4875 ++ dout(" decrypted %d bytes\n", dlen);
4876 ++ dp = dbuf;
4877 ++ dend = dp + dlen;
4878 +
4879 +- memcpy(&old_key, &th->session_key, sizeof(old_key));
4880 +- ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
4881 +- if (ret)
4882 +- goto out;
4883 ++ tkt_struct_v = ceph_decode_8(&dp);
4884 ++ if (tkt_struct_v != 1)
4885 ++ goto bad;
4886 +
4887 +- ceph_decode_copy(&dp, &new_validity, sizeof(new_validity));
4888 +- ceph_decode_timespec(&validity, &new_validity);
4889 +- new_expires = get_seconds() + validity.tv_sec;
4890 +- new_renew_after = new_expires - (validity.tv_sec / 4);
4891 +- dout(" expires=%lu renew_after=%lu\n", new_expires,
4892 +- new_renew_after);
4893 ++ memcpy(&old_key, &th->session_key, sizeof(old_key));
4894 ++ ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
4895 ++ if (ret)
4896 ++ goto out;
4897 +
4898 +- /* ticket blob for service */
4899 +- ceph_decode_8_safe(&p, end, is_enc, bad);
4900 +- tp = ticket_buf;
4901 +- if (is_enc) {
4902 +- /* encrypted */
4903 +- dout(" encrypted ticket\n");
4904 +- dlen = ceph_x_decrypt(&old_key, &p, end, ticket_buf,
4905 +- TEMP_TICKET_BUF_LEN);
4906 +- if (dlen < 0) {
4907 +- ret = dlen;
4908 +- goto out;
4909 +- }
4910 +- dlen = ceph_decode_32(&tp);
4911 +- } else {
4912 +- /* unencrypted */
4913 +- ceph_decode_32_safe(&p, end, dlen, bad);
4914 +- ceph_decode_need(&p, end, dlen, bad);
4915 +- ceph_decode_copy(&p, ticket_buf, dlen);
4916 ++ ceph_decode_copy(&dp, &new_validity, sizeof(new_validity));
4917 ++ ceph_decode_timespec(&validity, &new_validity);
4918 ++ new_expires = get_seconds() + validity.tv_sec;
4919 ++ new_renew_after = new_expires - (validity.tv_sec / 4);
4920 ++ dout(" expires=%lu renew_after=%lu\n", new_expires,
4921 ++ new_renew_after);
4922 ++
4923 ++ /* ticket blob for service */
4924 ++ ceph_decode_8_safe(p, end, is_enc, bad);
4925 ++ if (is_enc) {
4926 ++ /* encrypted */
4927 ++ dout(" encrypted ticket\n");
4928 ++ dlen = ceph_x_decrypt(&old_key, p, end, &ticket_buf, 0);
4929 ++ if (dlen < 0) {
4930 ++ ret = dlen;
4931 ++ goto out;
4932 + }
4933 +- tpend = tp + dlen;
4934 +- dout(" ticket blob is %d bytes\n", dlen);
4935 +- ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
4936 +- blob_struct_v = ceph_decode_8(&tp);
4937 +- new_secret_id = ceph_decode_64(&tp);
4938 +- ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
4939 +- if (ret)
4940 ++ tp = ticket_buf;
4941 ++ dlen = ceph_decode_32(&tp);
4942 ++ } else {
4943 ++ /* unencrypted */
4944 ++ ceph_decode_32_safe(p, end, dlen, bad);
4945 ++ ticket_buf = kmalloc(dlen, GFP_NOFS);
4946 ++ if (!ticket_buf) {
4947 ++ ret = -ENOMEM;
4948 + goto out;
4949 +-
4950 +- /* all is well, update our ticket */
4951 +- ceph_crypto_key_destroy(&th->session_key);
4952 +- if (th->ticket_blob)
4953 +- ceph_buffer_put(th->ticket_blob);
4954 +- th->session_key = new_session_key;
4955 +- th->ticket_blob = new_ticket_blob;
4956 +- th->validity = new_validity;
4957 +- th->secret_id = new_secret_id;
4958 +- th->expires = new_expires;
4959 +- th->renew_after = new_renew_after;
4960 +- dout(" got ticket service %d (%s) secret_id %lld len %d\n",
4961 +- type, ceph_entity_type_name(type), th->secret_id,
4962 +- (int)th->ticket_blob->vec.iov_len);
4963 +- xi->have_keys |= th->service;
4964 ++ }
4965 ++ tp = ticket_buf;
4966 ++ ceph_decode_need(p, end, dlen, bad);
4967 ++ ceph_decode_copy(p, ticket_buf, dlen);
4968 + }
4969 ++ tpend = tp + dlen;
4970 ++ dout(" ticket blob is %d bytes\n", dlen);
4971 ++ ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
4972 ++ blob_struct_v = ceph_decode_8(&tp);
4973 ++ new_secret_id = ceph_decode_64(&tp);
4974 ++ ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
4975 ++ if (ret)
4976 ++ goto out;
4977 ++
4978 ++ /* all is well, update our ticket */
4979 ++ ceph_crypto_key_destroy(&th->session_key);
4980 ++ if (th->ticket_blob)
4981 ++ ceph_buffer_put(th->ticket_blob);
4982 ++ th->session_key = new_session_key;
4983 ++ th->ticket_blob = new_ticket_blob;
4984 ++ th->validity = new_validity;
4985 ++ th->secret_id = new_secret_id;
4986 ++ th->expires = new_expires;
4987 ++ th->renew_after = new_renew_after;
4988 ++ dout(" got ticket service %d (%s) secret_id %lld len %d\n",
4989 ++ type, ceph_entity_type_name(type), th->secret_id,
4990 ++ (int)th->ticket_blob->vec.iov_len);
4991 ++ xi->have_keys |= th->service;
4992 +
4993 +- ret = 0;
4994 + out:
4995 + kfree(ticket_buf);
4996 +-out_dbuf:
4997 + kfree(dbuf);
4998 + return ret;
4999 +
5000 +@@ -270,6 +255,34 @@ bad:
5001 + goto out;
5002 + }
5003 +
5004 ++static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
5005 ++ struct ceph_crypto_key *secret,
5006 ++ void *buf, void *end)
5007 ++{
5008 ++ void *p = buf;
5009 ++ u8 reply_struct_v;
5010 ++ u32 num;
5011 ++ int ret;
5012 ++
5013 ++ ceph_decode_8_safe(&p, end, reply_struct_v, bad);
5014 ++ if (reply_struct_v != 1)
5015 ++ return -EINVAL;
5016 ++
5017 ++ ceph_decode_32_safe(&p, end, num, bad);
5018 ++ dout("%d tickets\n", num);
5019 ++
5020 ++ while (num--) {
5021 ++ ret = process_one_ticket(ac, secret, &p, end);
5022 ++ if (ret)
5023 ++ return ret;
5024 ++ }
5025 ++
5026 ++ return 0;
5027 ++
5028 ++bad:
5029 ++ return -EINVAL;
5030 ++}
5031 ++
5032 + static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
5033 + struct ceph_x_ticket_handler *th,
5034 + struct ceph_x_authorizer *au)
5035 +@@ -583,13 +596,14 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
5036 + struct ceph_x_ticket_handler *th;
5037 + int ret = 0;
5038 + struct ceph_x_authorize_reply reply;
5039 ++ void *preply = &reply;
5040 + void *p = au->reply_buf;
5041 + void *end = p + sizeof(au->reply_buf);
5042 +
5043 + th = get_ticket_handler(ac, au->service);
5044 + if (IS_ERR(th))
5045 + return PTR_ERR(th);
5046 +- ret = ceph_x_decrypt(&th->session_key, &p, end, &reply, sizeof(reply));
5047 ++ ret = ceph_x_decrypt(&th->session_key, &p, end, &preply, sizeof(reply));
5048 + if (ret < 0)
5049 + return ret;
5050 + if (ret != sizeof(reply))
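
The auth_x rewrite splits ticket parsing into process_one_ticket() and, more importantly, changes ceph_x_decrypt() to take void **obuf and allocate the output buffer itself when the caller passes NULL, sized to the real payload rather than the fixed 256-byte TEMP_TICKET_BUF_LEN that a long ticket could overflow. A hedged sketch of that contract, with memcpy standing in for the real ceph_decrypt2() call:

#include <stdlib.h>
#include <string.h>

static int decrypt_sketch(const void *src, size_t len, void **obuf, size_t olen)
{
    if (*obuf == NULL) {
        *obuf = malloc(len);     /* allocate to fit the actual payload */
        if (!*obuf)
            return -12;          /* -ENOMEM */
        olen = len;
    }
    if (olen < len)
        return -22;              /* caller-supplied buffer too small */
    memcpy(*obuf, src, len);     /* stands in for ceph_decrypt2() */
    return (int)len;             /* decrypted length, like ceph_x_decrypt */
}

int main(void)
{
    void *buf = NULL;
    int n = decrypt_sketch("ticket", 7, &buf, 0);
    free(buf);
    return n < 0;
}
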
5051 +diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
5052 +index ce83d07eb419..94e21b9b1c87 100644
5053 +--- a/net/ceph/messenger.c
5054 ++++ b/net/ceph/messenger.c
5055 +@@ -904,7 +904,7 @@ static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor,
5056 + BUG_ON(page_count > (int)USHRT_MAX);
5057 + cursor->page_count = (unsigned short)page_count;
5058 + BUG_ON(length > SIZE_MAX - cursor->page_offset);
5059 +- cursor->last_piece = (size_t)cursor->page_offset + length <= PAGE_SIZE;
5060 ++ cursor->last_piece = cursor->page_offset + cursor->resid <= PAGE_SIZE;
5061 + }
5062 +
5063 + static struct page *
5064 +diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
5065 +index 2ac9ef35110b..dbcbf5a4707f 100644
5066 +--- a/net/ceph/mon_client.c
5067 ++++ b/net/ceph/mon_client.c
5068 +@@ -1041,7 +1041,15 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
5069 + if (!m) {
5070 + pr_info("alloc_msg unknown type %d\n", type);
5071 + *skip = 1;
5072 ++ } else if (front_len > m->front_alloc_len) {
5073 ++ pr_warning("mon_alloc_msg front %d > prealloc %d (%u#%llu)\n",
5074 ++ front_len, m->front_alloc_len,
5075 ++ (unsigned int)con->peer_name.type,
5076 ++ le64_to_cpu(con->peer_name.num));
5077 ++ ceph_msg_put(m);
5078 ++ m = ceph_msg_new(type, front_len, GFP_NOFS, false);
5079 + }
5080 ++
5081 + return m;
5082 + }
5083 +
5084 +diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
5085 +index 6ac0f1c3fc28..8c6e9c75c525 100644
5086 +--- a/net/sunrpc/svcsock.c
5087 ++++ b/net/sunrpc/svcsock.c
5088 +@@ -683,6 +683,7 @@ static struct svc_xprt_class svc_udp_class = {
5089 + .xcl_owner = THIS_MODULE,
5090 + .xcl_ops = &svc_udp_ops,
5091 + .xcl_max_payload = RPCSVC_MAXPAYLOAD_UDP,
5092 ++ .xcl_ident = XPRT_TRANSPORT_UDP,
5093 + };
5094 +
5095 + static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
5096 +@@ -1277,6 +1278,7 @@ static struct svc_xprt_class svc_tcp_class = {
5097 + .xcl_owner = THIS_MODULE,
5098 + .xcl_ops = &svc_tcp_ops,
5099 + .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
5100 ++ .xcl_ident = XPRT_TRANSPORT_TCP,
5101 + };
5102 +
5103 + void svc_init_xprt_sock(void)
5104 +diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
5105 +index 095363eee764..42ce6bfc729d 100644
5106 +--- a/net/sunrpc/xprt.c
5107 ++++ b/net/sunrpc/xprt.c
5108 +@@ -1290,7 +1290,7 @@ struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
5109 + }
5110 + }
5111 + spin_unlock(&xprt_list_lock);
5112 +- printk(KERN_ERR "RPC: transport (%d) not supported\n", args->ident);
5113 ++ dprintk("RPC: transport (%d) not supported\n", args->ident);
5114 + return ERR_PTR(-EIO);
5115 +
5116 + found:
5117 +diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
5118 +index 62e4f9bcc387..ed36cb52cd86 100644
5119 +--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
5120 ++++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
5121 +@@ -89,6 +89,7 @@ struct svc_xprt_class svc_rdma_class = {
5122 + .xcl_owner = THIS_MODULE,
5123 + .xcl_ops = &svc_rdma_ops,
5124 + .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
5125 ++ .xcl_ident = XPRT_TRANSPORT_RDMA,
5126 + };
5127 +
5128 + struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
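
The xcl_ident values added above let xprt_create_transport() match a server transport class by numeric identifier instead of by name, and a failed lookup is demoted from printk(KERN_ERR) to dprintk. An illustrative analogue of the ident-based lookup; the constant values here are invented for the demo:

#include <stdio.h>

enum { XPRT_TRANSPORT_UDP = 1, XPRT_TRANSPORT_TCP = 2, XPRT_TRANSPORT_RDMA = 3 };

struct xprt_class { const char *xcl_name; int xcl_ident; };

static const struct xprt_class classes[] = {
    { "udp",  XPRT_TRANSPORT_UDP },
    { "tcp",  XPRT_TRANSPORT_TCP },
    { "rdma", XPRT_TRANSPORT_RDMA },
};

static const struct xprt_class *find_class(int ident)
{
    for (size_t i = 0; i < sizeof(classes) / sizeof(classes[0]); i++)
        if (classes[i].xcl_ident == ident)
            return &classes[i];
    return NULL;                  /* "transport (%d) not supported" */
}

int main(void)
{
    const struct xprt_class *c = find_class(XPRT_TRANSPORT_RDMA);
    printf("%s\n", c ? c->xcl_name : "not supported");
    return 0;
}
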
5129 +diff --git a/security/commoncap.c b/security/commoncap.c
5130 +index b9d613e0ef14..963dc5981661 100644
5131 +--- a/security/commoncap.c
5132 ++++ b/security/commoncap.c
5133 +@@ -421,6 +421,9 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data
5134 + cpu_caps->inheritable.cap[i] = le32_to_cpu(caps.data[i].inheritable);
5135 + }
5136 +
5137 ++ cpu_caps->permitted.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
5138 ++ cpu_caps->inheritable.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
5139 ++
5140 + return 0;
5141 + }
5142 +
5143 +diff --git a/sound/soc/blackfin/bf5xx-i2s-pcm.c b/sound/soc/blackfin/bf5xx-i2s-pcm.c
5144 +index 9cb4a80df98e..bc9983d38ff3 100644
5145 +--- a/sound/soc/blackfin/bf5xx-i2s-pcm.c
5146 ++++ b/sound/soc/blackfin/bf5xx-i2s-pcm.c
5147 +@@ -293,19 +293,19 @@ static int bf5xx_pcm_silence(struct snd_pcm_substream *substream,
5148 + unsigned int sample_size = runtime->sample_bits / 8;
5149 + void *buf = runtime->dma_area;
5150 + struct bf5xx_i2s_pcm_data *dma_data;
5151 +- unsigned int offset, size;
5152 ++ unsigned int offset, samples;
5153 +
5154 + dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
5155 +
5156 + if (dma_data->tdm_mode) {
5157 + offset = pos * 8 * sample_size;
5158 +- size = count * 8 * sample_size;
5159 ++ samples = count * 8;
5160 + } else {
5161 + offset = frames_to_bytes(runtime, pos);
5162 +- size = frames_to_bytes(runtime, count);
5163 ++ samples = count * runtime->channels;
5164 + }
5165 +
5166 +- snd_pcm_format_set_silence(runtime->format, buf + offset, size);
5167 ++ snd_pcm_format_set_silence(runtime->format, buf + offset, samples);
5168 +
5169 + return 0;
5170 + }
5171 +diff --git a/sound/soc/codecs/adau1701.c b/sound/soc/codecs/adau1701.c
5172 +index adee866f463f..56bfc679f437 100644
5173 +--- a/sound/soc/codecs/adau1701.c
5174 ++++ b/sound/soc/codecs/adau1701.c
5175 +@@ -230,8 +230,10 @@ static int adau1701_reg_read(void *context, unsigned int reg,
5176 +
5177 + *value = 0;
5178 +
5179 +- for (i = 0; i < size; i++)
5180 +- *value |= recv_buf[i] << (i * 8);
5181 ++ for (i = 0; i < size; i++) {
5182 ++ *value <<= 8;
5183 ++ *value |= recv_buf[i];
5184 ++ }
5185 +
5186 + return 0;
5187 + }
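
The register-read fix above assembles the value most-significant byte first by shifting the accumulator, where the old code shifted each byte by its index and effectively decoded little-endian. A standalone check of the corrected accumulation:

#include <stdio.h>

static unsigned be_read(const unsigned char *buf, int size)
{
    unsigned value = 0;
    for (int i = 0; i < size; i++) {
        value <<= 8;             /* shift what we have, not the new byte */
        value |= buf[i];
    }
    return value;
}

int main(void)
{
    unsigned char buf[2] = { 0x12, 0x34 };
    printf("0x%04x\n", be_read(buf, 2));   /* prints 0x1234, not 0x3412 */
    return 0;
}
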
5188 +diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
5189 +index 9ad8f019adcd..764d0ea42e7c 100644
5190 +--- a/sound/soc/codecs/max98090.c
5191 ++++ b/sound/soc/codecs/max98090.c
5192 +@@ -2250,7 +2250,7 @@ static int max98090_probe(struct snd_soc_codec *codec)
5193 + /* Register for interrupts */
5194 + dev_dbg(codec->dev, "irq = %d\n", max98090->irq);
5195 +
5196 +- ret = request_threaded_irq(max98090->irq, NULL,
5197 ++ ret = devm_request_threaded_irq(codec->dev, max98090->irq, NULL,
5198 + max98090_interrupt, IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
5199 + "max98090_interrupt", codec);
5200 + if (ret < 0) {
5201 +diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
5202 +index c26a8f814b18..aa5253a3548e 100644
5203 +--- a/sound/soc/codecs/rt5640.c
5204 ++++ b/sound/soc/codecs/rt5640.c
5205 +@@ -2061,6 +2061,7 @@ static struct snd_soc_codec_driver soc_codec_dev_rt5640 = {
5206 + static const struct regmap_config rt5640_regmap = {
5207 + .reg_bits = 8,
5208 + .val_bits = 16,
5209 ++ .use_single_rw = true,
5210 +
5211 + .max_register = RT5640_VENDOR_ID2 + 1 + (ARRAY_SIZE(rt5640_ranges) *
5212 + RT5640_PR_SPACING),
5213 +diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
5214 +index 86426a117b07..c9ce9772e49b 100644
5215 +--- a/sound/soc/codecs/wm8994.c
5216 ++++ b/sound/soc/codecs/wm8994.c
5217 +@@ -3492,6 +3492,7 @@ static irqreturn_t wm8994_mic_irq(int irq, void *data)
5218 + return IRQ_HANDLED;
5219 + }
5220 +
5221 ++/* Should be called with accdet_lock held */
5222 + static void wm1811_micd_stop(struct snd_soc_codec *codec)
5223 + {
5224 + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
5225 +@@ -3499,14 +3500,10 @@ static void wm1811_micd_stop(struct snd_soc_codec *codec)
5226 + if (!wm8994->jackdet)
5227 + return;
5228 +
5229 +- mutex_lock(&wm8994->accdet_lock);
5230 +-
5231 + snd_soc_update_bits(codec, WM8958_MIC_DETECT_1, WM8958_MICD_ENA, 0);
5232 +
5233 + wm1811_jackdet_set_mode(codec, WM1811_JACKDET_MODE_JACK);
5234 +
5235 +- mutex_unlock(&wm8994->accdet_lock);
5236 +-
5237 + if (wm8994->wm8994->pdata.jd_ext_cap)
5238 + snd_soc_dapm_disable_pin(&codec->dapm,
5239 + "MICBIAS2");
5240 +@@ -3547,10 +3544,10 @@ static void wm8958_open_circuit_work(struct work_struct *work)
5241 + open_circuit_work.work);
5242 + struct device *dev = wm8994->wm8994->dev;
5243 +
5244 +- wm1811_micd_stop(wm8994->hubs.codec);
5245 +-
5246 + mutex_lock(&wm8994->accdet_lock);
5247 +
5248 ++ wm1811_micd_stop(wm8994->hubs.codec);
5249 ++
5250 + dev_dbg(dev, "Reporting open circuit\n");
5251 +
5252 + wm8994->jack_mic = false;
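
The wm8994 hunks convert wm1811_micd_stop() into a helper that expects accdet_lock to be held, letting wm8958_open_circuit_work() keep the mic-detect stop and the jack-state update inside a single critical section instead of dropping the lock in between. A condensed pthread analogue of that pattern:

#include <pthread.h>

static pthread_mutex_t accdet_lock = PTHREAD_MUTEX_INITIALIZER;
static int jack_mic = 1;

/* Should be called with accdet_lock held. */
static void micd_stop_locked(void) { /* disable mic detect, set jack mode */ }

static void open_circuit_work(void)
{
    pthread_mutex_lock(&accdet_lock);
    micd_stop_locked();          /* previously took the lock itself */
    jack_mic = 0;                /* now updated in the same critical section */
    pthread_mutex_unlock(&accdet_lock);
}

int main(void)
{
    open_circuit_work();
    return jack_mic;
}
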
5253 +diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
5254 +index 0d5de6003849..61e871bf63dd 100644
5255 +--- a/sound/soc/codecs/wm_adsp.c
5256 ++++ b/sound/soc/codecs/wm_adsp.c
5257 +@@ -1694,3 +1694,5 @@ int wm_adsp2_init(struct wm_adsp *adsp, bool dvfs)
5258 + return 0;
5259 + }
5260 + EXPORT_SYMBOL_GPL(wm_adsp2_init);
5261 ++
5262 ++MODULE_LICENSE("GPL v2");
5263 +diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
5264 +index a3119a00d8fa..6c6b35e471c8 100644
5265 +--- a/sound/soc/pxa/pxa-ssp.c
5266 ++++ b/sound/soc/pxa/pxa-ssp.c
5267 +@@ -725,7 +725,8 @@ static int pxa_ssp_probe(struct snd_soc_dai *dai)
5268 + ssp_handle = of_parse_phandle(dev->of_node, "port", 0);
5269 + if (!ssp_handle) {
5270 + dev_err(dev, "unable to get 'port' phandle\n");
5271 +- return -ENODEV;
5272 ++ ret = -ENODEV;
5273 ++ goto err_priv;
5274 + }
5275 +
5276 + priv->ssp = pxa_ssp_request_of(ssp_handle, "SoC audio");
5277 +@@ -766,9 +767,7 @@ static int pxa_ssp_remove(struct snd_soc_dai *dai)
5278 + SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_64000 | \
5279 + SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
5280 +
5281 +-#define PXA_SSP_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
5282 +- SNDRV_PCM_FMTBIT_S24_LE | \
5283 +- SNDRV_PCM_FMTBIT_S32_LE)
5284 ++#define PXA_SSP_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE)
5285 +
5286 + static const struct snd_soc_dai_ops pxa_ssp_dai_ops = {
5287 + .startup = pxa_ssp_startup,
5288 +diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
5289 +index b302f3b7a587..2ac8d88fe7eb 100644
5290 +--- a/sound/soc/samsung/i2s.c
5291 ++++ b/sound/soc/samsung/i2s.c
5292 +@@ -922,11 +922,9 @@ static int i2s_suspend(struct snd_soc_dai *dai)
5293 + {
5294 + struct i2s_dai *i2s = to_info(dai);
5295 +
5296 +- if (dai->active) {
5297 +- i2s->suspend_i2smod = readl(i2s->addr + I2SMOD);
5298 +- i2s->suspend_i2scon = readl(i2s->addr + I2SCON);
5299 +- i2s->suspend_i2spsr = readl(i2s->addr + I2SPSR);
5300 +- }
5301 ++ i2s->suspend_i2smod = readl(i2s->addr + I2SMOD);
5302 ++ i2s->suspend_i2scon = readl(i2s->addr + I2SCON);
5303 ++ i2s->suspend_i2spsr = readl(i2s->addr + I2SPSR);
5304 +
5305 + return 0;
5306 + }
5307 +@@ -935,11 +933,9 @@ static int i2s_resume(struct snd_soc_dai *dai)
5308 + {
5309 + struct i2s_dai *i2s = to_info(dai);
5310 +
5311 +- if (dai->active) {
5312 +- writel(i2s->suspend_i2scon, i2s->addr + I2SCON);
5313 +- writel(i2s->suspend_i2smod, i2s->addr + I2SMOD);
5314 +- writel(i2s->suspend_i2spsr, i2s->addr + I2SPSR);
5315 +- }
5316 ++ writel(i2s->suspend_i2scon, i2s->addr + I2SCON);
5317 ++ writel(i2s->suspend_i2smod, i2s->addr + I2SMOD);
5318 ++ writel(i2s->suspend_i2spsr, i2s->addr + I2SPSR);
5319 +
5320 + return 0;
5321 + }
5322 +diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
5323 +index 330c9a6b5cb5..875cae86d708 100644
5324 +--- a/sound/soc/soc-pcm.c
5325 ++++ b/sound/soc/soc-pcm.c
5326 +@@ -1882,6 +1882,7 @@ int soc_dpcm_runtime_update(struct snd_soc_card *card)
5327 + dpcm_be_disconnect(fe, SNDRV_PCM_STREAM_PLAYBACK);
5328 + }
5329 +
5330 ++ dpcm_path_put(&list);
5331 + capture:
5332 + /* skip if FE doesn't have capture capability */
5333 + if (!fe->cpu_dai->driver->capture.channels_min)
5334 +diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
5335 +index 9f3eae290900..2d9ab9417289 100644
5336 +--- a/tools/testing/selftests/Makefile
5337 ++++ b/tools/testing/selftests/Makefile
5338 +@@ -4,6 +4,7 @@ TARGETS += efivarfs
5339 + TARGETS += kcmp
5340 + TARGETS += memory-hotplug
5341 + TARGETS += mqueue
5342 ++TARGETS += mount
5343 + TARGETS += net
5344 + TARGETS += ptrace
5345 + TARGETS += timers
5346 +diff --git a/tools/testing/selftests/mount/Makefile b/tools/testing/selftests/mount/Makefile
5347 +new file mode 100644
5348 +index 000000000000..337d853c2b72
5349 +--- /dev/null
5350 ++++ b/tools/testing/selftests/mount/Makefile
5351 +@@ -0,0 +1,17 @@
5352 ++# Makefile for mount selftests.
5353 ++
5354 ++all: unprivileged-remount-test
5355 ++
5356 ++unprivileged-remount-test: unprivileged-remount-test.c
5357 ++ gcc -Wall -O2 unprivileged-remount-test.c -o unprivileged-remount-test
5358 ++
5359 ++# Allow specific tests to be selected.
5360 ++test_unprivileged_remount: unprivileged-remount-test
5361 ++ @if [ -f /proc/self/uid_map ] ; then ./unprivileged-remount-test ; fi
5362 ++
5363 ++run_tests: all test_unprivileged_remount
5364 ++
5365 ++clean:
5366 ++ rm -f unprivileged-remount-test
5367 ++
5368 ++.PHONY: all test_unprivileged_remount
5369 +diff --git a/tools/testing/selftests/mount/unprivileged-remount-test.c b/tools/testing/selftests/mount/unprivileged-remount-test.c
5370 +new file mode 100644
5371 +index 000000000000..1b3ff2fda4d0
5372 +--- /dev/null
5373 ++++ b/tools/testing/selftests/mount/unprivileged-remount-test.c
5374 +@@ -0,0 +1,242 @@
5375 ++#define _GNU_SOURCE
5376 ++#include <sched.h>
5377 ++#include <stdio.h>
5378 ++#include <errno.h>
5379 ++#include <string.h>
5380 ++#include <sys/types.h>
5381 ++#include <sys/mount.h>
5382 ++#include <sys/wait.h>
5383 ++#include <stdlib.h>
5384 ++#include <unistd.h>
5385 ++#include <fcntl.h>
5386 ++#include <grp.h>
5387 ++#include <stdbool.h>
5388 ++#include <stdarg.h>
5389 ++
5390 ++#ifndef CLONE_NEWNS
5391 ++# define CLONE_NEWNS 0x00020000
5392 ++#endif
5393 ++#ifndef CLONE_NEWUTS
5394 ++# define CLONE_NEWUTS 0x04000000
5395 ++#endif
5396 ++#ifndef CLONE_NEWIPC
5397 ++# define CLONE_NEWIPC 0x08000000
5398 ++#endif
5399 ++#ifndef CLONE_NEWNET
5400 ++# define CLONE_NEWNET 0x40000000
5401 ++#endif
5402 ++#ifndef CLONE_NEWUSER
5403 ++# define CLONE_NEWUSER 0x10000000
5404 ++#endif
5405 ++#ifndef CLONE_NEWPID
5406 ++# define CLONE_NEWPID 0x20000000
5407 ++#endif
5408 ++
5409 ++#ifndef MS_RELATIME
5410 ++#define MS_RELATIME (1 << 21)
5411 ++#endif
5412 ++#ifndef MS_STRICTATIME
5413 ++#define MS_STRICTATIME (1 << 24)
5414 ++#endif
5415 ++
5416 ++static void die(char *fmt, ...)
5417 ++{
5418 ++ va_list ap;
5419 ++ va_start(ap, fmt);
5420 ++ vfprintf(stderr, fmt, ap);
5421 ++ va_end(ap);
5422 ++ exit(EXIT_FAILURE);
5423 ++}
5424 ++
5425 ++static void write_file(char *filename, char *fmt, ...)
5426 ++{
5427 ++ char buf[4096];
5428 ++ int fd;
5429 ++ ssize_t written;
5430 ++ int buf_len;
5431 ++ va_list ap;
5432 ++
5433 ++ va_start(ap, fmt);
5434 ++ buf_len = vsnprintf(buf, sizeof(buf), fmt, ap);
5435 ++ va_end(ap);
5436 ++ if (buf_len < 0) {
5437 ++ die("vsnprintf failed: %s\n",
5438 ++ strerror(errno));
5439 ++ }
5440 ++ if (buf_len >= sizeof(buf)) {
5441 ++ die("vsnprintf output truncated\n");
5442 ++ }
5443 ++
5444 ++ fd = open(filename, O_WRONLY);
5445 ++ if (fd < 0) {
5446 ++ die("open of %s failed: %s\n",
5447 ++ filename, strerror(errno));
5448 ++ }
5449 ++ written = write(fd, buf, buf_len);
5450 ++ if (written != buf_len) {
5451 ++ if (written >= 0) {
5452 ++ die("short write to %s\n", filename);
5453 ++ } else {
5454 ++ die("write to %s failed: %s\n",
5455 ++ filename, strerror(errno));
5456 ++ }
5457 ++ }
5458 ++ if (close(fd) != 0) {
5459 ++ die("close of %s failed: %s\n",
5460 ++ filename, strerror(errno));
5461 ++ }
5462 ++}
5463 ++
5464 ++static void create_and_enter_userns(void)
5465 ++{
5466 ++ uid_t uid;
5467 ++ gid_t gid;
5468 ++
5469 ++ uid = getuid();
5470 ++ gid = getgid();
5471 ++
5472 ++ if (unshare(CLONE_NEWUSER) !=0) {
5473 ++ die("unshare(CLONE_NEWUSER) failed: %s\n",
5474 ++ strerror(errno));
5475 ++ }
5476 ++
5477 ++ write_file("/proc/self/uid_map", "0 %d 1", uid);
5478 ++ write_file("/proc/self/gid_map", "0 %d 1", gid);
5479 ++
5480 ++ if (setgroups(0, NULL) != 0) {
5481 ++ die("setgroups failed: %s\n",
5482 ++ strerror(errno));
5483 ++ }
5484 ++ if (setgid(0) != 0) {
5485 ++ die ("setgid(0) failed %s\n",
5486 ++ strerror(errno));
5487 ++ }
5488 ++ if (setuid(0) != 0) {
5489 ++ die("setuid(0) failed %s\n",
5490 ++ strerror(errno));
5491 ++ }
5492 ++}
5493 ++
5494 ++static
5495 ++bool test_unpriv_remount(int mount_flags, int remount_flags, int invalid_flags)
5496 ++{
5497 ++ pid_t child;
5498 ++
5499 ++ child = fork();
5500 ++ if (child == -1) {
5501 ++ die("fork failed: %s\n",
5502 ++ strerror(errno));
5503 ++ }
5504 ++ if (child != 0) { /* parent */
5505 ++ pid_t pid;
5506 ++ int status;
5507 ++ pid = waitpid(child, &status, 0);
5508 ++ if (pid == -1) {
5509 ++ die("waitpid failed: %s\n",
5510 ++ strerror(errno));
5511 ++ }
5512 ++ if (pid != child) {
5513 ++ die("waited for %d got %d\n",
5514 ++ child, pid);
5515 ++ }
5516 ++ if (!WIFEXITED(status)) {
5517 ++ die("child did not terminate cleanly\n");
5518 ++ }
5519 ++ return WEXITSTATUS(status) == EXIT_SUCCESS ? true : false;
5520 ++ }
5521 ++
5522 ++ create_and_enter_userns();
5523 ++ if (unshare(CLONE_NEWNS) != 0) {
5524 ++ die("unshare(CLONE_NEWNS) failed: %s\n",
5525 ++ strerror(errno));
5526 ++ }
5527 ++
5528 ++ if (mount("testing", "/tmp", "ramfs", mount_flags, NULL) != 0) {
5529 ++ die("mount of /tmp failed: %s\n",
5530 ++ strerror(errno));
5531 ++ }
5532 ++
5533 ++ create_and_enter_userns();
5534 ++
5535 ++ if (unshare(CLONE_NEWNS) != 0) {
5536 ++ die("unshare(CLONE_NEWNS) failed: %s\n",
5537 ++ strerror(errno));
5538 ++ }
5539 ++
5540 ++ if (mount("/tmp", "/tmp", "none",
5541 ++ MS_REMOUNT | MS_BIND | remount_flags, NULL) != 0) {
5542 ++ /* system("cat /proc/self/mounts"); */
5543 ++ die("remount of /tmp failed: %s\n",
5544 ++ strerror(errno));
5545 ++ }
5546 ++
5547 ++ if (mount("/tmp", "/tmp", "none",
5548 ++ MS_REMOUNT | MS_BIND | invalid_flags, NULL) == 0) {
5549 ++ /* system("cat /proc/self/mounts"); */
5550 ++ die("remount of /tmp with invalid flags "
5551 ++ "succeeded unexpectedly\n");
5552 ++ }
5553 ++ exit(EXIT_SUCCESS);
5554 ++}
5555 ++
5556 ++static bool test_unpriv_remount_simple(int mount_flags)
5557 ++{
5558 ++ return test_unpriv_remount(mount_flags, mount_flags, 0);
5559 ++}
5560 ++
5561 ++static bool test_unpriv_remount_atime(int mount_flags, int invalid_flags)
5562 ++{
5563 ++ return test_unpriv_remount(mount_flags, mount_flags, invalid_flags);
5564 ++}
5565 ++
5566 ++int main(int argc, char **argv)
5567 ++{
5568 ++ if (!test_unpriv_remount_simple(MS_RDONLY|MS_NODEV)) {
5569 ++ die("MS_RDONLY malfunctions\n");
5570 ++ }
5571 ++ if (!test_unpriv_remount_simple(MS_NODEV)) {
5572 ++ die("MS_NODEV malfunctions\n");
5573 ++ }
5574 ++ if (!test_unpriv_remount_simple(MS_NOSUID|MS_NODEV)) {
5575 ++ die("MS_NOSUID malfunctions\n");
5576 ++ }
5577 ++ if (!test_unpriv_remount_simple(MS_NOEXEC|MS_NODEV)) {
5578 ++ die("MS_NOEXEC malfunctions\n");
5579 ++ }
5580 ++ if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODEV,
5581 ++ MS_NOATIME|MS_NODEV))
5582 ++ {
5583 ++ die("MS_RELATIME malfunctions\n");
5584 ++ }
5585 ++ if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODEV,
5586 ++ MS_NOATIME|MS_NODEV))
5587 ++ {
5588 ++ die("MS_STRICTATIME malfunctions\n");
5589 ++ }
5590 ++ if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODEV,
5591 ++ MS_STRICTATIME|MS_NODEV))
5592 ++ {
5593 ++ die("MS_RELATIME malfunctions\n");
5594 ++ }
5595 ++ if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODIRATIME|MS_NODEV,
5596 ++ MS_NOATIME|MS_NODEV))
5597 ++ {
5598 ++ die("MS_RELATIME malfunctions\n");
5599 ++ }
5600 ++ if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODIRATIME|MS_NODEV,
5601 ++ MS_NOATIME|MS_NODEV))
5602 ++ {
5603 ++ die("MS_RELATIME malfunctions\n");
5604 ++ }
5605 ++ if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODIRATIME|MS_NODEV,
5606 ++ MS_STRICTATIME|MS_NODEV))
5607 ++ {
5608 ++ die("MS_RELATIME malfunctions\n");
5609 ++ }
5610 ++ if (!test_unpriv_remount(MS_STRICTATIME|MS_NODEV, MS_NODEV,
5611 ++ MS_NOATIME|MS_NODEV))
5612 ++ {
5613 ++ die("Default atime malfunctions\n");
5614 ++ }
5615 ++ return EXIT_SUCCESS;
5616 ++}
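
Note that the accompanying Makefile only invokes the test binary when /proc/self/uid_map exists, so run_tests quietly skips kernels built without user namespace support instead of failing outright.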