Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:3.10 commit in: /
Date: Sun, 28 Aug 2016 21:54:19
Message-Id: 1472421244.80dd442fa65be5bd488631176a196b7d2fbd3d9d.mpagano@gentoo
commit: 80dd442fa65be5bd488631176a196b7d2fbd3d9d
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Aug 28 21:54:04 2016 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Aug 28 21:54:04 2016 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=80dd442f

Linux patch 3.10.103

0000_README | 4 +
1102_linux-3.10.103.patch | 6785 +++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 6789 insertions(+)

diff --git a/0000_README b/0000_README
index ccfaff0..c6ef0e7 100644
--- a/0000_README
+++ b/0000_README
@@ -450,6 +450,10 @@ Patch: 1101_linux-3.10.102.patch
From: http://www.kernel.org
Desc: Linux 3.10.102

+Patch: 1102_linux-3.10.103.patch
+From: http://www.kernel.org
+Desc: Linux 3.10.103
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1102_linux-3.10.103.patch b/1102_linux-3.10.103.patch
new file mode 100644
index 0000000..a1db16b
--- /dev/null
+++ b/1102_linux-3.10.103.patch
@@ -0,0 +1,6785 @@
+diff --git a/Documentation/scsi/scsi_eh.txt b/Documentation/scsi/scsi_eh.txt
+index 6ff16b620d84..c08b62d63afa 100644
+--- a/Documentation/scsi/scsi_eh.txt
++++ b/Documentation/scsi/scsi_eh.txt
+@@ -255,19 +255,23 @@ scmd->allowed.
+
+ 3. scmd recovered
+ ACTION: scsi_eh_finish_cmd() is invoked to EH-finish scmd
+- - shost->host_failed--
+ - clear scmd->eh_eflags
+ - scsi_setup_cmd_retry()
+ - move from local eh_work_q to local eh_done_q
+ LOCKING: none
++ CONCURRENCY: at most one thread per separate eh_work_q to
++ keep queue manipulation lockless
+
+ 4. EH completes
+ ACTION: scsi_eh_flush_done_q() retries scmds or notifies upper
+- layer of failure.
++ layer of failure. May be called concurrently but must have
++ a no more than one thread per separate eh_work_q to
++ manipulate the queue locklessly
+ - scmd is removed from eh_done_q and scmd->eh_entry is cleared
+ - if retry is necessary, scmd is requeued using
+ scsi_queue_insert()
+ - otherwise, scsi_finish_command() is invoked for scmd
++ - zero shost->host_failed
+ LOCKING: queue or finish function performs appropriate locking
+
+
+diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt
+index 88152f214f48..302b5ed616a6 100644
+--- a/Documentation/sysctl/fs.txt
++++ b/Documentation/sysctl/fs.txt
+@@ -32,6 +32,8 @@ Currently, these files are in /proc/sys/fs:
+ - nr_open
+ - overflowuid
+ - overflowgid
++- pipe-user-pages-hard
++- pipe-user-pages-soft
+ - protected_hardlinks
+ - protected_symlinks
+ - suid_dumpable
+@@ -159,6 +161,27 @@ The default is 65534.
+
+ ==============================================================
+
++pipe-user-pages-hard:
++
++Maximum total number of pages a non-privileged user may allocate for pipes.
++Once this limit is reached, no new pipes may be allocated until usage goes
++below the limit again. When set to 0, no limit is applied, which is the default
++setting.
++
++==============================================================
++
++pipe-user-pages-soft:
++
++Maximum total number of pages a non-privileged user may allocate for pipes
++before the pipe size gets limited to a single page. Once this limit is reached,
++new pipes will be limited to a single page in size for this user in order to
++limit total memory usage, and trying to increase them using fcntl() will be
++denied until usage goes below the limit again. The default value allows to
++allocate up to 1024 pipes at their default size. When set to 0, no limit is
++applied.
++
++==============================================================
++
+ protected_hardlinks:
+
+ A long-standing class of security issues is the hardlink-based
+diff --git a/Makefile b/Makefile
+index 868093c16ae0..d3cb458b295a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 10
+-SUBLEVEL = 102
++SUBLEVEL = 103
+ EXTRAVERSION =
+ NAME = TOSSUG Baby Fish
+
+diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
+index ca0207b9d5b6..06997ad70725 100644
+--- a/arch/arc/kernel/stacktrace.c
++++ b/arch/arc/kernel/stacktrace.c
+@@ -131,7 +131,7 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
+ * prelogue is setup (callee regs saved and then fp set and not other
+ * way around
+ */
+- pr_warn("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
++ pr_warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
+ return 0;
+
+ #endif
+diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
+index 3357d26ffe54..74691e652a3a 100644
+--- a/arch/arc/mm/tlbex.S
++++ b/arch/arc/mm/tlbex.S
+@@ -219,7 +219,7 @@ ex_saved_reg1:
+ #ifdef CONFIG_SMP
+ sr r0, [ARC_REG_SCRATCH_DATA0] ; freeup r0 to code with
+ GET_CPU_ID r0 ; get to per cpu scratch mem,
+- lsl r0, r0, L1_CACHE_SHIFT ; cache line wide per cpu
++ asl r0, r0, L1_CACHE_SHIFT ; cache line wide per cpu
+ add r0, @ex_saved_reg1, r0
+ #else
+ st r0, [@ex_saved_reg1]
+@@ -239,7 +239,7 @@ ex_saved_reg1:
+ .macro TLBMISS_RESTORE_REGS
+ #ifdef CONFIG_SMP
+ GET_CPU_ID r0 ; get to per cpu scratch mem
+- lsl r0, r0, L1_CACHE_SHIFT ; each is cache line wide
++ asl r0, r0, L1_CACHE_SHIFT ; each is cache line wide
+ add r0, @ex_saved_reg1, r0
+ ld_s r3, [r0,12]
+ ld_s r2, [r0, 8]
+diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
+index 03deeffd9f6d..4e2110d48c41 100644
+--- a/arch/arm/kernel/ptrace.c
++++ b/arch/arm/kernel/ptrace.c
+@@ -733,8 +733,8 @@ static int vfp_set(struct task_struct *target,
+ if (ret)
+ return ret;
+
+- vfp_flush_hwstate(thread);
+ thread->vfpstate.hard = new_vfp;
++ vfp_flush_hwstate(thread);
+
+ return 0;
+ }
+diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
+index 3e94811690ce..a0aee80b608d 100644
+--- a/arch/arm/kernel/sys_oabi-compat.c
++++ b/arch/arm/kernel/sys_oabi-compat.c
+@@ -275,8 +275,12 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
+ mm_segment_t fs;
+ long ret, err, i;
+
+- if (maxevents <= 0 || maxevents > (INT_MAX/sizeof(struct epoll_event)))
++ if (maxevents <= 0 ||
++ maxevents > (INT_MAX/sizeof(*kbuf)) ||
++ maxevents > (INT_MAX/sizeof(*events)))
+ return -EINVAL;
++ if (!access_ok(VERIFY_WRITE, events, sizeof(*events) * maxevents))
++ return -EFAULT;
+ kbuf = kmalloc(sizeof(*kbuf) * maxevents, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+@@ -313,6 +317,8 @@ asmlinkage long sys_oabi_semtimedop(int semid,
+
+ if (nsops < 1 || nsops > SEMOPM)
+ return -EINVAL;
++ if (!access_ok(VERIFY_READ, tsops, sizeof(*tsops) * nsops))
++ return -EFAULT;
+ sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL);
+ if (!sops)
+ return -ENOMEM;
+diff --git a/arch/metag/include/asm/cmpxchg_lnkget.h b/arch/metag/include/asm/cmpxchg_lnkget.h
+index 0154e2807ebb..2369ad394876 100644
+--- a/arch/metag/include/asm/cmpxchg_lnkget.h
++++ b/arch/metag/include/asm/cmpxchg_lnkget.h
+@@ -73,7 +73,7 @@ static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
+ " DCACHE [%2], %0\n"
+ #endif
+ "2:\n"
+- : "=&d" (temp), "=&da" (retval)
++ : "=&d" (temp), "=&d" (retval)
+ : "da" (m), "bd" (old), "da" (new)
+ : "cc"
+ );
+diff --git a/arch/mips/ath79/early_printk.c b/arch/mips/ath79/early_printk.c
+index b955fafc58ba..d1adc59af5bf 100644
+--- a/arch/mips/ath79/early_printk.c
++++ b/arch/mips/ath79/early_printk.c
+@@ -31,13 +31,15 @@ static inline void prom_putchar_wait(void __iomem *reg, u32 mask, u32 val)
+ } while (1);
+ }
+
++#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
++
+ static void prom_putchar_ar71xx(unsigned char ch)
+ {
+ void __iomem *base = (void __iomem *)(KSEG1ADDR(AR71XX_UART_BASE));
+
+- prom_putchar_wait(base + UART_LSR * 4, UART_LSR_THRE, UART_LSR_THRE);
++ prom_putchar_wait(base + UART_LSR * 4, BOTH_EMPTY, BOTH_EMPTY);
+ __raw_writel(ch, base + UART_TX * 4);
+- prom_putchar_wait(base + UART_LSR * 4, UART_LSR_THRE, UART_LSR_THRE);
++ prom_putchar_wait(base + UART_LSR * 4, BOTH_EMPTY, BOTH_EMPTY);
+ }
+
+ static void prom_putchar_ar933x(unsigned char ch)
+diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
+index 4d6fa0bf1305..883a162083af 100644
+--- a/arch/mips/include/asm/kvm_host.h
++++ b/arch/mips/include/asm/kvm_host.h
+@@ -349,6 +349,7 @@ struct kvm_mips_tlb {
+ #define KVM_MIPS_GUEST_TLB_SIZE 64
+ struct kvm_vcpu_arch {
+ void *host_ebase, *guest_ebase;
++ int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+ unsigned long host_stack;
+ unsigned long host_gp;
+
+diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
+index 1470b7b68b0e..a7e71744fe89 100644
+--- a/arch/mips/include/asm/processor.h
++++ b/arch/mips/include/asm/processor.h
+@@ -51,7 +51,7 @@ extern unsigned int vced_count, vcei_count;
+ * User space process size: 2GB. This is hardcoded into a few places,
+ * so don't change it unless you know what you are doing.
+ */
+-#define TASK_SIZE 0x7fff8000UL
++#define TASK_SIZE 0x80000000UL
+ #endif
+
+ #ifdef __KERNEL__
+diff --git a/arch/mips/include/uapi/asm/siginfo.h b/arch/mips/include/uapi/asm/siginfo.h
+index 6a8714193fb9..b5f77f76c899 100644
+--- a/arch/mips/include/uapi/asm/siginfo.h
++++ b/arch/mips/include/uapi/asm/siginfo.h
+@@ -45,13 +45,13 @@ typedef struct siginfo {
+
+ /* kill() */
+ struct {
+- pid_t _pid; /* sender's pid */
++ __kernel_pid_t _pid; /* sender's pid */
+ __ARCH_SI_UID_T _uid; /* sender's uid */
+ } _kill;
+
+ /* POSIX.1b timers */
+ struct {
+- timer_t _tid; /* timer id */
++ __kernel_timer_t _tid; /* timer id */
+ int _overrun; /* overrun count */
+ char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)];
+ sigval_t _sigval; /* same as below */
+@@ -60,26 +60,26 @@ typedef struct siginfo {
+
+ /* POSIX.1b signals */
+ struct {
+- pid_t _pid; /* sender's pid */
++ __kernel_pid_t _pid; /* sender's pid */
+ __ARCH_SI_UID_T _uid; /* sender's uid */
+ sigval_t _sigval;
+ } _rt;
+
+ /* SIGCHLD */
+ struct {
+- pid_t _pid; /* which child */
++ __kernel_pid_t _pid; /* which child */
+ __ARCH_SI_UID_T _uid; /* sender's uid */
+ int _status; /* exit code */
+- clock_t _utime;
+- clock_t _stime;
++ __kernel_clock_t _utime;
++ __kernel_clock_t _stime;
+ } _sigchld;
+
+ /* IRIX SIGCHLD */
+ struct {
+- pid_t _pid; /* which child */
+- clock_t _utime;
++ __kernel_pid_t _pid; /* which child */
++ __kernel_clock_t _utime;
+ int _status; /* exit code */
+- clock_t _stime;
++ __kernel_clock_t _stime;
+ } _irix_sigchld;
+
+ /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
+index cab150789c8d..b657fbefc466 100644
+--- a/arch/mips/kernel/scall64-n32.S
++++ b/arch/mips/kernel/scall64-n32.S
+@@ -349,7 +349,7 @@ EXPORT(sysn32_call_table)
+ PTR sys_ni_syscall /* available, was setaltroot */
+ PTR sys_add_key
+ PTR sys_request_key
+- PTR sys_keyctl /* 6245 */
++ PTR compat_sys_keyctl /* 6245 */
+ PTR sys_set_thread_area
+ PTR sys_inotify_init
+ PTR sys_inotify_add_watch
+diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
+index 37605dc8eef7..bf56d7e271dd 100644
+--- a/arch/mips/kernel/scall64-o32.S
++++ b/arch/mips/kernel/scall64-o32.S
+@@ -474,7 +474,7 @@ sys_call_table:
+ PTR sys_ni_syscall /* available, was setaltroot */
+ PTR sys_add_key /* 4280 */
+ PTR sys_request_key
+- PTR sys_keyctl
++ PTR compat_sys_keyctl
+ PTR sys_set_thread_area
+ PTR sys_inotify_init
+ PTR sys_inotify_add_watch /* 4285 */
+diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S
+index 34c35f0e3290..73553cd98070 100644
+--- a/arch/mips/kvm/kvm_locore.S
++++ b/arch/mips/kvm/kvm_locore.S
+@@ -227,6 +227,7 @@ FEXPORT(__kvm_mips_load_k0k1)
+ /* Jump to guest */
+ eret
+ .set pop
++EXPORT(__kvm_mips_vcpu_run_end)
+
+ VECTOR(MIPSX(exception), unknown)
+ /*
+diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
+index 8aa5f30d8579..97a181a44e53 100644
+--- a/arch/mips/kvm/kvm_mips.c
++++ b/arch/mips/kvm/kvm_mips.c
+@@ -343,6 +343,15 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+ memcpy(gebase + offset, mips32_GuestException,
+ mips32_GuestExceptionEnd - mips32_GuestException);
+
++#ifdef MODULE
++ offset += mips32_GuestExceptionEnd - mips32_GuestException;
++ memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run,
++ __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run);
++ vcpu->arch.vcpu_run = gebase + offset;
++#else
++ vcpu->arch.vcpu_run = __kvm_mips_vcpu_run;
++#endif
++
+ /* Invalidate the icache for these ranges */
+ mips32_SyncICache((unsigned long) gebase, ALIGN(size, PAGE_SIZE));
+
+@@ -426,7 +435,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+
+ kvm_guest_enter();
+
+- r = __kvm_mips_vcpu_run(run, vcpu);
++ r = vcpu->arch.vcpu_run(run, vcpu);
+
+ kvm_guest_exit();
+ local_irq_enable();
+diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
+index 33085819cd89..9f7643874fba 100644
+--- a/arch/mips/kvm/kvm_mips_emul.c
++++ b/arch/mips/kvm/kvm_mips_emul.c
+@@ -972,8 +972,13 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
+ preempt_disable();
+ if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
+
+- if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) {
+- kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
++ if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 &&
++ kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) {
++ kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n",
++ __func__, va, vcpu, read_c0_entryhi());
++ er = EMULATE_FAIL;
++ preempt_enable();
++ goto done;
+ }
+ } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
+ KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
+@@ -1006,11 +1011,16 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
+ run, vcpu);
+ preempt_enable();
+ goto dont_update_pc;
+- } else {
+- /* We fault an entry from the guest tlb to the shadow host TLB */
+- kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
+- NULL,
+- NULL);
++ }
++ /* We fault an entry from the guest tlb to the shadow host TLB */
++ if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
++ NULL, NULL)) {
++ kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
++ __func__, va, index, vcpu,
++ read_c0_entryhi());
++ er = EMULATE_FAIL;
++ preempt_enable();
++ goto done;
+ }
+ }
+ } else {
+@@ -1821,8 +1831,13 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
+ tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
+ #endif
+ /* OK we have a Guest TLB entry, now inject it into the shadow host TLB */
+- kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
+- NULL);
++ if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
++ NULL, NULL)) {
++ kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
++ __func__, va, index, vcpu,
++ read_c0_entryhi());
++ er = EMULATE_FAIL;
++ }
+ }
+ }
+
+diff --git a/arch/mips/kvm/kvm_mips_int.h b/arch/mips/kvm/kvm_mips_int.h
+index 20da7d29eede..bf41ea36210e 100644
+--- a/arch/mips/kvm/kvm_mips_int.h
++++ b/arch/mips/kvm/kvm_mips_int.h
+@@ -27,6 +27,8 @@
+ #define MIPS_EXC_MAX 12
+ /* XXXSL More to follow */
+
++extern char __kvm_mips_vcpu_run_end[];
++
+ #define C_TI (_ULCAST_(1) << 30)
+
+ #define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
+diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c
+index c777dd36d4a8..4bee4397dca8 100644
+--- a/arch/mips/kvm/kvm_tlb.c
++++ b/arch/mips/kvm/kvm_tlb.c
+@@ -312,7 +312,7 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
+ }
+
+ gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
+- if (gfn >= kvm->arch.guest_pmap_npages) {
++ if ((gfn | 1) >= kvm->arch.guest_pmap_npages) {
+ kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
+ gfn, badvaddr);
+ kvm_mips_dump_host_tlbs();
+@@ -397,21 +397,38 @@ kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
+ unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
+ struct kvm *kvm = vcpu->kvm;
+ pfn_t pfn0, pfn1;
++ gfn_t gfn0, gfn1;
++ long tlb_lo[2];
++
++ tlb_lo[0] = tlb->tlb_lo0;
++ tlb_lo[1] = tlb->tlb_lo1;
++
++ /*
++ * The commpage address must not be mapped to anything else if the guest
++ * TLB contains entries nearby, or commpage accesses will break.
++ */
++ if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) &
++ VPN2_MASK & (PAGE_MASK << 1)))
++ tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;
++
++ gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT;
++ gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT;
++ if (gfn0 >= kvm->arch.guest_pmap_npages ||
++ gfn1 >= kvm->arch.guest_pmap_npages) {
++ kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n",
++ __func__, gfn0, gfn1, tlb->tlb_hi);
++ kvm_mips_dump_guest_tlbs(vcpu);
++ return -1;
++ }
+
++ if (kvm_mips_map_page(kvm, gfn0) < 0)
++ return -1;
+
+- if ((tlb->tlb_hi & VPN2_MASK) == 0) {
+- pfn0 = 0;
+- pfn1 = 0;
+- } else {
+- if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT) < 0)
+- return -1;
+-
+- if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT) < 0)
+- return -1;
++ if (kvm_mips_map_page(kvm, gfn1) < 0)
++ return -1;
+
+- pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT];
+- pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT];
+- }
++ pfn0 = kvm->arch.guest_pmap[gfn0];
++ pfn1 = kvm->arch.guest_pmap[gfn1];
+
+ if (hpa0)
+ *hpa0 = pfn0 << PAGE_SHIFT;
+@@ -423,9 +440,9 @@ kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
+ entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
+ kvm_mips_get_kernel_asid(vcpu) : kvm_mips_get_user_asid(vcpu));
+ entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
+- (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
++ (tlb_lo[0] & MIPS3_PG_D) | (tlb_lo[0] & MIPS3_PG_V);
+ entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
+- (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
++ (tlb_lo[1] & MIPS3_PG_D) | (tlb_lo[1] & MIPS3_PG_V);
+
+ #ifdef DEBUG
+ kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
+@@ -909,10 +926,16 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
+ local_irq_restore(flags);
+ return KVM_INVALID_INST;
+ }
+- kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
+- &vcpu->arch.
+- guest_tlb[index],
+- NULL, NULL);
++ if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
++ &vcpu->arch.guest_tlb[index],
++ NULL, NULL)) {
++ kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n",
++ __func__, opc, index, vcpu,
++ read_c0_entryhi());
++ kvm_mips_dump_guest_tlbs(vcpu);
++ local_irq_restore(flags);
++ return KVM_INVALID_INST;
++ }
+ inst = *(opc);
+ }
+ local_irq_restore(flags);
+diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
+index f03771900813..3d492a823a55 100644
+--- a/arch/mips/math-emu/cp1emu.c
++++ b/arch/mips/math-emu/cp1emu.c
+@@ -684,9 +684,11 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+ case spec_op:
+ switch (insn.r_format.func) {
+ case jalr_op:
+- regs->regs[insn.r_format.rd] =
+- regs->cp0_epc + dec_insn.pc_inc +
+- dec_insn.next_pc_inc;
++ if (insn.r_format.rd != 0) {
++ regs->regs[insn.r_format.rd] =
++ regs->cp0_epc + dec_insn.pc_inc +
++ dec_insn.next_pc_inc;
++ }
+ /* Fall through */
+ case jr_op:
+ *contpc = regs->regs[insn.r_format.rs];
+diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
+index d7c0acb35ec2..8d49614d600d 100644
+--- a/arch/parisc/kernel/unaligned.c
++++ b/arch/parisc/kernel/unaligned.c
+@@ -666,7 +666,7 @@ void handle_unaligned(struct pt_regs *regs)
+ break;
+ }
+
+- if (modify && R1(regs->iir))
++ if (ret == 0 && modify && R1(regs->iir))
+ regs->gr[R1(regs->iir)] = newbase;
+
+
+@@ -677,6 +677,14 @@ void handle_unaligned(struct pt_regs *regs)
+
+ if (ret)
+ {
++ /*
++ * The unaligned handler failed.
++ * If we were called by __get_user() or __put_user() jump
++ * to it's exception fixup handler instead of crashing.
++ */
++ if (!user_mode(regs) && fixup_exception(regs))
++ return;
++
+ printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret);
+ die_if_kernel("Unaligned data reference", regs, 28);
+
+diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
+index 60c31698f7d5..469d7715d6aa 100644
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -643,7 +643,7 @@
+ #define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */
+ #define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */
+ #define SPRN_MMCR1 798
+-#define SPRN_MMCR2 769
++#define SPRN_MMCR2 785
+ #define SPRN_MMCRA 0x312
+ #define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */
+ #define MMCRA_SDAR_DCACHE_MISS 0x40000000UL
+@@ -677,13 +677,13 @@
+ #define SPRN_PMC6 792
+ #define SPRN_PMC7 793
+ #define SPRN_PMC8 794
+-#define SPRN_SIAR 780
+-#define SPRN_SDAR 781
+ #define SPRN_SIER 784
+ #define SIER_SIPR 0x2000000 /* Sampled MSR_PR */
+ #define SIER_SIHV 0x1000000 /* Sampled MSR_HV */
+ #define SIER_SIAR_VALID 0x0400000 /* SIAR contents valid */
+ #define SIER_SDAR_VALID 0x0200000 /* SDAR contents valid */
++#define SPRN_SIAR 796
++#define SPRN_SDAR 797
+
+ #define SPRN_PA6T_MMCR0 795
+ #define PA6T_MMCR0_EN0 0x0000000000000001UL
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index 902ca3c6b4b6..3ac1d3a90551 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -857,11 +857,6 @@ hv_facility_unavailable_relon_trampoline:
+ #endif
+ STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist)
+
+- /* Other future vectors */
+- .align 7
+- .globl __end_interrupts
+-__end_interrupts:
+-
+ .align 7
+ system_call_entry_direct:
+ #if defined(CONFIG_RELOCATABLE)
+@@ -1191,6 +1186,17 @@ __end_handlers:
+ STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
+ STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable)
+
++ /*
++ * The __end_interrupts marker must be past the out-of-line (OOL)
++ * handlers, so that they are copied to real address 0x100 when running
++ * a relocatable kernel. This ensures they can be reached from the short
++ * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch
++ * directly, without using LOAD_HANDLER().
++ */
++ .align 7
++ .globl __end_interrupts
++__end_interrupts:
++
+ #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
+ /*
+ * Data area reserved for FWNMI option.
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index d55357ee9028..a5e339806589 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -1088,6 +1088,16 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
+ current->thread.regs = regs - 1;
+ }
+
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++ /*
++ * Clear any transactional state, we're exec()ing. The cause is
++ * not important as there will never be a recheckpoint so it's not
++ * user visible.
++ */
++ if (MSR_TM_SUSPENDED(mfmsr()))
++ tm_reclaim_current(0);
++#endif
++
+ memset(regs->gpr, 0, sizeof(regs->gpr));
+ regs->ctr = 0;
+ regs->link = 0;
+diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
+index 68f97d5a4679..dc0278e7fd91 100644
+--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
++++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
+@@ -551,29 +551,50 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
+ {
+ int config_addr;
+ int ret;
++ /* Waiting 0.2s maximum before skipping configuration */
++ int max_wait = 200;
+
+ /* Figure out the PE address */
+ config_addr = pe->config_addr;
+ if (pe->addr)
+ config_addr = pe->addr;
+
+- /* Use new configure-pe function, if supported */
+- if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
+- ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
+- config_addr, BUID_HI(pe->phb->buid),
+- BUID_LO(pe->phb->buid));
+- } else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
+- ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
+- config_addr, BUID_HI(pe->phb->buid),
+- BUID_LO(pe->phb->buid));
+- } else {
+- return -EFAULT;
+- }
++ while (max_wait > 0) {
++ /* Use new configure-pe function, if supported */
++ if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
++ ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
++ config_addr, BUID_HI(pe->phb->buid),
++ BUID_LO(pe->phb->buid));
++ } else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
++ ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
++ config_addr, BUID_HI(pe->phb->buid),
++ BUID_LO(pe->phb->buid));
++ } else {
++ return -EFAULT;
++ }
+
+- if (ret)
+- pr_warning("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
+- __func__, pe->phb->global_number, pe->addr, ret);
++ if (!ret)
++ return ret;
++
++ /*
++ * If RTAS returns a delay value that's above 100ms, cut it
++ * down to 100ms in case firmware made a mistake. For more
++ * on how these delay values work see rtas_busy_delay_time
++ */
++ if (ret > RTAS_EXTENDED_DELAY_MIN+2 &&
++ ret <= RTAS_EXTENDED_DELAY_MAX)
++ ret = RTAS_EXTENDED_DELAY_MIN+2;
++
++ max_wait -= rtas_busy_delay_time(ret);
++
++ if (max_wait < 0)
++ break;
++
++ rtas_busy_delay(ret);
++ }
+
++ pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
++ __func__, pe->phb->global_number, pe->addr, ret);
+ return ret;
+ }
+
+diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
+index 86ae364900d6..401369134ba3 100644
+--- a/arch/powerpc/platforms/pseries/iommu.c
++++ b/arch/powerpc/platforms/pseries/iommu.c
+@@ -858,7 +858,8 @@ machine_arch_initcall(pseries, find_existing_ddw_windows);
+ static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
+ struct ddw_query_response *query)
+ {
+- struct eeh_dev *edev;
++ struct device_node *dn;
++ struct pci_dn *pdn;
+ u32 cfg_addr;
+ u64 buid;
+ int ret;
+@@ -869,11 +870,10 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
+ * Retrieve them from the pci device, not the node with the
+ * dma-window property
+ */
+- edev = pci_dev_to_eeh_dev(dev);
+- cfg_addr = edev->config_addr;
+- if (edev->pe_config_addr)
+- cfg_addr = edev->pe_config_addr;
+- buid = edev->phb->buid;
++ dn = pci_device_to_OF_node(dev);
++ pdn = PCI_DN(dn);
++ buid = pdn->phb->buid;
++ cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
+
+ ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
+ cfg_addr, BUID_HI(buid), BUID_LO(buid));
+@@ -887,7 +887,8 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
+ struct ddw_create_response *create, int page_shift,
+ int window_shift)
+ {
+- struct eeh_dev *edev;
++ struct device_node *dn;
++ struct pci_dn *pdn;
+ u32 cfg_addr;
+ u64 buid;
+ int ret;
+@@ -898,11 +899,10 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
+ * Retrieve them from the pci device, not the node with the
+ * dma-window property
+ */
+- edev = pci_dev_to_eeh_dev(dev);
+- cfg_addr = edev->config_addr;
+- if (edev->pe_config_addr)
+- cfg_addr = edev->pe_config_addr;
+- buid = edev->phb->buid;
++ dn = pci_device_to_OF_node(dev);
++ pdn = PCI_DN(dn);
++ buid = pdn->phb->buid;
++ cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
+
+ do {
+ /* extra outputs are LIOBN and dma-addr (hi, lo) */
+diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
+index cd29d2f4e4f3..749313b452ae 100644
+--- a/arch/s390/include/asm/syscall.h
++++ b/arch/s390/include/asm/syscall.h
+@@ -54,7 +54,7 @@ static inline void syscall_set_return_value(struct task_struct *task,
+ struct pt_regs *regs,
+ int error, long val)
+ {
+- regs->gprs[2] = error ? -error : val;
++ regs->gprs[2] = error ? error : val;
+ }
+
+ static inline void syscall_get_arguments(struct task_struct *task,
+diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
+index 6cf0111783d3..368f3582c93e 100644
+--- a/arch/x86/boot/Makefile
++++ b/arch/x86/boot/Makefile
+@@ -168,6 +168,9 @@ isoimage: $(obj)/bzImage
+ for i in lib lib64 share end ; do \
+ if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \
+ cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \
++ if [ -f /usr/$$i/syslinux/ldlinux.c32 ]; then \
++ cp /usr/$$i/syslinux/ldlinux.c32 $(obj)/isoimage ; \
++ fi ; \
+ break ; \
+ fi ; \
+ if [ $$i = end ] ; then exit 1 ; fi ; \
+diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
+index be12c534fd59..29a3d1b00ca9 100644
+--- a/arch/x86/include/asm/mmu_context.h
++++ b/arch/x86/include/asm/mmu_context.h
+@@ -42,7 +42,34 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ #endif
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+
+- /* Re-load page tables */
++ /*
++ * Re-load page tables.
++ *
++ * This logic has an ordering constraint:
++ *
++ * CPU 0: Write to a PTE for 'next'
++ * CPU 0: load bit 1 in mm_cpumask. if nonzero, send IPI.
++ * CPU 1: set bit 1 in next's mm_cpumask
++ * CPU 1: load from the PTE that CPU 0 writes (implicit)
++ *
++ * We need to prevent an outcome in which CPU 1 observes
++ * the new PTE value and CPU 0 observes bit 1 clear in
++ * mm_cpumask. (If that occurs, then the IPI will never
++ * be sent, and CPU 0's TLB will contain a stale entry.)
++ *
++ * The bad outcome can occur if either CPU's load is
++ * reordered before that CPU's store, so both CPUs must
++ * execute full barriers to prevent this from happening.
++ *
++ * Thus, switch_mm needs a full barrier between the
++ * store to mm_cpumask and any operation that could load
++ * from next->pgd. TLB fills are special and can happen
++ * due to instruction fetches or for no reason at all,
++ * and neither LOCK nor MFENCE orders them.
++ * Fortunately, load_cr3() is serializing and gives the
++ * ordering guarantee we need.
++ *
++ */
+ load_cr3(next->pgd);
+
+ /* Stop flush ipis for the previous mm */
+@@ -65,10 +92,14 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ * schedule, protecting us from simultaneous changes.
+ */
+ cpumask_set_cpu(cpu, mm_cpumask(next));
++
+ /*
+ * We were in lazy tlb mode and leave_mm disabled
+ * tlb flush IPI delivery. We must reload CR3
+ * to make sure to use no freed page tables.
++ *
++ * As above, load_cr3() is serializing and orders TLB
++ * fills with respect to the mm_cpumask write.
+ */
+ load_cr3(next->pgd);
+ load_LDT_nolock(&next->context);
+diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
+index 59554dca96ec..e6a3b1e35fae 100644
+--- a/arch/x86/kernel/amd_nb.c
++++ b/arch/x86/kernel/amd_nb.c
+@@ -67,8 +67,8 @@ int amd_cache_northbridges(void)
+ while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
+ i++;
+
+- if (i == 0)
+- return 0;
++ if (!i)
++ return -ENODEV;
+
+ nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
+ if (!nb)
+diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
+index 53a4e2744846..3ab03430211d 100644
+--- a/arch/x86/kernel/apm_32.c
++++ b/arch/x86/kernel/apm_32.c
+@@ -392,7 +392,7 @@ static struct cpuidle_device apm_cpuidle_device;
+ /*
+ * Local variables
+ */
+-static struct {
++__visible struct {
+ unsigned long offset;
+ unsigned short segment;
+ } apm_bios_entry;
+diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
+index ac057583282a..a18154454e36 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel.c
++++ b/arch/x86/kernel/cpu/perf_event_intel.c
+@@ -2241,13 +2241,16 @@ __init int intel_pmu_init(void)
+ * counter, so do not extend mask to generic counters
+ */
+ for_each_event_constraint(c, x86_pmu.event_constraints) {
+- if (c->cmask != X86_RAW_EVENT_MASK
+- || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
++ if (c->cmask == X86_RAW_EVENT_MASK
++ && c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
++ c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
+ continue;
+ }
+
+- c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
+- c->weight += x86_pmu.num_counters;
++ c->idxmsk64 &=
++ ~(~0ULL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
++ c->weight = hweight64(c->idxmsk64);
++
+ }
+ }
+
+diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
+index 0c6c07cea3f7..766aa3bf1798 100644
+--- a/arch/x86/kernel/kprobes/core.c
++++ b/arch/x86/kernel/kprobes/core.c
+@@ -908,7 +908,19 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+ * normal page fault.
+ */
+ regs->ip = (unsigned long)cur->addr;
++ /*
++ * Trap flag (TF) has been set here because this fault
++ * happened where the single stepping will be done.
++ * So clear it by resetting the current kprobe:
++ */
++ regs->flags &= ~X86_EFLAGS_TF;
++
++ /*
++ * If the TF flag was set before the kprobe hit,
++ * don't touch it:
++ */
+ regs->flags |= kcb->kprobe_old_flags;
++
+ if (kcb->kprobe_status == KPROBE_REENTER)
+ restore_previous_kprobe(kcb);
+ else
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 3c0b085b4336..8e57771d4bfd 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -2966,6 +2966,11 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
+ if (dbgregs->flags)
+ return -EINVAL;
+
++ if (dbgregs->dr6 & ~0xffffffffull)
++ return -EINVAL;
++ if (dbgregs->dr7 & ~0xffffffffull)
++ return -EINVAL;
++
+ memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
+ vcpu->arch.dr6 = dbgregs->dr6;
+ vcpu->arch.dr7 = dbgregs->dr7;
+diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
+index 282375f13c7e..c26b610a604d 100644
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -149,7 +149,9 @@ void flush_tlb_current_task(void)
+
+ preempt_disable();
+
++ /* This is an implicit full barrier that synchronizes with switch_mm. */
+ local_flush_tlb();
++
+ if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+ flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
+ preempt_enable();
+@@ -188,11 +190,19 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+ unsigned act_entries, tlb_entries = 0;
+
+ preempt_disable();
+- if (current->active_mm != mm)
++ if (current->active_mm != mm) {
++ /* Synchronize with switch_mm. */
++ smp_mb();
++
+ goto flush_all;
++ }
+
+ if (!current->mm) {
+ leave_mm(smp_processor_id());
++
++ /* Synchronize with switch_mm. */
++ smp_mb();
++
+ goto flush_all;
+ }
+
+@@ -242,10 +252,18 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
+ preempt_disable();
+
+ if (current->active_mm == mm) {
+- if (current->mm)
++ if (current->mm) {
++ /*
++ * Implicit full barrier (INVLPG) that synchronizes
++ * with switch_mm.
++ */
+ __flush_tlb_one(start);
+- else
++ } else {
+ leave_mm(smp_processor_id());
++
++ /* Synchronize with switch_mm. */
++ smp_mb();
++ }
+ }
+
+ if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+diff --git a/block/genhd.c b/block/genhd.c
+index b09f5fc94dee..7af2f6a18d9b 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -829,6 +829,7 @@ static void disk_seqf_stop(struct seq_file *seqf, void *v)
+ if (iter) {
+ class_dev_iter_exit(iter);
+ kfree(iter);
++ seqf->private = NULL;
+ }
+ }
+
+diff --git a/crypto/gcm.c b/crypto/gcm.c
+index cd97cdd8cabe..451e420ce56c 100644
+--- a/crypto/gcm.c
++++ b/crypto/gcm.c
+@@ -716,7 +716,9 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb,
+
+ ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type,
+ CRYPTO_ALG_TYPE_HASH,
+- CRYPTO_ALG_TYPE_AHASH_MASK);
++ CRYPTO_ALG_TYPE_AHASH_MASK |
++ crypto_requires_sync(algt->type,
++ algt->mask));
+ if (IS_ERR(ghash_alg))
+ return ERR_CAST(ghash_alg);
+
+diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
+index 7281b8a93ad3..79cbbbfffffc 100644
+--- a/crypto/scatterwalk.c
++++ b/crypto/scatterwalk.c
+@@ -68,7 +68,8 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
+
+ void scatterwalk_done(struct scatter_walk *walk, int out, int more)
+ {
+- if (!(scatterwalk_pagelen(walk) & (PAGE_SIZE - 1)) || !more)
++ if (!more || walk->offset >= walk->sg->offset + walk->sg->length ||
++ !(walk->offset & (PAGE_SIZE - 1)))
+ scatterwalk_pagedone(walk, out, more);
+ }
+ EXPORT_SYMBOL_GPL(scatterwalk_done);
+diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
+index a02a91cd1de4..c5e3dd93865a 100644
+--- a/drivers/acpi/pci_root.c
++++ b/drivers/acpi/pci_root.c
+@@ -385,6 +385,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
+ int result;
+ struct acpi_pci_root *root;
+ u32 flags, base_flags;
++ bool no_aspm = false, clear_aspm = false;
+
+ root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
+ if (!root)
+@@ -445,31 +446,10 @@ static int acpi_pci_root_add(struct acpi_device *device,
+ flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
+ acpi_pci_osc_support(root, flags);
+
+- /*
+- * TBD: Need PCI interface for enumeration/configuration of roots.
+- */
+-
+ mutex_lock(&acpi_pci_root_lock);
+ list_add_tail(&root->node, &acpi_pci_roots);
+ mutex_unlock(&acpi_pci_root_lock);
+
+- /*
+- * Scan the Root Bridge
+- * --------------------
+- * Must do this prior to any attempt to bind the root device, as the
+- * PCI namespace does not get created until this call is made (and
+- * thus the root bridge's pci_dev does not exist).
+- */
+- root->bus = pci_acpi_scan_root(root);
+- if (!root->bus) {
+- printk(KERN_ERR PREFIX
+- "Bus %04x:%02x not present in PCI namespace\n",
+- root->segment, (unsigned int)root->secondary.start);
+- result = -ENODEV;
+- goto out_del_root;
+- }
+-
+- /* Indicate support for various _OSC capabilities. */
+ if (pci_ext_cfg_avail())
+ flags |= OSC_EXT_PCI_CONFIG_SUPPORT;
+ if (pcie_aspm_support_enabled()) {
+@@ -483,7 +463,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
+ if (ACPI_FAILURE(status)) {
+ dev_info(&device->dev, "ACPI _OSC support "
+ "notification failed, disabling PCIe ASPM\n");
+- pcie_no_aspm();
++ no_aspm = true;
+ flags = base_flags;
+ }
+ }
+@@ -515,7 +495,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
+ * We have ASPM control, but the FADT indicates
+ * that it's unsupported. Clear it.
+ */
+- pcie_clear_aspm(root->bus);
++ clear_aspm = true;
+ }
+ } else {
+ dev_info(&device->dev,
+@@ -524,7 +504,14 @@ static int acpi_pci_root_add(struct acpi_device *device,
+ acpi_format_exception(status), flags);
+ pr_info("ACPI _OSC control for PCIe not granted, "
+ "disabling ASPM\n");
+- pcie_no_aspm();
++ /*
++ * We want to disable ASPM here, but aspm_disabled
++ * needs to remain in its state from boot so that we
++ * properly handle PCIe 1.1 devices. So we set this
++ * flag here, to defer the action until after the ACPI
++ * root scan.
++ */
++ no_aspm = true;
+ }
+ } else {
+ dev_info(&device->dev,
+@@ -532,6 +519,33 @@ static int acpi_pci_root_add(struct acpi_device *device,
+ "(_OSC support mask: 0x%02x)\n", flags);
+ }
+
++ /*
++ * TBD: Need PCI interface for enumeration/configuration of roots.
++ */
++
++ /*
++ * Scan the Root Bridge
++ * --------------------
++ * Must do this prior to any attempt to bind the root device, as the
++ * PCI namespace does not get created until this call is made (and
++ * thus the root bridge's pci_dev does not exist).
++ */
++ root->bus = pci_acpi_scan_root(root);
++ if (!root->bus) {
++ dev_err(&device->dev,
++ "Bus %04x:%02x not present in PCI namespace\n",
++ root->segment, (unsigned int)root->secondary.start);
++ result = -ENODEV;
++ goto end;
++ }
++
++ if (clear_aspm) {
++ dev_info(&device->dev, "Disabling ASPM (FADT indicates it is unsupported)\n");
++ pcie_clear_aspm(root->bus);
++ }
++ if (no_aspm)
++ pcie_no_aspm();
++
+ pci_acpi_add_bus_pm_notifier(device, root->bus);
+ if (device->wakeup.flags.run_wake)
+ device_set_run_wake(root->bus->bridge, true);
+@@ -548,11 +562,6 @@ static int acpi_pci_root_add(struct acpi_device *device,
+ pci_bus_add_devices(root->bus);
+ return 1;
+
+-out_del_root:
+- mutex_lock(&acpi_pci_root_lock);
+- list_del(&root->node);
+- mutex_unlock(&acpi_pci_root_lock);
+-
+ end:
+ kfree(root);
+ return result;
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index 063036d876b0..126eb86f239f 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -604,7 +604,7 @@ void ata_scsi_error(struct Scsi_Host *host)
+ ata_scsi_port_error_handler(host, ap);
+
+ /* finish or retry handled scmd's and clean up */
+- WARN_ON(host->host_failed || !list_empty(&eh_work_q));
++ WARN_ON(!list_empty(&eh_work_q));
+
+ DPRINTK("EXIT\n");
+ }
+diff --git a/drivers/base/module.c b/drivers/base/module.c
+index db930d3ee312..2a215780eda2 100644
+--- a/drivers/base/module.c
++++ b/drivers/base/module.c
+@@ -24,10 +24,12 @@ static char *make_driver_name(struct device_driver *drv)
+
+ static void module_create_drivers_dir(struct module_kobject *mk)
+ {
+- if (!mk || mk->drivers_dir)
+- return;
++ static DEFINE_MUTEX(drivers_dir_mutex);
+
+- mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
++ mutex_lock(&drivers_dir_mutex);
++ if (mk && !mk->drivers_dir)
++ mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
++ mutex_unlock(&drivers_dir_mutex);
+ }
+
+ void module_add_driver(struct module *mod, struct device_driver *drv)
+diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
+index 6789c1653913..cde4a6e0fab0 100644
+--- a/drivers/crypto/ux500/hash/hash_core.c
++++ b/drivers/crypto/ux500/hash/hash_core.c
+@@ -806,7 +806,7 @@ int hash_process_data(
+ &device_data->state);
+ memmove(req_ctx->state.buffer,
+ device_data->state.buffer,
+- HASH_BLOCK_SIZE / sizeof(u32));
++ HASH_BLOCK_SIZE);
+ if (ret) {
+ dev_err(device_data->dev, "[%s] "
+ "hash_resume_state()"
+@@ -858,7 +858,7 @@ int hash_process_data(
+
+ memmove(device_data->state.buffer,
+ req_ctx->state.buffer,
+- HASH_BLOCK_SIZE / sizeof(u32));
++ HASH_BLOCK_SIZE);
+ if (ret) {
+ dev_err(device_data->dev, "[%s] "
+ "hash_save_state()"
+diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
+index 426c51dd420c..ac11e455aea5 100644
+--- a/drivers/gpio/gpio-pca953x.c
++++ b/drivers/gpio/gpio-pca953x.c
+@@ -75,7 +75,7 @@ MODULE_DEVICE_TABLE(i2c, pca953x_id);
+ #define MAX_BANK 5
+ #define BANK_SZ 8
+
+-#define NBANK(chip) (chip->gpio_chip.ngpio / BANK_SZ)
++#define NBANK(chip) DIV_ROUND_UP(chip->gpio_chip.ngpio, BANK_SZ)
+
+ struct pca953x_chip {
+ unsigned gpio_start;
+diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
+index b78cbe74dadf..93b74107d20d 100644
+--- a/drivers/gpu/drm/drm_fb_helper.c
++++ b/drivers/gpu/drm/drm_fb_helper.c
+@@ -1313,7 +1313,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
+ int n, int width, int height)
+ {
+ int c, o;
+- struct drm_device *dev = fb_helper->dev;
+ struct drm_connector *connector;
+ struct drm_connector_helper_funcs *connector_funcs;
+ struct drm_encoder *encoder;
+@@ -1334,7 +1333,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
+ if (modes[n] == NULL)
+ return best_score;
+
+- crtcs = kzalloc(dev->mode_config.num_connector *
++ crtcs = kzalloc(fb_helper->connector_count *
+ sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
+ if (!crtcs)
+ return best_score;
+@@ -1381,7 +1380,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
+ best_crtc = crtc;
+ best_score = score;
+ memcpy(best_crtcs, crtcs,
+- dev->mode_config.num_connector *
++ fb_helper->connector_count *
+ sizeof(struct drm_fb_helper_crtc *));
+ }
+ }
+diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
+index 489ffd2c66e5..a3d37e4a84ae 100644
+--- a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
++++ b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
+@@ -85,7 +85,7 @@ static const char *const dsi_errors[] = {
+ "RX Prot Violation",
+ "HS Generic Write FIFO Full",
+ "LP Generic Write FIFO Full",
+- "Generic Read Data Avail"
++ "Generic Read Data Avail",
+ "Special Packet Sent",
+ "Tearing Effect",
+ };
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index f3cce23f4a62..f4b9b1c0cae8 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -1144,7 +1144,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
+ le16_to_cpu(firmware_info->info.usReferenceClock);
+ p1pll->reference_div = 0;
+
+- if (crev < 2)
++ if ((frev < 2) && (crev < 2))
+ p1pll->pll_out_min =
+ le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
+ else
+@@ -1153,7 +1153,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
+ p1pll->pll_out_max =
+ le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
+
+- if (crev >= 4) {
++ if (((frev < 2) && (crev >= 4)) || (frev >= 2)) {
+ p1pll->lcd_pll_out_min =
+ le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
+ if (p1pll->lcd_pll_out_min == 0)
+diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+index 8c44ef57864b..a7e1893de838 100644
+--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
++++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+@@ -11,6 +11,7 @@
+ #include <acpi/acpi.h>
+ #include <acpi/acpi_bus.h>
+ #include <linux/pci.h>
++#include <linux/delay.h>
+
+ #include "radeon_acpi.h"
+
+@@ -252,6 +253,10 @@ static int radeon_atpx_set_discrete_state(struct radeon_atpx *atpx, u8 state)
+ if (!info)
+ return -EIO;
+ kfree(info);
++
++ /* 200ms delay is required after off */
++ if (state == 0)
++ msleep(200);
+ }
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index 1fbd38b371d4..ea62810aeda6 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -1691,7 +1691,6 @@ radeon_add_atom_connector(struct drm_device *dev,
+ 1);
+ /* no HPD on analog connectors */
+ radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
+ break;
+@@ -1889,8 +1888,10 @@ radeon_add_atom_connector(struct drm_device *dev,
+ }
+
+ if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
+- if (i2c_bus->valid)
+- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
++ if (i2c_bus->valid) {
++ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
++ DRM_CONNECTOR_POLL_DISCONNECT;
++ }
+ } else
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+@@ -1962,7 +1963,6 @@ radeon_add_legacy_connector(struct drm_device *dev,
+ 1);
+ /* no HPD on analog connectors */
+ radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
+ break;
+@@ -2047,10 +2047,13 @@ radeon_add_legacy_connector(struct drm_device *dev,
+ }
+
+ if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
+- if (i2c_bus->valid)
+- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
++ if (i2c_bus->valid) {
++ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
++ DRM_CONNECTOR_POLL_DISCONNECT;
++ }
+ } else
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
++
+ connector->display_info.subpixel_order = subpixel_order;
+ drm_sysfs_connector_add(connector);
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index 8df1525f71d2..e9db3f8125ed 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -449,6 +449,23 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+ /*
+ * GPU helpers function.
+ */
++
++/**
++ * radeon_device_is_virtual - check if we are running is a virtual environment
++ *
++ * Check if the asic has been passed through to a VM (all asics).
++ * Used at driver startup.
++ * Returns true if virtual or false if not.
++ */
++static bool radeon_device_is_virtual(void)
++{
++#ifdef CONFIG_X86
++ return boot_cpu_has(X86_FEATURE_HYPERVISOR);
++#else
++ return false;
++#endif
++}
++
+ /**
+ * radeon_card_posted - check if the hw has already been initialized
+ *
+@@ -462,6 +479,10 @@ bool radeon_card_posted(struct radeon_device *rdev)
+ {
+ uint32_t reg;
+
++ /* for pass through, always force asic_init */
++ if (radeon_device_is_virtual())
++ return false;
++
+ /* required for EFI mode on macbook2,1 which uses an r5xx asic */
+ if (efi_enabled(EFI_BOOT) &&
+ (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index a3915d12e746..eb5700e40e1a 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -1084,7 +1084,7 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
+ return;
+
+ /* report the usage code as scancode if the key status has changed */
+- if (usage->type == EV_KEY && !!test_bit(usage->code, input->key) != value)
++ if (usage->type == EV_KEY && (!!test_bit(usage->code, input->key)) != value)
+ input_event(input, EV_MSC, MSC_SCAN, usage->hid);
+
+ input_event(input, usage->type, usage->code, value);
+diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
+index 2f1ddca6f2e0..700145b15088 100644
+--- a/drivers/hid/usbhid/hiddev.c
++++ b/drivers/hid/usbhid/hiddev.c
+@@ -516,13 +516,13 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
+ goto inval;
+ } else if (uref->usage_index >= field->report_count)
+ goto inval;
+-
+- else if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
+- (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
+- uref->usage_index + uref_multi->num_values > field->report_count))
+- goto inval;
+ }
+
++ if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
++ (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
++ uref->usage_index + uref_multi->num_values > field->report_count))
++ goto inval;
++
+ switch (cmd) {
+ case HIDIOCGUSAGE:
+ uref->value = field->value[uref->usage_index];
+diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
+index 7c9a1d97dc68..a22c427454db 100644
+--- a/drivers/iio/accel/kxsd9.c
++++ b/drivers/iio/accel/kxsd9.c
+@@ -81,7 +81,7 @@ static int kxsd9_write_scale(struct iio_dev *indio_dev, int micro)
+
+ mutex_lock(&st->buf_lock);
+ ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
+- if (ret)
++ if (ret < 0)
+ goto error_ret;
+ st->tx[0] = KXSD9_WRITE(KXSD9_REG_CTRL_C);
+ st->tx[1] = (ret & ~KXSD9_FS_MASK) | i;
+@@ -163,7 +163,7 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev,
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
+- if (ret)
++ if (ret < 0)
+ goto error_ret;
+ *val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK];
+ ret = IIO_VAL_INT_PLUS_MICRO;
+diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
+index c2744a75c3b0..6569a4e2a436 100644
+--- a/drivers/iio/adc/ad7266.c
++++ b/drivers/iio/adc/ad7266.c
+@@ -406,7 +406,7 @@ static int ad7266_probe(struct spi_device *spi)
+ st = iio_priv(indio_dev);
+
+ st->reg = regulator_get(&spi->dev, "vref");
+- if (!IS_ERR_OR_NULL(st->reg)) {
++ if (!IS_ERR(st->reg)) {
+ ret = regulator_enable(st->reg);
+ if (ret)
+ goto error_put_reg;
+@@ -417,6 +417,10 @@ static int ad7266_probe(struct spi_device *spi)
+
+ st->vref_uv = ret;
+ } else {
++ /* Any other error indicates that the regulator does exist */
++ if (PTR_ERR(st->reg) != -ENODEV)
++ return PTR_ERR(st->reg);
++
+ /* Use internal reference */
+ st->vref_uv = 2500000;
+ }
+diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
+index 4d6c7d84e155..301becccf5ed 100644
+--- a/drivers/iio/industrialio-trigger.c
++++ b/drivers/iio/industrialio-trigger.c
+@@ -203,22 +203,35 @@ static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
+
+ /* Prevent the module from being removed whilst attached to a trigger */
+ __module_get(pf->indio_dev->info->driver_module);
++
++ /* Get irq number */
+ pf->irq = iio_trigger_get_irq(trig);
++ if (pf->irq < 0)
++ goto out_put_module;
++
++ /* Request irq */
+ ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
+ pf->type, pf->name,
+ pf);
+- if (ret < 0) {
+- module_put(pf->indio_dev->info->driver_module);
+- return ret;
+- }
++ if (ret < 0)
++ goto out_put_irq;
+
++ /* Enable trigger in driver */
+ if (trig->ops && trig->ops->set_trigger_state && notinuse) {
+ ret = trig->ops->set_trigger_state(trig, true);
+ if (ret < 0)
+- module_put(pf->indio_dev->info->driver_module);
++ goto out_free_irq;
+ }
+
+ return ret;
++
++out_free_irq:
++ free_irq(pf->irq, pf);
++out_put_irq:
++ iio_trigger_put_irq(trig, pf->irq);
++out_put_module:
++ module_put(pf->indio_dev->info->driver_module);
++ return ret;
+ }
+
+ static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
+diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
+index f2f63933e8a9..5befec118a18 100644
+--- a/drivers/infiniband/core/ucm.c
++++ b/drivers/infiniband/core/ucm.c
+@@ -48,6 +48,7 @@
+
+ #include <asm/uaccess.h>
+
++#include <rdma/ib.h>
+ #include <rdma/ib_cm.h>
+ #include <rdma/ib_user_cm.h>
+ #include <rdma/ib_marshall.h>
+@@ -1104,6 +1105,9 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
+ struct ib_ucm_cmd_hdr hdr;
+ ssize_t result;
+
++ if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
++ return -EACCES;
++
+ if (len < sizeof(hdr))
+ return -EINVAL;
+
+diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
+index 5ca44cd9b00c..99f1c170770f 100644
+--- a/drivers/infiniband/core/ucma.c
++++ b/drivers/infiniband/core/ucma.c
1594 +@@ -43,6 +43,7 @@
1595 + #include <linux/sysctl.h>
1596 + #include <linux/module.h>
1597 +
1598 ++#include <rdma/ib.h>
1599 + #include <rdma/rdma_user_cm.h>
1600 + #include <rdma/ib_marshall.h>
1601 + #include <rdma/rdma_cm.h>
1602 +@@ -1249,6 +1250,9 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
1603 + struct rdma_ucm_cmd_hdr hdr;
1604 + ssize_t ret;
1605 +
1606 ++ if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
1607 ++ return -EACCES;
1608 ++
1609 + if (len < sizeof(hdr))
1610 + return -EINVAL;
1611 +
1612 +diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
1613 +index b6062b9236a2..f50623d07a75 100644
1614 +--- a/drivers/infiniband/core/uverbs_main.c
1615 ++++ b/drivers/infiniband/core/uverbs_main.c
1616 +@@ -48,6 +48,8 @@
1617 +
1618 + #include <asm/uaccess.h>
1619 +
1620 ++#include <rdma/ib.h>
1621 ++
1622 + #include "uverbs.h"
1623 +
1624 + MODULE_AUTHOR("Roland Dreier");
1625 +@@ -588,6 +590,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
1626 + struct ib_uverbs_file *file = filp->private_data;
1627 + struct ib_uverbs_cmd_hdr hdr;
1628 +
1629 ++ if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
1630 ++ return -EACCES;
1631 ++
1632 + if (count < sizeof hdr)
1633 + return -EINVAL;
1634 +
1635 +diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
1636 +index 890c23b3d714..f55d69500a5f 100644
1637 +--- a/drivers/infiniband/hw/mlx4/ah.c
1638 ++++ b/drivers/infiniband/hw/mlx4/ah.c
1639 +@@ -65,6 +65,7 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
1640 +
1641 + ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
1642 + ah->av.ib.g_slid = ah_attr->src_path_bits;
1643 ++ ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
1644 + if (ah_attr->ah_flags & IB_AH_GRH) {
1645 + ah->av.ib.g_slid |= 0x80;
1646 + ah->av.ib.gid_index = ah_attr->grh.sgid_index;
1647 +@@ -82,7 +83,6 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
1648 + !(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support))
1649 + --ah->av.ib.stat_rate;
1650 + }
1651 +- ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
1652 +
1653 + return &ah->ibah;
1654 + }
1655 +diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
1656 +index 262a18437ceb..1fe3bdb0da14 100644
1657 +--- a/drivers/infiniband/hw/mlx4/qp.c
1658 ++++ b/drivers/infiniband/hw/mlx4/qp.c
1659 +@@ -346,7 +346,7 @@ static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
1660 + sizeof (struct mlx4_wqe_raddr_seg);
1661 + case MLX4_IB_QPT_RC:
1662 + return sizeof (struct mlx4_wqe_ctrl_seg) +
1663 +- sizeof (struct mlx4_wqe_atomic_seg) +
1664 ++ sizeof (struct mlx4_wqe_masked_atomic_seg) +
1665 + sizeof (struct mlx4_wqe_raddr_seg);
1666 + case MLX4_IB_QPT_SMI:
1667 + case MLX4_IB_QPT_GSI:
1668 +diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
1669 +index b56c9428f3c5..8cb29b36c82a 100644
1670 +--- a/drivers/infiniband/hw/qib/qib_file_ops.c
1671 ++++ b/drivers/infiniband/hw/qib/qib_file_ops.c
1672 +@@ -45,6 +45,8 @@
1673 + #include <linux/delay.h>
1674 + #include <linux/export.h>
1675 +
1676 ++#include <rdma/ib.h>
1677 ++
1678 + #include "qib.h"
1679 + #include "qib_common.h"
1680 + #include "qib_user_sdma.h"
1681 +@@ -1977,6 +1979,9 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
1682 + ssize_t ret = 0;
1683 + void *dest;
1684 +
1685 ++ if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
1686 ++ return -EACCES;
1687 ++
1688 + if (count < sizeof(cmd.type)) {
1689 + ret = -EINVAL;
1690 + goto bail;
1691 +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
1692 +index b6e049a3c7a8..a481094af85f 100644
1693 +--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
1694 ++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
1695 +@@ -887,7 +887,9 @@ struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
1696 + neigh = NULL;
1697 + goto out_unlock;
1698 + }
1699 +- neigh->alive = jiffies;
1700 ++
1701 ++ if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE))
1702 ++ neigh->alive = jiffies;
1703 + goto out_unlock;
1704 + }
1705 + }
1706 +diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
1707 +index 856c1b03e22d..685e125d6366 100644
1708 +--- a/drivers/input/joystick/xpad.c
1709 ++++ b/drivers/input/joystick/xpad.c
1710 +@@ -843,6 +843,9 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
1711 + struct usb_endpoint_descriptor *ep_irq_in;
1712 + int i, error;
1713 +
1714 ++ if (intf->cur_altsetting->desc.bNumEndpoints != 2)
1715 ++ return -ENODEV;
1716 ++
1717 + for (i = 0; xpad_device[i].idVendor; i++) {
1718 + if ((le16_to_cpu(udev->descriptor.idVendor) == xpad_device[i].idVendor) &&
1719 + (le16_to_cpu(udev->descriptor.idProduct) == xpad_device[i].idProduct))
1720 +diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
1721 +index a0a4bbaef02c..3f2f3ac96a55 100644
1722 +--- a/drivers/input/misc/uinput.c
1723 ++++ b/drivers/input/misc/uinput.c
1724 +@@ -835,9 +835,15 @@ static long uinput_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1725 + }
1726 +
1727 + #ifdef CONFIG_COMPAT
1728 ++
1729 ++#define UI_SET_PHYS_COMPAT _IOW(UINPUT_IOCTL_BASE, 108, compat_uptr_t)
1730 ++
1731 + static long uinput_compat_ioctl(struct file *file,
1732 + unsigned int cmd, unsigned long arg)
1733 + {
1734 ++ if (cmd == UI_SET_PHYS_COMPAT)
1735 ++ cmd = UI_SET_PHYS;
1736 ++
1737 + return uinput_ioctl_handler(file, cmd, arg, compat_ptr(arg));
1738 + }
1739 + #endif
1740 +diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c
1741 +index 9a83be6b6584..abba11220f29 100644
1742 +--- a/drivers/input/touchscreen/wacom_w8001.c
1743 ++++ b/drivers/input/touchscreen/wacom_w8001.c
1744 +@@ -28,7 +28,7 @@ MODULE_AUTHOR("Jaya Kumar <jayakumar.lkml@×××××.com>");
1745 + MODULE_DESCRIPTION(DRIVER_DESC);
1746 + MODULE_LICENSE("GPL");
1747 +
1748 +-#define W8001_MAX_LENGTH 11
1749 ++#define W8001_MAX_LENGTH 13
1750 + #define W8001_LEAD_MASK 0x80
1751 + #define W8001_LEAD_BYTE 0x80
1752 + #define W8001_TAB_MASK 0x40
1753 +diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
1754 +index a7e4939787c9..eab9167937e2 100644
1755 +--- a/drivers/isdn/hardware/mISDN/hfcpci.c
1756 ++++ b/drivers/isdn/hardware/mISDN/hfcpci.c
1757 +@@ -2295,8 +2295,8 @@ _hfcpci_softirq(struct device *dev, void *arg)
1758 + static void
1759 + hfcpci_softirq(void *arg)
1760 + {
1761 +- (void) driver_for_each_device(&hfc_driver.driver, NULL, arg,
1762 +- _hfcpci_softirq);
1763 ++ WARN_ON_ONCE(driver_for_each_device(&hfc_driver.driver, NULL, arg,
1764 ++ _hfcpci_softirq) != 0);
1765 +
1766 + /* if next event would be in the past ... */
1767 + if ((s32)(hfc_jiffies + tics - jiffies) <= 0)
1768 +diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
1769 +index 7fcf21cb4ff8..a9a47cd029d5 100644
1770 +--- a/drivers/md/dm-flakey.c
1771 ++++ b/drivers/md/dm-flakey.c
1772 +@@ -286,10 +286,16 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
1773 + pb->bio_submitted = true;
1774 +
1775 + /*
1776 +- * Map reads as normal.
1777 ++ * Map reads as normal only if corrupt_bio_byte set.
1778 + */
1779 +- if (bio_data_dir(bio) == READ)
1780 +- goto map_bio;
1781 ++ if (bio_data_dir(bio) == READ) {
1782 ++ /* If flags were specified, only corrupt those that match. */
1783 ++ if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
1784 ++ all_corrupt_bio_flags_match(bio, fc))
1785 ++ goto map_bio;
1786 ++ else
1787 ++ return -EIO;
1788 ++ }
1789 +
1790 + /*
1791 + * Drop writes?
1792 +@@ -327,12 +333,13 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error)
1793 +
1794 + /*
1795 + * Corrupt successful READs while in down state.
1796 +- * If flags were specified, only corrupt those that match.
1797 + */
1798 +- if (fc->corrupt_bio_byte && !error && pb->bio_submitted &&
1799 +- (bio_data_dir(bio) == READ) && (fc->corrupt_bio_rw == READ) &&
1800 +- all_corrupt_bio_flags_match(bio, fc))
1801 +- corrupt_bio_data(bio, fc);
1802 ++ if (!error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
1803 ++ if (fc->corrupt_bio_byte)
1804 ++ corrupt_bio_data(bio, fc);
1805 ++ else
1806 ++ return -EIO;
1807 ++ }
1808 +
1809 + return error;
1810 + }
1811 +diff --git a/drivers/media/dvb-frontends/stb6100.c b/drivers/media/dvb-frontends/stb6100.c
1812 +index cea175d19890..4ef8a5c7003e 100644
1813 +--- a/drivers/media/dvb-frontends/stb6100.c
1814 ++++ b/drivers/media/dvb-frontends/stb6100.c
1815 +@@ -193,7 +193,7 @@ static int stb6100_write_reg_range(struct stb6100_state *state, u8 buf[], int st
1816 + .len = len + 1
1817 + };
1818 +
1819 +- if (1 + len > sizeof(buf)) {
1820 ++ if (1 + len > sizeof(cmdbuf)) {
1821 + printk(KERN_WARNING
1822 + "%s: i2c wr: len=%d is too big!\n",
1823 + KBUILD_MODNAME, len);
1824 +diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
1825 +index 961d7ff75427..eb92027cef92 100644
1826 +--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
1827 ++++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
1828 +@@ -1000,6 +1000,11 @@ static int match_child(struct device *dev, void *data)
1829 + return !strcmp(dev_name(dev), (char *)data);
1830 + }
1831 +
1832 ++static void s5p_mfc_memdev_release(struct device *dev)
1833 ++{
1834 ++ dma_release_declared_memory(dev);
1835 ++}
1836 ++
1837 + static void *mfc_get_drv_data(struct platform_device *pdev);
1838 +
1839 + static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev)
1840 +@@ -1012,6 +1017,9 @@ static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev)
1841 + mfc_err("Not enough memory\n");
1842 + return -ENOMEM;
1843 + }
1844 ++
1845 ++ dev_set_name(dev->mem_dev_l, "%s", "s5p-mfc-l");
1846 ++ dev->mem_dev_l->release = s5p_mfc_memdev_release;
1847 + device_initialize(dev->mem_dev_l);
1848 + of_property_read_u32_array(dev->plat_dev->dev.of_node,
1849 + "samsung,mfc-l", mem_info, 2);
1850 +@@ -1029,6 +1037,9 @@ static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev)
1851 + mfc_err("Not enough memory\n");
1852 + return -ENOMEM;
1853 + }
1854 ++
1855 ++ dev_set_name(dev->mem_dev_r, "%s", "s5p-mfc-r");
1856 ++ dev->mem_dev_r->release = s5p_mfc_memdev_release;
1857 + device_initialize(dev->mem_dev_r);
1858 + of_property_read_u32_array(dev->plat_dev->dev.of_node,
1859 + "samsung,mfc-r", mem_info, 2);
1860 +diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
1861 +index c6bf23599eb9..a2863b7b9e21 100644
1862 +--- a/drivers/mmc/card/block.c
1863 ++++ b/drivers/mmc/card/block.c
1864 +@@ -1582,8 +1582,8 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
1865 +
1866 + packed_cmd_hdr = packed->cmd_hdr;
1867 + memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
1868 +- packed_cmd_hdr[0] = (packed->nr_entries << 16) |
1869 +- (PACKED_CMD_WR << 8) | PACKED_CMD_VER;
1870 ++ packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) |
1871 ++ (PACKED_CMD_WR << 8) | PACKED_CMD_VER);
1872 + hdr_blocks = mmc_large_sector(card) ? 8 : 1;
1873 +
1874 + /*
1875 +@@ -1597,14 +1597,14 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
1876 + ((brq->data.blocks * brq->data.blksz) >=
1877 + card->ext_csd.data_tag_unit_size);
1878 + /* Argument of CMD23 */
1879 +- packed_cmd_hdr[(i * 2)] =
1880 ++ packed_cmd_hdr[(i * 2)] = cpu_to_le32(
1881 + (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
1882 + (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
1883 +- blk_rq_sectors(prq);
1884 ++ blk_rq_sectors(prq));
1885 + /* Argument of CMD18 or CMD25 */
1886 +- packed_cmd_hdr[((i * 2)) + 1] =
1887 ++ packed_cmd_hdr[((i * 2)) + 1] = cpu_to_le32(
1888 + mmc_card_blockaddr(card) ?
1889 +- blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
1890 ++ blk_rq_pos(prq) : blk_rq_pos(prq) << 9);
1891 + packed->blocks += blk_rq_sectors(prq);
1892 + i++;
1893 + }
1894 +diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
1895 +index a56133585e92..03331c173bd0 100644
1896 +--- a/drivers/mtd/ubi/build.c
1897 ++++ b/drivers/mtd/ubi/build.c
1898 +@@ -997,6 +997,9 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
1899 + goto out_detach;
1900 + }
1901 +
1902 ++ /* Make device "available" before it becomes accessible via sysfs */
1903 ++ ubi_devices[ubi_num] = ubi;
1904 ++
1905 + err = uif_init(ubi, &ref);
1906 + if (err)
1907 + goto out_detach;
1908 +@@ -1041,7 +1044,6 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
1909 + wake_up_process(ubi->bgt_thread);
1910 + spin_unlock(&ubi->wl_lock);
1911 +
1912 +- ubi_devices[ubi_num] = ubi;
1913 + ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
1914 + return ubi_num;
1915 +
1916 +@@ -1052,6 +1054,7 @@ out_uif:
1917 + ubi_assert(ref);
1918 + uif_close(ubi);
1919 + out_detach:
1920 ++ ubi_devices[ubi_num] = NULL;
1921 + ubi_wl_close(ubi);
1922 + ubi_free_internal_volumes(ubi);
1923 + vfree(ubi->vtbl);
1924 +diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
1925 +index 8330703c098f..96131eb34c9f 100644
1926 +--- a/drivers/mtd/ubi/vmt.c
1927 ++++ b/drivers/mtd/ubi/vmt.c
1928 +@@ -534,13 +534,6 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
1929 + spin_unlock(&ubi->volumes_lock);
1930 + }
1931 +
1932 +- /* Change volume table record */
1933 +- vtbl_rec = ubi->vtbl[vol_id];
1934 +- vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
1935 +- err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
1936 +- if (err)
1937 +- goto out_acc;
1938 +-
1939 + if (pebs < 0) {
1940 + for (i = 0; i < -pebs; i++) {
1941 + err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i);
1942 +@@ -558,6 +551,24 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
1943 + spin_unlock(&ubi->volumes_lock);
1944 + }
1945 +
1946 ++ /*
1947 ++ * When we shrink a volume we have to flush all pending (erase) work.
1948 ++ * Otherwise it can happen that upon next attach UBI finds a LEB with
1949 ++ * lnum > highest_lnum and refuses to attach.
1950 ++ */
1951 ++ if (pebs < 0) {
1952 ++ err = ubi_wl_flush(ubi, vol_id, UBI_ALL);
1953 ++ if (err)
1954 ++ goto out_acc;
1955 ++ }
1956 ++
1957 ++ /* Change volume table record */
1958 ++ vtbl_rec = ubi->vtbl[vol_id];
1959 ++ vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
1960 ++ err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
1961 ++ if (err)
1962 ++ goto out_acc;
1963 ++
1964 + vol->reserved_pebs = reserved_pebs;
1965 + if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
1966 + vol->used_ebs = reserved_pebs;
1967 +diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
1968 +index 535d5dd8d816..024078c5fb16 100644
1969 +--- a/drivers/net/can/at91_can.c
1970 ++++ b/drivers/net/can/at91_can.c
1971 +@@ -731,9 +731,10 @@ static int at91_poll_rx(struct net_device *dev, int quota)
1972 +
1973 + /* upper group completed, look again in lower */
1974 + if (priv->rx_next > get_mb_rx_low_last(priv) &&
1975 +- quota > 0 && mb > get_mb_rx_last(priv)) {
1976 ++ mb > get_mb_rx_last(priv)) {
1977 + priv->rx_next = get_mb_rx_first(priv);
1978 +- goto again;
1979 ++ if (quota > 0)
1980 ++ goto again;
1981 + }
1982 +
1983 + return received;
1984 +diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
1985 +index f66aeb79abdf..464e5f66b66d 100644
1986 +--- a/drivers/net/can/dev.c
1987 ++++ b/drivers/net/can/dev.c
1988 +@@ -772,6 +772,11 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
1989 + return -EOPNOTSUPP;
1990 + }
1991 +
1992 ++static void can_dellink(struct net_device *dev, struct list_head *head)
1993 ++{
1994 ++ return;
1995 ++}
1996 ++
1997 + static struct rtnl_link_ops can_link_ops __read_mostly = {
1998 + .kind = "can",
1999 + .maxtype = IFLA_CAN_MAX,
2000 +@@ -779,6 +784,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
2001 + .setup = can_setup,
2002 + .newlink = can_newlink,
2003 + .changelink = can_changelink,
2004 ++ .dellink = can_dellink,
2005 + .get_size = can_get_size,
2006 + .fill_info = can_fill_info,
2007 + .get_xstats_size = can_get_xstats_size,
2008 +diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
2009 +index a85a9c2f1385..7357e54f1de9 100644
2010 +--- a/drivers/net/ethernet/atheros/alx/main.c
2011 ++++ b/drivers/net/ethernet/atheros/alx/main.c
2012 +@@ -86,9 +86,14 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
2013 + while (!cur_buf->skb && next != rxq->read_idx) {
2014 + struct alx_rfd *rfd = &rxq->rfd[cur];
2015 +
2016 +- skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
2017 ++ skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp);
2018 + if (!skb)
2019 + break;
2020 ++
2021 ++ /* Workaround for the HW RX DMA overflow issue */
2022 ++ if (((unsigned long)skb->data & 0xfff) == 0xfc0)
2023 ++ skb_reserve(skb, 64);
2024 ++
2025 + dma = dma_map_single(&alx->hw.pdev->dev,
2026 + skb->data, alx->rxbuf_size,
2027 + DMA_FROM_DEVICE);
2028 +diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
2029 +index d5643c143bb8..df3af299a7d2 100644
2030 +--- a/drivers/net/ethernet/marvell/mvneta.c
2031 ++++ b/drivers/net/ethernet/marvell/mvneta.c
2032 +@@ -210,7 +210,7 @@
2033 + /* Various constants */
2034 +
2035 + /* Coalescing */
2036 +-#define MVNETA_TXDONE_COAL_PKTS 1
2037 ++#define MVNETA_TXDONE_COAL_PKTS 0 /* interrupt per packet */
2038 + #define MVNETA_RX_COAL_PKTS 32
2039 + #define MVNETA_RX_COAL_USEC 100
2040 +
2041 +diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
2042 +index 74581cbcafa7..a5802419381f 100644
2043 +--- a/drivers/net/usb/cdc_ncm.c
2044 ++++ b/drivers/net/usb/cdc_ncm.c
2045 +@@ -477,6 +477,13 @@ advance:
2046 + if (cdc_ncm_setup(ctx))
2047 + goto error2;
2048 +
2049 ++ /* Some firmwares need a pause here or they will silently fail
2050 ++ * to set up the interface properly. This value was decided
2051 ++ * empirically on a Sierra Wireless MC7455 running 02.08.02.00
2052 ++ * firmware.
2053 ++ */
2054 ++ usleep_range(10000, 20000);
2055 ++
2056 + /* configure data interface */
2057 + temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
2058 + if (temp)
2059 +@@ -598,24 +605,13 @@ EXPORT_SYMBOL_GPL(cdc_ncm_select_altsetting);
2060 +
2061 + static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
2062 + {
2063 +- int ret;
2064 +-
2065 + /* MBIM backwards compatible function? */
2066 + cdc_ncm_select_altsetting(dev, intf);
2067 + if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
2068 + return -ENODEV;
2069 +
2070 + /* NCM data altsetting is always 1 */
2071 +- ret = cdc_ncm_bind_common(dev, intf, 1);
2072 +-
2073 +- /*
2074 +- * We should get an event when network connection is "connected" or
2075 +- * "disconnected". Set network connection in "disconnected" state
2076 +- * (carrier is OFF) during attach, so the IP network stack does not
2077 +- * start IPv6 negotiation and more.
2078 +- */
2079 +- usbnet_link_change(dev, 0, 0);
2080 +- return ret;
2081 ++ return cdc_ncm_bind_common(dev, intf, 1);
2082 + }
2083 +
2084 + static void cdc_ncm_align_tail(struct sk_buff *skb, size_t modulus, size_t remainder, size_t max)
2085 +@@ -1161,7 +1157,8 @@ static void cdc_ncm_disconnect(struct usb_interface *intf)
2086 +
2087 + static const struct driver_info cdc_ncm_info = {
2088 + .description = "CDC NCM",
2089 +- .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET,
2090 ++ .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
2091 ++ | FLAG_LINK_INTR,
2092 + .bind = cdc_ncm_bind,
2093 + .unbind = cdc_ncm_unbind,
2094 + .check_connect = cdc_ncm_check_connect,
2095 +@@ -1175,7 +1172,7 @@ static const struct driver_info cdc_ncm_info = {
2096 + static const struct driver_info wwan_info = {
2097 + .description = "Mobile Broadband Network Device",
2098 + .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
2099 +- | FLAG_WWAN,
2100 ++ | FLAG_LINK_INTR | FLAG_WWAN,
2101 + .bind = cdc_ncm_bind,
2102 + .unbind = cdc_ncm_unbind,
2103 + .check_connect = cdc_ncm_check_connect,
2104 +@@ -1189,7 +1186,7 @@ static const struct driver_info wwan_info = {
2105 + static const struct driver_info wwan_noarp_info = {
2106 + .description = "Mobile Broadband Network Device (NO ARP)",
2107 + .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
2108 +- | FLAG_WWAN | FLAG_NOARP,
2109 ++ | FLAG_LINK_INTR | FLAG_WWAN | FLAG_NOARP,
2110 + .bind = cdc_ncm_bind,
2111 + .unbind = cdc_ncm_unbind,
2112 + .check_connect = cdc_ncm_check_connect,
2113 +diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c
2114 +index f77ef36acf87..61879b1f7083 100644
2115 +--- a/drivers/net/wireless/ath/ath5k/led.c
2116 ++++ b/drivers/net/wireless/ath/ath5k/led.c
2117 +@@ -77,7 +77,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath5k_led_devices) = {
2118 + /* HP Compaq CQ60-206US (ddreggors@××××××.com) */
2119 + { ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137a), ATH_LED(3, 1) },
2120 + /* HP Compaq C700 (nitrousnrg@×××××.com) */
2121 +- { ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 1) },
2122 ++ { ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 0) },
2123 + /* LiteOn AR5BXB63 (magooz@×××××.it) */
2124 + { ATH_SDEVICE(PCI_VENDOR_ID_ATHEROS, 0x3067), ATH_LED(3, 0) },
2125 + /* IBM-specific AR5212 (all others) */
2126 +diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
2127 +index cb34c7895f2a..735c26620387 100644
2128 +--- a/drivers/net/wireless/mac80211_hwsim.c
2129 ++++ b/drivers/net/wireless/mac80211_hwsim.c
2130 +@@ -1931,6 +1931,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
2131 + if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] ||
2132 + !info->attrs[HWSIM_ATTR_FLAGS] ||
2133 + !info->attrs[HWSIM_ATTR_COOKIE] ||
2134 ++ !info->attrs[HWSIM_ATTR_SIGNAL] ||
2135 + !info->attrs[HWSIM_ATTR_TX_INFO])
2136 + goto out;
2137 +
2138 +diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
2139 +index 6fc0853fd7f9..d066f74f743a 100644
2140 +--- a/drivers/net/wireless/rtlwifi/base.c
2141 ++++ b/drivers/net/wireless/rtlwifi/base.c
2142 +@@ -1392,9 +1392,9 @@ void rtl_watchdog_wq_callback(void *data)
2143 + if (((rtlpriv->link_info.num_rx_inperiod +
2144 + rtlpriv->link_info.num_tx_inperiod) > 8) ||
2145 + (rtlpriv->link_info.num_rx_inperiod > 2))
2146 +- rtlpriv->enter_ps = true;
2147 +- else
2148 + rtlpriv->enter_ps = false;
2149 ++ else
2150 ++ rtlpriv->enter_ps = true;
2151 +
2152 + /* LeisurePS only work in infra mode. */
2153 + schedule_work(&rtlpriv->works.lps_change_work);
2154 +diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
2155 +index d332d55885f8..2d7cd0c080d3 100644
2156 +--- a/drivers/pci/probe.c
2157 ++++ b/drivers/pci/probe.c
2158 +@@ -173,9 +173,6 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
2159 + struct pci_bus_region region;
2160 + bool bar_too_big = false, bar_disabled = false;
2161 +
2162 +- if (dev->non_compliant_bars)
2163 +- return 0;
2164 +-
2165 + mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
2166 +
2167 + /* No printks while decoding is disabled! */
2168 +@@ -295,6 +292,9 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
2169 + {
2170 + unsigned int pos, reg;
2171 +
2172 ++ if (dev->non_compliant_bars)
2173 ++ return;
2174 ++
2175 + for (pos = 0; pos < howmany; pos++) {
2176 + struct resource *res = &dev->resource[pos];
2177 + reg = PCI_BASE_ADDRESS_0 + (pos << 2);
2178 +diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
2179 +index d111c8687f9b..46497c6cbcc1 100644
2180 +--- a/drivers/platform/x86/hp-wmi.c
2181 ++++ b/drivers/platform/x86/hp-wmi.c
2182 +@@ -640,6 +640,11 @@ static int hp_wmi_rfkill_setup(struct platform_device *device)
2183 + if (err)
2184 + return err;
2185 +
2186 ++ err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, &wireless,
2187 ++ sizeof(wireless), 0);
2188 ++ if (err)
2189 ++ return err;
2190 ++
2191 + if (wireless & 0x1) {
2192 + wifi_rfkill = rfkill_alloc("hp-wifi", &device->dev,
2193 + RFKILL_TYPE_WLAN,
2194 +diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
2195 +index ec8ccdae7aba..0090de46aa5e 100644
2196 +--- a/drivers/s390/net/qeth_l2_main.c
2197 ++++ b/drivers/s390/net/qeth_l2_main.c
2198 +@@ -898,6 +898,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
2199 + qeth_l2_set_offline(cgdev);
2200 +
2201 + if (card->dev) {
2202 ++ netif_napi_del(&card->napi);
2203 + unregister_netdev(card->dev);
2204 + card->dev = NULL;
2205 + }
2206 +diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
2207 +index c1b0b2761f8d..7366bef742de 100644
2208 +--- a/drivers/s390/net/qeth_l3_main.c
2209 ++++ b/drivers/s390/net/qeth_l3_main.c
2210 +@@ -3333,6 +3333,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
2211 + qeth_l3_set_offline(cgdev);
2212 +
2213 + if (card->dev) {
2214 ++ netif_napi_del(&card->napi);
2215 + unregister_netdev(card->dev);
2216 + card->dev = NULL;
2217 + }
2218 +diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
2219 +index 6a0d362e2596..284efac5f202 100644
2220 +--- a/drivers/scsi/aacraid/commsup.c
2221 ++++ b/drivers/scsi/aacraid/commsup.c
2222 +@@ -590,10 +590,10 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
2223 + }
2224 + return -EFAULT;
2225 + }
2226 +- /* We used to udelay() here but that absorbed
2227 +- * a CPU when a timeout occured. Not very
2228 +- * useful. */
2229 +- cpu_relax();
2230 ++ /*
2231 ++ * Allow other processes / CPUS to use core
2232 ++ */
2233 ++ schedule();
2234 + }
2235 + } else if (down_interruptible(&fibptr->event_wait)) {
2236 + /* Do nothing ... satisfy
2237 +@@ -1920,6 +1920,10 @@ int aac_command_thread(void *data)
2238 + if (difference <= 0)
2239 + difference = 1;
2240 + set_current_state(TASK_INTERRUPTIBLE);
2241 ++
2242 ++ if (kthread_should_stop())
2243 ++ break;
2244 ++
2245 + schedule_timeout(difference);
2246 +
2247 + if (kthread_should_stop())
2248 +diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
2249 +index a683a831527b..02278130826b 100644
2250 +--- a/drivers/scsi/be2iscsi/be_main.c
2251 ++++ b/drivers/scsi/be2iscsi/be_main.c
2252 +@@ -2978,7 +2978,7 @@ be_sgl_create_contiguous(void *virtual_address,
2253 + {
2254 + WARN_ON(!virtual_address);
2255 + WARN_ON(!physical_address);
2256 +- WARN_ON(!length > 0);
2257 ++ WARN_ON(!length);
2258 + WARN_ON(!sgl);
2259 +
2260 + sgl->va = virtual_address;
2261 +diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
2262 +index 25ac2c00f8b3..2891faa8e384 100644
2263 +--- a/drivers/scsi/ipr.c
2264 ++++ b/drivers/scsi/ipr.c
2265 +@@ -9607,6 +9607,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
2266 + ioa_cfg->intr_flag = IPR_USE_MSI;
2267 + else {
2268 + ioa_cfg->intr_flag = IPR_USE_LSI;
2269 ++ ioa_cfg->clear_isr = 1;
2270 + ioa_cfg->nvectors = 1;
2271 + dev_info(&pdev->dev, "Cannot enable MSI.\n");
2272 + }
2273 +diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
2274 +index 9acbc885239b..5ba69ea8eb92 100644
2275 +--- a/drivers/scsi/scsi_error.c
2276 ++++ b/drivers/scsi/scsi_error.c
2277 +@@ -898,7 +898,6 @@ static int scsi_request_sense(struct scsi_cmnd *scmd)
2278 + */
2279 + void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
2280 + {
2281 +- scmd->device->host->host_failed--;
2282 + scmd->eh_eflags = 0;
2283 + list_move_tail(&scmd->eh_entry, done_q);
2284 + }
2285 +@@ -1892,6 +1891,9 @@ int scsi_error_handler(void *data)
2286 + else
2287 + scsi_unjam_host(shost);
2288 +
2289 ++ /* All scmds have been handled */
2290 ++ shost->host_failed = 0;
2291 ++
2292 + /*
2293 + * Note - if the above fails completely, the action is to take
2294 + * individual devices offline and flush the queue of any
2295 +diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
2296 +index 9f3168e8e5a8..60031e15d562 100644
2297 +--- a/drivers/scsi/scsi_lib.c
2298 ++++ b/drivers/scsi/scsi_lib.c
2299 +@@ -546,66 +546,6 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
2300 +
2301 + static void __scsi_release_buffers(struct scsi_cmnd *, int);
2302 +
2303 +-/*
2304 +- * Function: scsi_end_request()
2305 +- *
2306 +- * Purpose: Post-processing of completed commands (usually invoked at end
2307 +- * of upper level post-processing and scsi_io_completion).
2308 +- *
2309 +- * Arguments: cmd - command that is complete.
2310 +- * error - 0 if I/O indicates success, < 0 for I/O error.
2311 +- * bytes - number of bytes of completed I/O
2312 +- * requeue - indicates whether we should requeue leftovers.
2313 +- *
2314 +- * Lock status: Assumed that lock is not held upon entry.
2315 +- *
2316 +- * Returns: cmd if requeue required, NULL otherwise.
2317 +- *
2318 +- * Notes: This is called for block device requests in order to
2319 +- * mark some number of sectors as complete.
2320 +- *
2321 +- * We are guaranteeing that the request queue will be goosed
2322 +- * at some point during this call.
2323 +- * Notes: If cmd was requeued, upon return it will be a stale pointer.
2324 +- */
2325 +-static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
2326 +- int bytes, int requeue)
2327 +-{
2328 +- struct request_queue *q = cmd->device->request_queue;
2329 +- struct request *req = cmd->request;
2330 +-
2331 +- /*
2332 +- * If there are blocks left over at the end, set up the command
2333 +- * to queue the remainder of them.
2334 +- */
2335 +- if (blk_end_request(req, error, bytes)) {
2336 +- /* kill remainder if no retrys */
2337 +- if (error && scsi_noretry_cmd(cmd))
2338 +- blk_end_request_all(req, error);
2339 +- else {
2340 +- if (requeue) {
2341 +- /*
2342 +- * Bleah. Leftovers again. Stick the
2343 +- * leftovers in the front of the
2344 +- * queue, and goose the queue again.
2345 +- */
2346 +- scsi_release_buffers(cmd);
2347 +- scsi_requeue_command(q, cmd);
2348 +- cmd = NULL;
2349 +- }
2350 +- return cmd;
2351 +- }
2352 +- }
2353 +-
2354 +- /*
2355 +- * This will goose the queue request function at the end, so we don't
2356 +- * need to worry about launching another command.
2357 +- */
2358 +- __scsi_release_buffers(cmd, 0);
2359 +- scsi_next_command(cmd);
2360 +- return NULL;
2361 +-}
2362 +-
2363 + static inline unsigned int scsi_sgtable_index(unsigned short nents)
2364 + {
2365 + unsigned int index;
2366 +@@ -735,16 +675,9 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
2367 + *
2368 + * Returns: Nothing
2369 + *
2370 +- * Notes: This function is matched in terms of capabilities to
2371 +- * the function that created the scatter-gather list.
2372 +- * In other words, if there are no bounce buffers
2373 +- * (the normal case for most drivers), we don't need
2374 +- * the logic to deal with cleaning up afterwards.
2375 +- *
2376 +- * We must call scsi_end_request(). This will finish off
2377 +- * the specified number of sectors. If we are done, the
2378 +- * command block will be released and the queue function
2379 +- * will be goosed. If we are not done then we have to
2380 ++ * Notes: We will finish off the specified number of sectors. If we
2381 ++ * are done, the command block will be released and the queue
2382 ++ * function will be goosed. If we are not done then we have to
2383 + * figure out what to do next:
2384 + *
2385 + * a) We can call scsi_requeue_command(). The request
2386 +@@ -753,7 +686,7 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
2387 + * be used if we made forward progress, or if we want
2388 + * to switch from READ(10) to READ(6) for example.
2389 + *
2390 +- * b) We can call scsi_queue_insert(). The request will
2391 ++ * b) We can call __scsi_queue_insert(). The request will
2392 + * be put back on the queue and retried using the same
2393 + * command as before, possibly after a delay.
2394 + *
2395 +@@ -857,12 +790,28 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
2396 + }
2397 +
2398 + /*
2399 +- * A number of bytes were successfully read. If there
2400 +- * are leftovers and there is some kind of error
2401 +- * (result != 0), retry the rest.
2402 ++ * special case: failed zero length commands always need to
2403 ++ * drop down into the retry code. Otherwise, if we finished
2404 ++ * all bytes in the request we are done now.
2405 + */
2406 +- if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
2407 +- return;
2408 ++ if (!(blk_rq_bytes(req) == 0 && error) &&
2409 ++ !blk_end_request(req, error, good_bytes))
2410 ++ goto next_command;
2411 ++
2412 ++ /*
2413 ++ * Kill remainder if no retrys.
2414 ++ */
2415 ++ if (error && scsi_noretry_cmd(cmd)) {
2416 ++ blk_end_request_all(req, error);
2417 ++ goto next_command;
2418 ++ }
2419 ++
2420 ++ /*
2421 ++ * If there had been no error, but we have leftover bytes in the
2422 ++ * request, just queue the command up again.
2423 ++ */
2424 ++ if (result == 0)
2425 ++ goto requeue;
2426 +
2427 + error = __scsi_error_from_host_byte(cmd, result);
2428 +
2429 +@@ -984,7 +933,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
2430 + switch (action) {
2431 + case ACTION_FAIL:
2432 + /* Give up and fail the remainder of the request */
2433 +- scsi_release_buffers(cmd);
2434 + if (!(req->cmd_flags & REQ_QUIET)) {
2435 + if (description)
2436 + scmd_printk(KERN_INFO, cmd, "%s\n",
2437 +@@ -994,12 +942,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
2438 + scsi_print_sense("", cmd);
2439 + scsi_print_command(cmd);
2440 + }
2441 +- if (blk_end_request_err(req, error))
2442 +- scsi_requeue_command(q, cmd);
2443 +- else
2444 +- scsi_next_command(cmd);
2445 +- break;
2446 ++ if (!blk_end_request_err(req, error))
2447 ++ goto next_command;
2448 ++ /*FALLTHRU*/
2449 + case ACTION_REPREP:
2450 ++ requeue:
2451 + /* Unprep the request and put it back at the head of the queue.
2452 + * A new command will be prepared and issued.
2453 + */
2454 +@@ -1015,6 +962,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
2455 + __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
2456 + break;
2457 + }
2458 ++ return;
2459 ++
2460 ++next_command:
2461 ++ __scsi_release_buffers(cmd, 0);
2462 ++ scsi_next_command(cmd);
2463 + }
2464 +
2465 + static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
2466 +diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
2467 +index 34d18dcfa0db..109a535b639c 100644
2468 +--- a/drivers/spi/spi-xilinx.c
2469 ++++ b/drivers/spi/spi-xilinx.c
2470 +@@ -315,7 +315,7 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
2471 + }
2472 +
2473 + /* See if there is more data to send */
2474 +- if (!xspi->remaining_bytes > 0)
2475 ++ if (xspi->remaining_bytes <= 0)
2476 + break;
2477 + }
2478 +
2479 +diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
2480 +index 32950ad94857..b30c41b3e0cc 100644
2481 +--- a/drivers/staging/iio/accel/sca3000_core.c
2482 ++++ b/drivers/staging/iio/accel/sca3000_core.c
2483 +@@ -588,7 +588,7 @@ static ssize_t sca3000_read_frequency(struct device *dev,
2484 + goto error_ret_mut;
2485 + ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
2486 + mutex_unlock(&st->lock);
2487 +- if (ret)
2488 ++ if (ret < 0)
2489 + goto error_ret;
2490 + val = ret;
2491 + if (base_freq > 0)
2492 +diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
2493 +index a9af1b9ae160..1f6e09649e5a 100644
2494 +--- a/drivers/tty/vt/keyboard.c
2495 ++++ b/drivers/tty/vt/keyboard.c
2496 +@@ -371,34 +371,22 @@ static void to_utf8(struct vc_data *vc, uint c)
2497 +
2498 + static void do_compute_shiftstate(void)
2499 + {
2500 +- unsigned int i, j, k, sym, val;
2501 ++ unsigned int k, sym, val;
2502 +
2503 + shift_state = 0;
2504 + memset(shift_down, 0, sizeof(shift_down));
2505 +
2506 +- for (i = 0; i < ARRAY_SIZE(key_down); i++) {
2507 +-
2508 +- if (!key_down[i])
2509 ++ for_each_set_bit(k, key_down, min(NR_KEYS, KEY_CNT)) {
2510 ++ sym = U(key_maps[0][k]);
2511 ++ if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
2512 + continue;
2513 +
2514 +- k = i * BITS_PER_LONG;
2515 +-
2516 +- for (j = 0; j < BITS_PER_LONG; j++, k++) {
2517 +-
2518 +- if (!test_bit(k, key_down))
2519 +- continue;
2520 ++ val = KVAL(sym);
2521 ++ if (val == KVAL(K_CAPSSHIFT))
2522 ++ val = KVAL(K_SHIFT);
2523 +
2524 +- sym = U(key_maps[0][k]);
2525 +- if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
2526 +- continue;
2527 +-
2528 +- val = KVAL(sym);
2529 +- if (val == KVAL(K_CAPSSHIFT))
2530 +- val = KVAL(K_SHIFT);
2531 +-
2532 +- shift_down[val]++;
2533 +- shift_state |= (1 << val);
2534 +- }
2535 ++ shift_down[val]++;
2536 ++ shift_state |= BIT(val);
2537 + }
2538 + }
2539 +
2540 +diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
2541 +index 62e532fb82ad..cfce807531f6 100644
2542 +--- a/drivers/usb/core/devio.c
2543 ++++ b/drivers/usb/core/devio.c
2544 +@@ -1106,10 +1106,11 @@ static int proc_getdriver(struct dev_state *ps, void __user *arg)
2545 +
2546 + static int proc_connectinfo(struct dev_state *ps, void __user *arg)
2547 + {
2548 +- struct usbdevfs_connectinfo ci = {
2549 +- .devnum = ps->dev->devnum,
2550 +- .slow = ps->dev->speed == USB_SPEED_LOW
2551 +- };
2552 ++ struct usbdevfs_connectinfo ci;
2553 ++
2554 ++ memset(&ci, 0, sizeof(ci));
2555 ++ ci.devnum = ps->dev->devnum;
2556 ++ ci.slow = ps->dev->speed == USB_SPEED_LOW;
2557 +
2558 + if (copy_to_user(arg, &ci, sizeof(ci)))
2559 + return -EFAULT;
2560 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
2561 +index 8eb2de6beee4..4e5156d212dd 100644
2562 +--- a/drivers/usb/core/hub.c
2563 ++++ b/drivers/usb/core/hub.c
2564 +@@ -113,6 +113,7 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
2565 + #define HUB_DEBOUNCE_STEP 25
2566 + #define HUB_DEBOUNCE_STABLE 100
2567 +
2568 ++static void hub_release(struct kref *kref);
2569 + static int usb_reset_and_verify_device(struct usb_device *udev);
2570 +
2571 + static inline char *portspeed(struct usb_hub *hub, int portstatus)
2572 +@@ -1024,10 +1025,20 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
2573 + unsigned delay;
2574 +
2575 + /* Continue a partial initialization */
2576 +- if (type == HUB_INIT2)
2577 +- goto init2;
2578 +- if (type == HUB_INIT3)
2579 ++ if (type == HUB_INIT2 || type == HUB_INIT3) {
2580 ++ device_lock(hub->intfdev);
2581 ++
2582 ++ /* Was the hub disconnected while we were waiting? */
2583 ++ if (hub->disconnected) {
2584 ++ device_unlock(hub->intfdev);
2585 ++ kref_put(&hub->kref, hub_release);
2586 ++ return;
2587 ++ }
2588 ++ if (type == HUB_INIT2)
2589 ++ goto init2;
2590 + goto init3;
2591 ++ }
2592 ++ kref_get(&hub->kref);
2593 +
2594 + /* The superspeed hub except for root hub has to use Hub Depth
2595 + * value as an offset into the route string to locate the bits
2596 +@@ -1224,6 +1235,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
2597 + PREPARE_DELAYED_WORK(&hub->init_work, hub_init_func3);
2598 + schedule_delayed_work(&hub->init_work,
2599 + msecs_to_jiffies(delay));
2600 ++ device_unlock(hub->intfdev);
2601 + return; /* Continues at init3: below */
2602 + } else {
2603 + msleep(delay);
2604 +@@ -1244,6 +1256,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
2605 + /* Allow autosuspend if it was suppressed */
2606 + if (type <= HUB_INIT3)
2607 + usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));
2608 ++
2609 ++ if (type == HUB_INIT2 || type == HUB_INIT3)
2610 ++ device_unlock(hub->intfdev);
2611 ++
2612 ++ kref_put(&hub->kref, hub_release);
2613 + }
2614 +
2615 + /* Implement the continuations for the delays above */
2616 +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
2617 +index 94e9cddc05c1..aa27ec1f4813 100644
2618 +--- a/drivers/usb/core/quirks.c
2619 ++++ b/drivers/usb/core/quirks.c
2620 +@@ -170,14 +170,6 @@ static const struct usb_device_id usb_quirk_list[] = {
2621 + /* INTEL VALUE SSD */
2622 + { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
2623 +
2624 +- { } /* terminating entry must be last */
2625 +-};
2626 +-
2627 +-static const struct usb_device_id usb_interface_quirk_list[] = {
2628 +- /* Logitech UVC Cameras */
2629 +- { USB_VENDOR_AND_INTERFACE_INFO(0x046d, USB_CLASS_VIDEO, 1, 0),
2630 +- .driver_info = USB_QUIRK_RESET_RESUME },
2631 +-
2632 + /* ASUS Base Station(T100) */
2633 + { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
2634 + USB_QUIRK_IGNORE_REMOTE_WAKEUP },
2635 +@@ -191,6 +183,14 @@ static const struct usb_device_id usb_interface_quirk_list[] = {
2636 + { } /* terminating entry must be last */
2637 + };
2638 +
2639 ++static const struct usb_device_id usb_interface_quirk_list[] = {
2640 ++ /* Logitech UVC Cameras */
2641 ++ { USB_VENDOR_AND_INTERFACE_INFO(0x046d, USB_CLASS_VIDEO, 1, 0),
2642 ++ .driver_info = USB_QUIRK_RESET_RESUME },
2643 ++
2644 ++ { } /* terminating entry must be last */
2645 ++};
2646 ++
2647 + static bool usb_match_any_interface(struct usb_device *udev,
2648 + const struct usb_device_id *id)
2649 + {
2650 +diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
2651 +index 9d3044bdebe5..c6cc5201665a 100644
2652 +--- a/drivers/usb/musb/musb_host.c
2653 ++++ b/drivers/usb/musb/musb_host.c
2654 +@@ -581,14 +581,13 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
2655 + musb_writew(ep->regs, MUSB_TXCSR, 0);
2656 +
2657 + /* scrub all previous state, clearing toggle */
2658 +- } else {
2659 +- csr = musb_readw(ep->regs, MUSB_RXCSR);
2660 +- if (csr & MUSB_RXCSR_RXPKTRDY)
2661 +- WARNING("rx%d, packet/%d ready?\n", ep->epnum,
2662 +- musb_readw(ep->regs, MUSB_RXCOUNT));
2663 +-
2664 +- musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
2665 + }
2666 ++ csr = musb_readw(ep->regs, MUSB_RXCSR);
2667 ++ if (csr & MUSB_RXCSR_RXPKTRDY)
2668 ++ WARNING("rx%d, packet/%d ready?\n", ep->epnum,
2669 ++ musb_readw(ep->regs, MUSB_RXCOUNT));
2670 ++
2671 ++ musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
2672 +
2673 + /* target addr and (for multipoint) hub addr/port */
2674 + if (musb->is_multipoint) {
2675 +@@ -948,9 +947,15 @@ static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
2676 + if (is_in) {
2677 + dma = is_dma_capable() ? ep->rx_channel : NULL;
2678 +
2679 +- /* clear nak timeout bit */
2680 ++ /*
2681 ++ * Need to stop the transaction by clearing REQPKT first
2682 ++ * then the NAK Timeout bit ref MUSBMHDRC USB 2.0 HIGH-SPEED
2683 ++ * DUAL-ROLE CONTROLLER Programmer's Guide, section 9.2.2
2684 ++ */
2685 + rx_csr = musb_readw(epio, MUSB_RXCSR);
2686 + rx_csr |= MUSB_RXCSR_H_WZC_BITS;
2687 ++ rx_csr &= ~MUSB_RXCSR_H_REQPKT;
2688 ++ musb_writew(epio, MUSB_RXCSR, rx_csr);
2689 + rx_csr &= ~MUSB_RXCSR_DATAERROR;
2690 + musb_writew(epio, MUSB_RXCSR, rx_csr);
2691 +
2692 +diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
2693 +index ed4949faa70d..64223a923932 100644
2694 +--- a/drivers/usb/renesas_usbhs/mod_gadget.c
2695 ++++ b/drivers/usb/renesas_usbhs/mod_gadget.c
2696 +@@ -558,6 +558,9 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
2697 + struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
2698 + struct usbhs_pipe *pipe;
2699 + int ret = -EIO;
2700 ++ unsigned long flags;
2701 ++
2702 ++ usbhs_lock(priv, flags);
2703 +
2704 + /*
2705 + * if it already have pipe,
2706 +@@ -566,7 +569,8 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
2707 + if (uep->pipe) {
2708 + usbhs_pipe_clear(uep->pipe);
2709 + usbhs_pipe_sequence_data0(uep->pipe);
2710 +- return 0;
2711 ++ ret = 0;
2712 ++ goto usbhsg_ep_enable_end;
2713 + }
2714 +
2715 + pipe = usbhs_pipe_malloc(priv,
2716 +@@ -594,6 +598,9 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
2717 + ret = 0;
2718 + }
2719 +
2720 ++usbhsg_ep_enable_end:
2721 ++ usbhs_unlock(priv, flags);
2722 ++
2723 + return ret;
2724 + }
2725 +
2726 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
2727 +index bcb6f5c2bae4..006a2a721edf 100644
2728 +--- a/drivers/usb/serial/option.c
2729 ++++ b/drivers/usb/serial/option.c
2730 +@@ -274,6 +274,7 @@ static void option_instat_callback(struct urb *urb);
2731 + #define TELIT_PRODUCT_LE922_USBCFG5 0x1045
2732 + #define TELIT_PRODUCT_LE920 0x1200
2733 + #define TELIT_PRODUCT_LE910 0x1201
2734 ++#define TELIT_PRODUCT_LE910_USBCFG4 0x1206
2735 +
2736 + /* ZTE PRODUCTS */
2737 + #define ZTE_VENDOR_ID 0x19d2
2738 +@@ -1206,6 +1207,8 @@ static const struct usb_device_id option_ids[] = {
2739 + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
2740 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
2741 + .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
2742 ++ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
2743 ++ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
2744 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
2745 + .driver_info = (kernel_ulong_t)&telit_le920_blacklist },
2746 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
2747 +diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
2748 +index 7d7add5ceba4..148e8ea1bc96 100644
2749 +--- a/drivers/virtio/virtio_balloon.c
2750 ++++ b/drivers/virtio/virtio_balloon.c
2751 +@@ -177,6 +177,8 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num)
2752 + num = min(num, ARRAY_SIZE(vb->pfns));
2753 +
2754 + mutex_lock(&vb->balloon_lock);
2755 ++ /* We can't release more pages than taken */
2756 ++ num = min(num, (size_t)vb->num_pages);
2757 + for (vb->num_pfns = 0; vb->num_pfns < num;
2758 + vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
2759 + page = balloon_page_dequeue(vb_dev_info);
2760 +diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
2761 +index 8abd7d579037..2e4517277e80 100644
2762 +--- a/drivers/xen/xen-acpi-processor.c
2763 ++++ b/drivers/xen/xen-acpi-processor.c
2764 +@@ -426,36 +426,7 @@ upload:
2765 +
2766 + return 0;
2767 + }
2768 +-static int __init check_prereq(void)
2769 +-{
2770 +- struct cpuinfo_x86 *c = &cpu_data(0);
2771 +-
2772 +- if (!xen_initial_domain())
2773 +- return -ENODEV;
2774 +-
2775 +- if (!acpi_gbl_FADT.smi_command)
2776 +- return -ENODEV;
2777 +-
2778 +- if (c->x86_vendor == X86_VENDOR_INTEL) {
2779 +- if (!cpu_has(c, X86_FEATURE_EST))
2780 +- return -ENODEV;
2781 +
2782 +- return 0;
2783 +- }
2784 +- if (c->x86_vendor == X86_VENDOR_AMD) {
2785 +- /* Copied from powernow-k8.h, can't include ../cpufreq/powernow
2786 +- * as we get compile warnings for the static functions.
2787 +- */
2788 +-#define CPUID_FREQ_VOLT_CAPABILITIES 0x80000007
2789 +-#define USE_HW_PSTATE 0x00000080
2790 +- u32 eax, ebx, ecx, edx;
2791 +- cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
2792 +- if ((edx & USE_HW_PSTATE) != USE_HW_PSTATE)
2793 +- return -ENODEV;
2794 +- return 0;
2795 +- }
2796 +- return -ENODEV;
2797 +-}
2798 + /* acpi_perf_data is a pointer to percpu data. */
2799 + static struct acpi_processor_performance __percpu *acpi_perf_data;
2800 +
2801 +@@ -511,10 +482,10 @@ static struct syscore_ops xap_syscore_ops = {
2802 + static int __init xen_acpi_processor_init(void)
2803 + {
2804 + unsigned int i;
2805 +- int rc = check_prereq();
2806 ++ int rc;
2807 +
2808 +- if (rc)
2809 +- return rc;
2810 ++ if (!xen_initial_domain())
2811 ++ return -ENODEV;
2812 +
2813 + nr_acpi_bits = get_max_acpi_id() + 1;
2814 + acpi_ids_done = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL);
2815 +diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
2816 +index 75fe3d466515..ba3fac8318bb 100644
2817 +--- a/drivers/xen/xen-pciback/conf_space.c
2818 ++++ b/drivers/xen/xen-pciback/conf_space.c
2819 +@@ -183,8 +183,7 @@ int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
2820 + field_start = OFFSET(cfg_entry);
2821 + field_end = OFFSET(cfg_entry) + field->size;
2822 +
2823 +- if ((req_start >= field_start && req_start < field_end)
2824 +- || (req_end > field_start && req_end <= field_end)) {
2825 ++ if (req_end > field_start && field_end > req_start) {
2826 + err = conf_space_read(dev, cfg_entry, field_start,
2827 + &tmp_val);
2828 + if (err)
2829 +@@ -230,8 +229,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
2830 + field_start = OFFSET(cfg_entry);
2831 + field_end = OFFSET(cfg_entry) + field->size;
2832 +
2833 +- if ((req_start >= field_start && req_start < field_end)
2834 +- || (req_end > field_start && req_end <= field_end)) {
2835 ++ if (req_end > field_start && field_end > req_start) {
2836 + tmp_val = 0;
2837 +
2838 + err = xen_pcibk_config_read(dev, field_start,
2839 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
2840 +index d05a30072023..7c33afd7d5d3 100644
2841 +--- a/fs/cifs/connect.c
2842 ++++ b/fs/cifs/connect.c
2843 +@@ -408,7 +408,9 @@ cifs_echo_request(struct work_struct *work)
2844 + * server->ops->need_neg() == true. Also, no need to ping if
2845 + * we got a response recently.
2846 + */
2847 +- if (!server->ops->need_neg || server->ops->need_neg(server) ||
2848 ++
2849 ++ if (server->tcpStatus == CifsNeedReconnect ||
2850 ++ server->tcpStatus == CifsExiting || server->tcpStatus == CifsNew ||
2851 + (server->ops->can_echo && !server->ops->can_echo(server)) ||
2852 + time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ))
2853 + goto requeue_echo;
2854 +diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
2855 +index 0c2425b21974..a998c929286f 100644
2856 +--- a/fs/cifs/dir.c
2857 ++++ b/fs/cifs/dir.c
2858 +@@ -227,6 +227,13 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
2859 + goto cifs_create_get_file_info;
2860 + }
2861 +
2862 ++ if (S_ISDIR(newinode->i_mode)) {
2863 ++ CIFSSMBClose(xid, tcon, fid->netfid);
2864 ++ iput(newinode);
2865 ++ rc = -EISDIR;
2866 ++ goto out;
2867 ++ }
2868 ++
2869 + if (!S_ISREG(newinode->i_mode)) {
2870 + /*
2871 + * The server may allow us to open things like
2872 +@@ -391,10 +398,14 @@ cifs_create_set_dentry:
2873 + if (rc != 0) {
2874 + cifs_dbg(FYI, "Create worked, get_inode_info failed rc = %d\n",
2875 + rc);
2876 +- if (server->ops->close)
2877 +- server->ops->close(xid, tcon, fid);
2878 +- goto out;
2879 ++ goto out_err;
2880 + }
2881 ++
2882 ++ if (S_ISDIR(newinode->i_mode)) {
2883 ++ rc = -EISDIR;
2884 ++ goto out_err;
2885 ++ }
2886 ++
2887 + d_drop(direntry);
2888 + d_add(direntry, newinode);
2889 +
2890 +@@ -402,6 +413,13 @@ out:
2891 + kfree(buf);
2892 + kfree(full_path);
2893 + return rc;
2894 ++
2895 ++out_err:
2896 ++ if (server->ops->close)
2897 ++ server->ops->close(xid, tcon, fid);
2898 ++ if (newinode)
2899 ++ iput(newinode);
2900 ++ goto out;
2901 + }
2902 +
2903 + int
2904 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
2905 +index eb0de4c3ca76..9dd8c968d94e 100644
2906 +--- a/fs/cifs/smb2pdu.c
2907 ++++ b/fs/cifs/smb2pdu.c
2908 +@@ -1250,6 +1250,33 @@ SMB2_echo(struct TCP_Server_Info *server)
2909 +
2910 + cifs_dbg(FYI, "In echo request\n");
2911 +
2912 ++ if (server->tcpStatus == CifsNeedNegotiate) {
2913 ++ struct list_head *tmp, *tmp2;
2914 ++ struct cifs_ses *ses;
2915 ++ struct cifs_tcon *tcon;
2916 ++
2917 ++ cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
2918 ++ spin_lock(&cifs_tcp_ses_lock);
2919 ++ list_for_each(tmp, &server->smb_ses_list) {
2920 ++ ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
2921 ++ list_for_each(tmp2, &ses->tcon_list) {
2922 ++ tcon = list_entry(tmp2, struct cifs_tcon,
2923 ++ tcon_list);
2924 ++ /* add check for persistent handle reconnect */
2925 ++ if (tcon && tcon->need_reconnect) {
2926 ++ spin_unlock(&cifs_tcp_ses_lock);
2927 ++ rc = smb2_reconnect(SMB2_ECHO, tcon);
2928 ++ spin_lock(&cifs_tcp_ses_lock);
2929 ++ }
2930 ++ }
2931 ++ }
2932 ++ spin_unlock(&cifs_tcp_ses_lock);
2933 ++ }
2934 ++
2935 ++ /* if no session, renegotiate failed above */
2936 ++ if (server->tcpStatus == CifsNeedNegotiate)
2937 ++ return -EIO;
2938 ++
2939 + rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
2940 + if (rc)
2941 + return rc;
2942 +diff --git a/fs/dcache.c b/fs/dcache.c
2943 +index 17222fa5bdc6..2d0b9d2f3c43 100644
2944 +--- a/fs/dcache.c
2945 ++++ b/fs/dcache.c
2946 +@@ -1311,7 +1311,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
2947 + struct dentry *dentry = __d_alloc(parent->d_sb, name);
2948 + if (!dentry)
2949 + return NULL;
2950 +-
2951 ++ dentry->d_flags |= DCACHE_RCUACCESS;
2952 + spin_lock(&parent->d_lock);
2953 + /*
2954 + * don't need child lock because it is not subject
2955 +@@ -2101,7 +2101,6 @@ static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
2956 + {
2957 + BUG_ON(!d_unhashed(entry));
2958 + hlist_bl_lock(b);
2959 +- entry->d_flags |= DCACHE_RCUACCESS;
2960 + hlist_bl_add_head_rcu(&entry->d_hash, b);
2961 + hlist_bl_unlock(b);
2962 + }
2963 +@@ -2285,6 +2284,7 @@ static void __d_move(struct dentry * dentry, struct dentry * target)
2964 +
2965 + /* ... and switch the parents */
2966 + if (IS_ROOT(dentry)) {
2967 ++ dentry->d_flags |= DCACHE_RCUACCESS;
2968 + dentry->d_parent = target->d_parent;
2969 + target->d_parent = target;
2970 + INIT_LIST_HEAD(&target->d_child);
2971 +@@ -2401,6 +2401,7 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
2972 + switch_names(dentry, anon);
2973 + swap(dentry->d_name.hash, anon->d_name.hash);
2974 +
2975 ++ dentry->d_flags |= DCACHE_RCUACCESS;
2976 + dentry->d_parent = dentry;
2977 + list_del_init(&dentry->d_child);
2978 + anon->d_parent = dparent;
2979 +diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
2980 +index 9ff3664bb3ea..d4644cc938ba 100644
2981 +--- a/fs/ecryptfs/file.c
2982 ++++ b/fs/ecryptfs/file.c
2983 +@@ -183,6 +183,19 @@ out:
2984 + return rc;
2985 + }
2986 +
2987 ++static int ecryptfs_mmap(struct file *file, struct vm_area_struct *vma)
2988 ++{
2989 ++ struct file *lower_file = ecryptfs_file_to_lower(file);
2990 ++ /*
2991 ++ * Don't allow mmap on top of file systems that don't support it
2992 ++ * natively. If FILESYSTEM_MAX_STACK_DEPTH > 2 or ecryptfs
2993 ++ * allows recursive mounting, this will need to be extended.
2994 ++ */
2995 ++ if (!lower_file->f_op->mmap)
2996 ++ return -ENODEV;
2997 ++ return generic_file_mmap(file, vma);
2998 ++}
2999 ++
3000 + /**
3001 + * ecryptfs_open
3002 + * @inode: inode specifying file to open
3003 +@@ -358,7 +371,7 @@ const struct file_operations ecryptfs_main_fops = {
3004 + #ifdef CONFIG_COMPAT
3005 + .compat_ioctl = ecryptfs_compat_ioctl,
3006 + #endif
3007 +- .mmap = generic_file_mmap,
3008 ++ .mmap = ecryptfs_mmap,
3009 + .open = ecryptfs_open,
3010 + .flush = ecryptfs_flush,
3011 + .release = ecryptfs_release,
3012 +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
3013 +index df633bb25909..7eea76168d33 100644
3014 +--- a/fs/ext4/extents.c
3015 ++++ b/fs/ext4/extents.c
3016 +@@ -361,9 +361,13 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
3017 + ext4_fsblk_t block = ext4_ext_pblock(ext);
3018 + int len = ext4_ext_get_actual_len(ext);
3019 + ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
3020 +- ext4_lblk_t last = lblock + len - 1;
3021 +
3022 +- if (len == 0 || lblock > last)
3023 ++ /*
3024 ++ * We allow neither:
3025 ++ * - zero length
3026 ++ * - overflow/wrap-around
3027 ++ */
3028 ++ if (lblock + len <= lblock)
3029 + return 0;
3030 + return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
3031 + }
3032 +@@ -454,6 +458,10 @@ static int __ext4_ext_check(const char *function, unsigned int line,
3033 + error_msg = "invalid extent entries";
3034 + goto corrupted;
3035 + }
3036 ++ if (unlikely(depth > 32)) {
3037 ++ error_msg = "too large eh_depth";
3038 ++ goto corrupted;
3039 ++ }
3040 + /* Verify checksum on non-root extent tree nodes */
3041 + if (ext_depth(inode) != depth &&
3042 + !ext4_extent_block_csum_verify(inode, eh)) {
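
A standalone sketch of the new test (illustrative only, not part of the
patch; it assumes 32-bit logical block numbers, which is what ext4_lblk_t
is): one unsigned comparison rejects both a zero-length extent and one
whose end wraps past 2^32.

#include <stdint.h>
#include <stdio.h>

static int valid_range(uint32_t lblock, uint32_t len)
{
	/* len == 0           -> sum == lblock (rejected)
	 * lblock + len wraps -> sum <  lblock (rejected) */
	return lblock + len > lblock;
}

int main(void)
{
	printf("%d\n", valid_range(100, 8));           /* 1: ordinary extent */
	printf("%d\n", valid_range(100, 0));           /* 0: zero length */
	printf("%d\n", valid_range(0xfffffff0u, 32));  /* 0: wraps around */
	return 0;
}
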
3043 +diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
3044 +index 4d4718cf25ab..00cbc648e1dc 100644
3045 +--- a/fs/ext4/ialloc.c
3046 ++++ b/fs/ext4/ialloc.c
3047 +@@ -1027,11 +1027,13 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
3048 + goto iget_failed;
3049 +
3050 + /*
3051 +- * If the orphans has i_nlinks > 0 then it should be able to be
3052 +- * truncated, otherwise it won't be removed from the orphan list
3053 +- * during processing and an infinite loop will result.
3054 ++ * If the orphan has i_nlink > 0 then it should be able to
3055 ++ * be truncated, otherwise it won't be removed from the orphan
3056 ++ * list during processing and an infinite loop will result.
3057 ++ * Similarly, it must not be a bad inode.
3058 + */
3059 +- if (inode->i_nlink && !ext4_can_truncate(inode))
3060 ++ if ((inode->i_nlink && !ext4_can_truncate(inode)) ||
3061 ++ is_bad_inode(inode))
3062 + goto bad_orphan;
3063 +
3064 + if (NEXT_ORPHAN(inode) > max_ino)
3065 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
3066 +index fb7e576df25c..221b58298847 100644
3067 +--- a/fs/ext4/inode.c
3068 ++++ b/fs/ext4/inode.c
3069 +@@ -206,9 +206,9 @@ void ext4_evict_inode(struct inode *inode)
3070 + * Note that directories do not have this problem because they
3071 + * don't use page cache.
3072 + */
3073 +- if (ext4_should_journal_data(inode) &&
3074 +- (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
3075 +- inode->i_ino != EXT4_JOURNAL_INO) {
3076 ++ if (inode->i_ino != EXT4_JOURNAL_INO &&
3077 ++ ext4_should_journal_data(inode) &&
3078 ++ (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
3079 + journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
3080 + tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
3081 +
3082 +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
3083 +index 61ee01603940..08b4495c1b12 100644
3084 +--- a/fs/ext4/mballoc.c
3085 ++++ b/fs/ext4/mballoc.c
3086 +@@ -1232,6 +1232,7 @@ static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
3087 + static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
3088 + {
3089 + int order = 1;
3090 ++ int bb_incr = 1 << (e4b->bd_blkbits - 1);
3091 + void *bb;
3092 +
3093 + BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
3094 +@@ -1244,7 +1245,8 @@ static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
3095 + /* this block is part of buddy of order 'order' */
3096 + return order;
3097 + }
3098 +- bb += 1 << (e4b->bd_blkbits - order);
3099 ++ bb += bb_incr;
3100 ++ bb_incr >>= 1;
3101 + order++;
3102 + }
3103 + return 0;
3104 +@@ -2514,7 +2516,7 @@ int ext4_mb_init(struct super_block *sb)
3105 + {
3106 + struct ext4_sb_info *sbi = EXT4_SB(sb);
3107 + unsigned i, j;
3108 +- unsigned offset;
3109 ++ unsigned offset, offset_incr;
3110 + unsigned max;
3111 + int ret;
3112 +
3113 +@@ -2543,11 +2545,13 @@ int ext4_mb_init(struct super_block *sb)
3114 +
3115 + i = 1;
3116 + offset = 0;
3117 ++ offset_incr = 1 << (sb->s_blocksize_bits - 1);
3118 + max = sb->s_blocksize << 2;
3119 + do {
3120 + sbi->s_mb_offsets[i] = offset;
3121 + sbi->s_mb_maxs[i] = max;
3122 +- offset += 1 << (sb->s_blocksize_bits - i);
3123 ++ offset += offset_incr;
3124 ++ offset_incr = offset_incr >> 1;
3125 + max = max >> 1;
3126 + i++;
3127 + } while (i <= sb->s_blocksize_bits + 1);
3128 +@@ -2872,7 +2876,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
3129 + ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
3130 + "fs metadata", block, block+len);
3131 + /* File system mounted not to panic on error
3132 +- * Fix the bitmap and repeat the block allocation
3133 ++ * Fix the bitmap and return EUCLEAN
3134 + * We leak some of the blocks here.
3135 + */
3136 + ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3137 +@@ -2881,7 +2885,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
3138 + ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3139 + err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
3140 + if (!err)
3141 +- err = -EAGAIN;
3142 ++ err = -EUCLEAN;
3143 + goto out_err;
3144 + }
3145 +
3146 +@@ -4448,18 +4452,7 @@ repeat:
3147 + }
3148 + if (likely(ac->ac_status == AC_STATUS_FOUND)) {
3149 + *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
3150 +- if (*errp == -EAGAIN) {
3151 +- /*
3152 +- * drop the reference that we took
3153 +- * in ext4_mb_use_best_found
3154 +- */
3155 +- ext4_mb_release_context(ac);
3156 +- ac->ac_b_ex.fe_group = 0;
3157 +- ac->ac_b_ex.fe_start = 0;
3158 +- ac->ac_b_ex.fe_len = 0;
3159 +- ac->ac_status = AC_STATUS_CONTINUE;
3160 +- goto repeat;
3161 +- } else if (*errp) {
3162 ++ if (*errp) {
3163 + ext4_discard_allocated_blocks(ac);
3164 + goto errout;
3165 + } else {
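
Both mballoc hunks above cure the same latent undefined behavior: the
loops run until order (or i) reaches bd_blkbits + 1 (or
s_blocksize_bits + 1), at which point the old shift count went negative.
A standalone sketch of the equivalence, assuming 4 KiB blocks
(blkbits == 12); nothing below is kernel code:

#include <stdio.h>

int main(void)
{
	int blkbits = 12;				/* 4 KiB blocks */
	unsigned int offset = 0;
	unsigned int incr = 1u << (blkbits - 1);	/* 2048 */
	int order;

	for (order = 1; order <= blkbits + 1; order++) {
		printf("order %2d offset %u\n", order, offset);
		/* was: offset += 1 << (blkbits - order), which shifts
		 * by -1 once order == blkbits + 1 (undefined behavior) */
		offset += incr;
		incr >>= 1;	/* 2048, 1024, ..., 1, 0 */
	}
	return 0;
}
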
3166 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
3167 +index 063eb5094a63..15a81897df4e 100644
3168 +--- a/fs/ext4/super.c
3169 ++++ b/fs/ext4/super.c
3170 +@@ -2153,6 +2153,16 @@ static void ext4_orphan_cleanup(struct super_block *sb,
3171 + while (es->s_last_orphan) {
3172 + struct inode *inode;
3173 +
3174 ++ /*
3175 ++ * We may have encountered an error during cleanup; if
3176 ++ * so, skip the rest.
3177 ++ */
3178 ++ if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
3179 ++ jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
3180 ++ es->s_last_orphan = 0;
3181 ++ break;
3182 ++ }
3183 ++
3184 + inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
3185 + if (IS_ERR(inode)) {
3186 + es->s_last_orphan = 0;
3187 +diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
3188 +index 4d371f3b9a45..efe802e5bb3d 100644
3189 +--- a/fs/fuse/inode.c
3190 ++++ b/fs/fuse/inode.c
3191 +@@ -913,7 +913,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
3192 + arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
3193 + FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
3194 + FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
3195 +- FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
3196 ++ FUSE_FLOCK_LOCKS | FUSE_HAS_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
3197 + FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO;
3198 + req->in.h.opcode = FUSE_INIT;
3199 + req->in.numargs = 1;
3200 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
3201 +index d8ac734a1e44..c2b89a1a403b 100644
3202 +--- a/fs/nfs/nfs4proc.c
3203 ++++ b/fs/nfs/nfs4proc.c
3204 +@@ -2332,12 +2332,11 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
3205 + call_close |= is_wronly;
3206 + else if (is_wronly)
3207 + calldata->arg.fmode |= FMODE_WRITE;
3208 ++ if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE))
3209 ++ call_close |= is_rdwr;
3210 + } else if (is_rdwr)
3211 + calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
3212 +
3213 +- if (calldata->arg.fmode == 0)
3214 +- call_close |= is_rdwr;
3215 +-
3216 + if (!nfs4_valid_open_stateid(state))
3217 + call_close = 0;
3218 + spin_unlock(&state->owner->so_lock);
3219 +diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
3220 +index 41e6a04a561f..0f9a5b4ad53b 100644
3221 +--- a/fs/nilfs2/the_nilfs.c
3222 ++++ b/fs/nilfs2/the_nilfs.c
3223 +@@ -431,7 +431,7 @@ static int nilfs_valid_sb(struct nilfs_super_block *sbp)
3224 + if (!sbp || le16_to_cpu(sbp->s_magic) != NILFS_SUPER_MAGIC)
3225 + return 0;
3226 + bytes = le16_to_cpu(sbp->s_bytes);
3227 +- if (bytes > BLOCK_SIZE)
3228 ++ if (bytes < sumoff + 4 || bytes > BLOCK_SIZE)
3229 + return 0;
3230 + crc = crc32_le(le32_to_cpu(sbp->s_crc_seed), (unsigned char *)sbp,
3231 + sumoff);
3232 +diff --git a/fs/pipe.c b/fs/pipe.c
3233 +index 50267e6ba688..c281867c453e 100644
3234 +--- a/fs/pipe.c
3235 ++++ b/fs/pipe.c
3236 +@@ -39,6 +39,12 @@ unsigned int pipe_max_size = 1048576;
3237 + */
3238 + unsigned int pipe_min_size = PAGE_SIZE;
3239 +
2940 ++/* Maximum allocatable pages per user.  The hard limit is unset by default;
2941 ++ * the soft limit matches the defaults (PIPE_DEF_BUFFERS * INR_OPEN_CUR pages).
2942 ++ */
3243 ++unsigned long pipe_user_pages_hard;
3244 ++unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;
3245 ++
3246 + /*
3247 + * We use a start+len construction, which provides full use of the
3248 + * allocated memory.
3249 +@@ -794,20 +800,49 @@ pipe_fasync(int fd, struct file *filp, int on)
3250 + return retval;
3251 + }
3252 +
3253 ++static void account_pipe_buffers(struct pipe_inode_info *pipe,
3254 ++ unsigned long old, unsigned long new)
3255 ++{
3256 ++ atomic_long_add(new - old, &pipe->user->pipe_bufs);
3257 ++}
3258 ++
3259 ++static bool too_many_pipe_buffers_soft(struct user_struct *user)
3260 ++{
3261 ++ return pipe_user_pages_soft &&
3262 ++ atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_soft;
3263 ++}
3264 ++
3265 ++static bool too_many_pipe_buffers_hard(struct user_struct *user)
3266 ++{
3267 ++ return pipe_user_pages_hard &&
3268 ++ atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_hard;
3269 ++}
3270 ++
3271 + struct pipe_inode_info *alloc_pipe_info(void)
3272 + {
3273 + struct pipe_inode_info *pipe;
3274 +
3275 + pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
3276 + if (pipe) {
3277 +- pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * PIPE_DEF_BUFFERS, GFP_KERNEL);
3278 ++ unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
3279 ++ struct user_struct *user = get_current_user();
3280 ++
3281 ++ if (!too_many_pipe_buffers_hard(user)) {
3282 ++ if (too_many_pipe_buffers_soft(user))
3283 ++ pipe_bufs = 1;
3284 ++ pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * pipe_bufs, GFP_KERNEL);
3285 ++ }
3286 ++
3287 + if (pipe->bufs) {
3288 + init_waitqueue_head(&pipe->wait);
3289 + pipe->r_counter = pipe->w_counter = 1;
3290 +- pipe->buffers = PIPE_DEF_BUFFERS;
3291 ++ pipe->buffers = pipe_bufs;
3292 ++ pipe->user = user;
3293 ++ account_pipe_buffers(pipe, 0, pipe_bufs);
3294 + mutex_init(&pipe->mutex);
3295 + return pipe;
3296 + }
3297 ++ free_uid(user);
3298 + kfree(pipe);
3299 + }
3300 +
3301 +@@ -818,6 +853,8 @@ void free_pipe_info(struct pipe_inode_info *pipe)
3302 + {
3303 + int i;
3304 +
3305 ++ account_pipe_buffers(pipe, pipe->buffers, 0);
3306 ++ free_uid(pipe->user);
3307 + for (i = 0; i < pipe->buffers; i++) {
3308 + struct pipe_buffer *buf = pipe->bufs + i;
3309 + if (buf->ops)
3310 +@@ -1208,6 +1245,7 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
3311 + memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
3312 + }
3313 +
3314 ++ account_pipe_buffers(pipe, pipe->buffers, nr_pages);
3315 + pipe->curbuf = 0;
3316 + kfree(pipe->bufs);
3317 + pipe->bufs = bufs;
3318 +@@ -1279,6 +1317,11 @@ long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
3319 + if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
3320 + ret = -EPERM;
3321 + goto out;
3322 ++ } else if ((too_many_pipe_buffers_hard(pipe->user) ||
3323 ++ too_many_pipe_buffers_soft(pipe->user)) &&
3324 ++ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
3325 ++ ret = -EPERM;
3326 ++ goto out;
3327 + }
3328 + ret = pipe_set_size(pipe, nr_pages);
3329 + break;
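
Seen from userspace, the accounting surfaces in two places: the
fs.pipe-user-pages-{soft,hard} sysctls registered further down in
kernel/sysctl.c, and a new -EPERM case for F_SETPIPE_SZ shown in the hunk
above. A hedged sketch of hitting that case (only F_SETPIPE_SZ and the
sysctl names come from the patch; everything else is illustrative):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fds[2];

	if (pipe(fds) != 0)
		return 1;
	/* Request 1 MiB of pipe buffer.  Without CAP_SYS_RESOURCE or
	 * CAP_SYS_ADMIN this now fails with EPERM once the user is over
	 * fs.pipe-user-pages-soft or fs.pipe-user-pages-hard. */
	if (fcntl(fds[0], F_SETPIPE_SZ, 1024 * 1024) < 0)
		perror("F_SETPIPE_SZ");
	return 0;
}
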
3330 +diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
3331 +index 881324c08430..a335e4e6aba1 100644
3332 +--- a/fs/ubifs/file.c
3333 ++++ b/fs/ubifs/file.c
3334 +@@ -54,6 +54,7 @@
3335 + #include <linux/mount.h>
3336 + #include <linux/namei.h>
3337 + #include <linux/slab.h>
3338 ++#include <linux/migrate.h>
3339 +
3340 + static int read_block(struct inode *inode, void *addr, unsigned int block,
3341 + struct ubifs_data_node *dn)
3342 +@@ -1422,6 +1423,26 @@ static int ubifs_set_page_dirty(struct page *page)
3343 + return ret;
3344 + }
3345 +
3346 ++#ifdef CONFIG_MIGRATION
3347 ++static int ubifs_migrate_page(struct address_space *mapping,
3348 ++ struct page *newpage, struct page *page, enum migrate_mode mode)
3349 ++{
3350 ++ int rc;
3351 ++
3352 ++ rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode);
3353 ++ if (rc != MIGRATEPAGE_SUCCESS)
3354 ++ return rc;
3355 ++
3356 ++ if (PagePrivate(page)) {
3357 ++ ClearPagePrivate(page);
3358 ++ SetPagePrivate(newpage);
3359 ++ }
3360 ++
3361 ++ migrate_page_copy(newpage, page);
3362 ++ return MIGRATEPAGE_SUCCESS;
3363 ++}
3364 ++#endif
3365 ++
3366 + static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
3367 + {
3368 + /*
3369 +@@ -1558,6 +1579,9 @@ const struct address_space_operations ubifs_file_address_operations = {
3370 + .write_end = ubifs_write_end,
3371 + .invalidatepage = ubifs_invalidatepage,
3372 + .set_page_dirty = ubifs_set_page_dirty,
3373 ++#ifdef CONFIG_MIGRATION
3374 ++ .migratepage = ubifs_migrate_page,
3375 ++#endif
3376 + .releasepage = ubifs_releasepage,
3377 + };
3378 +
3379 +diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
3380 +index f010ab4594f1..06dec557d247 100644
3381 +--- a/fs/xfs/xfs_inode.c
3382 ++++ b/fs/xfs/xfs_inode.c
3383 +@@ -2604,13 +2604,14 @@ xfs_iflush_cluster(
3384 + * We need to check under the i_flags_lock for a valid inode
3385 + * here. Skip it if it is not valid or the wrong inode.
3386 + */
3387 +- spin_lock(&ip->i_flags_lock);
3388 +- if (!ip->i_ino ||
3389 ++ spin_lock(&iq->i_flags_lock);
3390 ++ if (!iq->i_ino ||
3391 ++ __xfs_iflags_test(iq, XFS_ISTALE) ||
3392 + (XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) {
3393 +- spin_unlock(&ip->i_flags_lock);
3394 ++ spin_unlock(&iq->i_flags_lock);
3395 + continue;
3396 + }
3397 +- spin_unlock(&ip->i_flags_lock);
3398 ++ spin_unlock(&iq->i_flags_lock);
3399 +
3400 + /*
3401 + * Do an un-protected check to see if the inode is dirty and
3402 +@@ -2726,7 +2727,7 @@ xfs_iflush(
3403 + struct xfs_buf **bpp)
3404 + {
3405 + struct xfs_mount *mp = ip->i_mount;
3406 +- struct xfs_buf *bp;
3407 ++ struct xfs_buf *bp = NULL;
3408 + struct xfs_dinode *dip;
3409 + int error;
3410 +
3411 +@@ -2768,14 +2769,22 @@ xfs_iflush(
3412 + }
3413 +
3414 + /*
3415 +- * Get the buffer containing the on-disk inode.
3416 ++ * Get the buffer containing the on-disk inode. We are doing a try-lock
3417 ++ * operation here, so we may get an EAGAIN error. In that case, we
3418 ++ * simply want to return with the inode still dirty.
3419 ++ *
3420 ++ * If we get any other error, we effectively have a corruption situation
3421 ++ * and we cannot flush the inode, so we treat it the same as failing
3422 ++ * xfs_iflush_int().
3423 + */
3424 + error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
3425 + 0);
3426 +- if (error || !bp) {
3427 ++ if (error == EAGAIN) {
3428 + xfs_ifunlock(ip);
3429 + return error;
3430 + }
3431 ++ if (error)
3432 ++ goto corrupt_out;
3433 +
3434 + /*
3435 + * First flush out the inode that xfs_iflush was called with.
3436 +@@ -2803,7 +2812,8 @@ xfs_iflush(
3437 + return 0;
3438 +
3439 + corrupt_out:
3440 +- xfs_buf_relse(bp);
3441 ++ if (bp)
3442 ++ xfs_buf_relse(bp);
3443 + xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3444 + cluster_corrupt_out:
3445 + error = XFS_ERROR(EFSCORRUPTED);
3446 +diff --git a/include/linux/console.h b/include/linux/console.h
3447 +index 73bab0f58af5..6877ffc97d8c 100644
3448 +--- a/include/linux/console.h
3449 ++++ b/include/linux/console.h
3450 +@@ -153,6 +153,7 @@ extern int console_trylock(void);
3451 + extern void console_unlock(void);
3452 + extern void console_conditional_schedule(void);
3453 + extern void console_unblank(void);
3454 ++extern void console_flush_on_panic(void);
3455 + extern struct tty_driver *console_device(int *);
3456 + extern void console_stop(struct console *);
3457 + extern void console_start(struct console *);
3458 +diff --git a/include/linux/migrate.h b/include/linux/migrate.h
3459 +index a405d3dc0f61..e98692748066 100644
3460 +--- a/include/linux/migrate.h
3461 ++++ b/include/linux/migrate.h
3462 +@@ -55,6 +55,9 @@ extern int migrate_vmas(struct mm_struct *mm,
3463 + extern void migrate_page_copy(struct page *newpage, struct page *page);
3464 + extern int migrate_huge_page_move_mapping(struct address_space *mapping,
3465 + struct page *newpage, struct page *page);
3466 ++extern int migrate_page_move_mapping(struct address_space *mapping,
3467 ++ struct page *newpage, struct page *page,
3468 ++ struct buffer_head *head, enum migrate_mode mode);
3469 + #else
3470 +
3471 + static inline void putback_lru_pages(struct list_head *l) {}
3472 +diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
3473 +index dd49566315c6..547a5846e6ac 100644
3474 +--- a/include/linux/netfilter/x_tables.h
3475 ++++ b/include/linux/netfilter/x_tables.h
3476 +@@ -239,11 +239,18 @@ extern void xt_unregister_match(struct xt_match *target);
3477 + extern int xt_register_matches(struct xt_match *match, unsigned int n);
3478 + extern void xt_unregister_matches(struct xt_match *match, unsigned int n);
3479 +
3480 ++int xt_check_entry_offsets(const void *base, const char *elems,
3481 ++ unsigned int target_offset,
3482 ++ unsigned int next_offset);
3483 ++
3484 + extern int xt_check_match(struct xt_mtchk_param *,
3485 + unsigned int size, u_int8_t proto, bool inv_proto);
3486 + extern int xt_check_target(struct xt_tgchk_param *,
3487 + unsigned int size, u_int8_t proto, bool inv_proto);
3488 +
3489 ++void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
3490 ++ struct xt_counters_info *info, bool compat);
3491 ++
3492 + extern struct xt_table *xt_register_table(struct net *net,
3493 + const struct xt_table *table,
3494 + struct xt_table_info *bootstrap,
3495 +@@ -423,7 +430,7 @@ extern void xt_compat_init_offsets(u_int8_t af, unsigned int number);
3496 + extern int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
3497 +
3498 + extern int xt_compat_match_offset(const struct xt_match *match);
3499 +-extern int xt_compat_match_from_user(struct xt_entry_match *m,
3500 ++extern void xt_compat_match_from_user(struct xt_entry_match *m,
3501 + void **dstptr, unsigned int *size);
3502 + extern int xt_compat_match_to_user(const struct xt_entry_match *m,
3503 + void __user **dstptr, unsigned int *size);
3504 +@@ -433,6 +440,9 @@ extern void xt_compat_target_from_user(struct xt_entry_target *t,
3505 + void **dstptr, unsigned int *size);
3506 + extern int xt_compat_target_to_user(const struct xt_entry_target *t,
3507 + void __user **dstptr, unsigned int *size);
3508 ++int xt_compat_check_entry_offsets(const void *base, const char *elems,
3509 ++ unsigned int target_offset,
3510 ++ unsigned int next_offset);
3511 +
3512 + #endif /* CONFIG_COMPAT */
3513 + #endif /* _X_TABLES_H */
3514 +diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
3515 +index ab5752692113..b3374f63bc36 100644
3516 +--- a/include/linux/pipe_fs_i.h
3517 ++++ b/include/linux/pipe_fs_i.h
3518 +@@ -42,6 +42,7 @@ struct pipe_buffer {
3519 + * @fasync_readers: reader side fasync
3520 + * @fasync_writers: writer side fasync
3521 + * @bufs: the circular array of pipe buffers
3522 ++ * @user: the user who created this pipe
3523 + **/
3524 + struct pipe_inode_info {
3525 + struct mutex mutex;
3526 +@@ -57,6 +58,7 @@ struct pipe_inode_info {
3527 + struct fasync_struct *fasync_readers;
3528 + struct fasync_struct *fasync_writers;
3529 + struct pipe_buffer *bufs;
3530 ++ struct user_struct *user;
3531 + };
3532 +
3533 + /*
3534 +@@ -140,6 +142,8 @@ void pipe_unlock(struct pipe_inode_info *);
3535 + void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *);
3536 +
3537 + extern unsigned int pipe_max_size, pipe_min_size;
3538 ++extern unsigned long pipe_user_pages_hard;
3539 ++extern unsigned long pipe_user_pages_soft;
3540 + int pipe_proc_fn(struct ctl_table *, int, void __user *, size_t *, loff_t *);
3541 +
3542 +
3543 +diff --git a/include/linux/sched.h b/include/linux/sched.h
3544 +index 4781332f2e11..7728941e7ddc 100644
3545 +--- a/include/linux/sched.h
3546 ++++ b/include/linux/sched.h
3547 +@@ -671,6 +671,7 @@ struct user_struct {
3548 + #endif
3549 + unsigned long locked_shm; /* How many pages of mlocked shm ? */
3550 + unsigned long unix_inflight; /* How many files in flight in unix sockets */
3551 ++ atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */
3552 +
3553 + #ifdef CONFIG_KEYS
3554 + struct key *uid_keyring; /* UID specific keyring */
3555 +diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h
3556 +index daec99af5d54..1c88b177cb9c 100644
3557 +--- a/include/linux/usb/ehci_def.h
3558 ++++ b/include/linux/usb/ehci_def.h
3559 +@@ -178,11 +178,11 @@ struct ehci_regs {
3560 + * PORTSCx
3561 + */
3562 + /* HOSTPC: offset 0x84 */
3563 +- u32 hostpc[1]; /* HOSTPC extension */
3564 ++ u32 hostpc[0]; /* HOSTPC extension */
3565 + #define HOSTPC_PHCD (1<<22) /* Phy clock disable */
3566 + #define HOSTPC_PSPD (3<<25) /* Port speed detection */
3567 +
3568 +- u32 reserved5[16];
3569 ++ u32 reserved5[17];
3570 +
3571 + /* USBMODE_EX: offset 0xc8 */
3572 + u32 usbmode_ex; /* USB Device mode extension */
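
The two size changes cancel out, so the register layout is untouched:
0x84 plus (1 + 16) u32s and 0x84 plus (0 + 17) u32s both leave usbmode_ex
at 0xc8. The point of hostpc[0] is that HOSTPC is a per-port register
bank which the driver indexes beyond element zero, and a one-element
array makes that formally out of bounds. A standalone sketch of the
offset arithmetic (hypothetical struct names; the zero-length array is
the GNU C extension the kernel relies on):

#include <stddef.h>
#include <stdio.h>

struct old_layout { unsigned int hostpc[1]; unsigned int r5[16]; unsigned int usbmode_ex; };
struct new_layout { unsigned int hostpc[0]; unsigned int r5[17]; unsigned int usbmode_ex; };

int main(void)
{
	/* both print 68, i.e. 0xc8 - 0x84 */
	printf("old %zu new %zu\n",
	       offsetof(struct old_layout, usbmode_ex),
	       offsetof(struct new_layout, usbmode_ex));
	return 0;
}
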
3573 +diff --git a/include/rdma/ib.h b/include/rdma/ib.h
3574 +new file mode 100644
3575 +index 000000000000..f09331ad0aba
3576 +--- /dev/null
3577 ++++ b/include/rdma/ib.h
3578 +@@ -0,0 +1,54 @@
3579 ++/*
3580 ++ * Copyright (c) 2010 Intel Corporation. All rights reserved.
3581 ++ *
3582 ++ * This software is available to you under a choice of one of two
3583 ++ * licenses. You may choose to be licensed under the terms of the GNU
3584 ++ * General Public License (GPL) Version 2, available from the file
3585 ++ * COPYING in the main directory of this source tree, or the
3586 ++ * OpenIB.org BSD license below:
3587 ++ *
3588 ++ * Redistribution and use in source and binary forms, with or
3589 ++ * without modification, are permitted provided that the following
3590 ++ * conditions are met:
3591 ++ *
3592 ++ * - Redistributions of source code must retain the above
3593 ++ * copyright notice, this list of conditions and the following
3594 ++ * disclaimer.
3595 ++ *
3596 ++ * - Redistributions in binary form must reproduce the above
3597 ++ * copyright notice, this list of conditions and the following
3598 ++ * disclaimer in the documentation and/or other materials
3599 ++ * provided with the distribution.
3600 ++ *
3601 ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
3602 ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
3603 ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
3604 ++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
3605 ++ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
3606 ++ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
3607 ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
3608 ++ * SOFTWARE.
3609 ++ */
3610 ++
3611 ++#if !defined(_RDMA_IB_H)
3612 ++#define _RDMA_IB_H
3613 ++
3614 ++#include <linux/types.h>
3615 ++#include <linux/sched.h>
3616 ++
3617 ++/*
3618 ++ * The IB interfaces that use write() as bi-directional ioctl() are
3619 ++ * fundamentally unsafe, since there are lots of ways to trigger "write()"
3620 ++ * calls from various contexts with elevated privileges. That includes the
3621 ++ * traditional suid executable error message writes, but also various kernel
3622 ++ * interfaces that can write to file descriptors.
3623 ++ *
3624 ++ * This function provides protection for the legacy API by restricting the
3625 ++ * calling context.
3626 ++ */
3627 ++static inline bool ib_safe_file_access(struct file *filp)
3628 ++{
3629 ++ return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS);
3630 ++}
3631 ++
3632 ++#endif /* _RDMA_IB_H */
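
A hedged sketch of the intended call site; the handler below is
hypothetical and only ib_safe_file_access() itself comes from this patch.
A legacy write()-as-ioctl entry point refuses requests whose credentials
or address limit differ from those captured when the descriptor was
opened:

#include <rdma/ib.h>
#include <linux/fs.h>

static ssize_t legacy_cmd_write(struct file *filp, const char __user *buf,
				size_t len, loff_t *pos)
{
	/* reject e.g. a suid binary's error message landing here, or a
	 * kernel-initiated write done under KERNEL_DS */
	if (!ib_safe_file_access(filp))
		return -EACCES;

	/* ... decode and dispatch the command in buf ... */
	return len;
}
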
3633 +diff --git a/kernel/module.c b/kernel/module.c
3634 +index f8a4f48b48a9..2c87e521032b 100644
3635 +--- a/kernel/module.c
3636 ++++ b/kernel/module.c
3637 +@@ -2475,13 +2475,18 @@ static inline void kmemleak_load_module(const struct module *mod,
3638 + #endif
3639 +
3640 + #ifdef CONFIG_MODULE_SIG
3641 +-static int module_sig_check(struct load_info *info)
3642 ++static int module_sig_check(struct load_info *info, int flags)
3643 + {
3644 + int err = -ENOKEY;
3645 + const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
3646 + const void *mod = info->hdr;
3647 +
3648 +- if (info->len > markerlen &&
3649 ++ /*
3650 ++ * Require flags == 0, as a module with version information
3651 ++ * removed is no longer the module that was signed
3652 ++ */
3653 ++ if (flags == 0 &&
3654 ++ info->len > markerlen &&
3655 + memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) {
3656 + /* We truncate the module to discard the signature */
3657 + info->len -= markerlen;
3658 +@@ -2503,7 +2508,7 @@ static int module_sig_check(struct load_info *info)
3659 + return err;
3660 + }
3661 + #else /* !CONFIG_MODULE_SIG */
3662 +-static int module_sig_check(struct load_info *info)
3663 ++static int module_sig_check(struct load_info *info, int flags)
3664 + {
3665 + return 0;
3666 + }
3667 +@@ -3228,7 +3233,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
3668 + struct module *mod;
3669 + long err;
3670 +
3671 +- err = module_sig_check(info);
3672 ++ err = module_sig_check(info, flags);
3673 + if (err)
3674 + goto free_copy;
3675 +
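
The flags here are the ones userspace passes to finit_module(2);
MODULE_INIT_IGNORE_VERMAGIC and MODULE_INIT_IGNORE_MODVERSIONS make the
loader discard the module's version information, so the image no longer
matches what was signed. A userspace sketch (example.ko is a
hypothetical module path):

#include <fcntl.h>
#include <linux/module.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	int fd = open("example.ko", O_RDONLY);

	if (fd < 0)
		return 1;
	/* flags == 0: the signature is verified as before */
	syscall(SYS_finit_module, fd, "", 0);
	/* nonzero flags: the image is now treated as unsigned */
	syscall(SYS_finit_module, fd, "", MODULE_INIT_IGNORE_VERMAGIC);
	return 0;
}
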
3676 +diff --git a/kernel/panic.c b/kernel/panic.c
3677 +index 167ec097ce8b..d3d74c4e2258 100644
3678 +--- a/kernel/panic.c
3679 ++++ b/kernel/panic.c
3680 +@@ -22,6 +22,7 @@
3681 + #include <linux/sysrq.h>
3682 + #include <linux/init.h>
3683 + #include <linux/nmi.h>
3684 ++#include <linux/console.h>
3685 +
3686 + #define PANIC_TIMER_STEP 100
3687 + #define PANIC_BLINK_SPD 18
3688 +@@ -128,6 +129,8 @@ void panic(const char *fmt, ...)
3689 +
3690 + bust_spinlocks(0);
3691 +
3692 ++ console_flush_on_panic();
3693 ++
3694 + if (!panic_blink)
3695 + panic_blink = no_blink;
3696 +
3697 +diff --git a/kernel/printk.c b/kernel/printk.c
3698 +index fd0154a57d6e..ee8f6be7d8a9 100644
3699 +--- a/kernel/printk.c
3700 ++++ b/kernel/printk.c
3701 +@@ -2033,13 +2033,24 @@ void console_unlock(void)
3702 + static u64 seen_seq;
3703 + unsigned long flags;
3704 + bool wake_klogd = false;
3705 +- bool retry;
3706 ++ bool do_cond_resched, retry;
3707 +
3708 + if (console_suspended) {
3709 + up(&console_sem);
3710 + return;
3711 + }
3712 +
3713 ++ /*
3714 ++ * Console drivers are called under logbuf_lock, so
3715 ++ * @console_may_schedule should be cleared before; however, we may
3716 ++ * end up dumping a lot of lines, for example, if called from
3717 ++ * console registration path, and should invoke cond_resched()
3718 ++ * between lines if allowable. Not doing so can cause a very long
3719 ++ * scheduling stall on a slow console leading to RCU stall and
3720 ++ * softlockup warnings which exacerbate the issue with more
3721 ++ * messages practically incapacitating the system.
3722 ++ */
3723 ++ do_cond_resched = console_may_schedule;
3724 + console_may_schedule = 0;
3725 +
3726 + /* flush buffered message fragment immediately to console */
3727 +@@ -2096,6 +2107,9 @@ skip:
3728 + call_console_drivers(level, text, len);
3729 + start_critical_timings();
3730 + local_irq_restore(flags);
3731 ++
3732 ++ if (do_cond_resched)
3733 ++ cond_resched();
3734 + }
3735 + console_locked = 0;
3736 + mutex_release(&console_lock_dep_map, 1, _RET_IP_);
3737 +@@ -2164,6 +2178,25 @@ void console_unblank(void)
3738 + console_unlock();
3739 + }
3740 +
3741 ++/**
3742 ++ * console_flush_on_panic - flush console content on panic
3743 ++ *
3744 ++ * Immediately output all pending messages no matter what.
3745 ++ */
3746 ++void console_flush_on_panic(void)
3747 ++{
3748 ++ /*
3749 ++ * If someone else is holding the console lock, trylock will fail
3750 ++ * and may_schedule may be set. Ignore and proceed to unlock so
3751 ++ * that messages are flushed out. As this can be called from any
3752 ++ * context and we don't want to get preempted while flushing,
3753 ++ * ensure may_schedule is cleared.
3754 ++ */
3755 ++ console_trylock();
3756 ++ console_may_schedule = 0;
3757 ++ console_unlock();
3758 ++}
3759 ++
3760 + /*
3761 + * Return the console tty driver structure and its associated index
3762 + */
3763 +diff --git a/kernel/signal.c b/kernel/signal.c
3764 +index 4d1f7fa3138d..7b81c53b0097 100644
3765 +--- a/kernel/signal.c
3766 ++++ b/kernel/signal.c
3767 +@@ -3004,11 +3004,9 @@ static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
3768 + * Nor can they impersonate a kill()/tgkill(), which adds source info.
3769 + */
3770 + if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3771 +- (task_pid_vnr(current) != pid)) {
3772 +- /* We used to allow any < 0 si_code */
3773 +- WARN_ON_ONCE(info->si_code < 0);
3774 ++ (task_pid_vnr(current) != pid))
3775 + return -EPERM;
3776 +- }
3777 ++
3778 + info->si_signo = sig;
3779 +
3780 + /* POSIX.1b doesn't mention process groups. */
3781 +@@ -3053,12 +3051,10 @@ static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
3782 + /* Not even root can pretend to send signals from the kernel.
3783 + * Nor can they impersonate a kill()/tgkill(), which adds source info.
3784 + */
3785 +- if (((info->si_code >= 0 || info->si_code == SI_TKILL)) &&
3786 +- (task_pid_vnr(current) != pid)) {
3787 +- /* We used to allow any < 0 si_code */
3788 +- WARN_ON_ONCE(info->si_code < 0);
3789 ++ if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3790 ++ (task_pid_vnr(current) != pid))
3791 + return -EPERM;
3792 +- }
3793 ++
3794 + info->si_signo = sig;
3795 +
3796 + return do_send_specific(tgid, pid, sig, info);
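
The only behavioral change in these two hunks is dropping the
WARN_ON_ONCE: SI_TKILL is negative, so any user could trip the warning at
will by queueing a forged SI_TKILL siginfo at another pid; the -EPERM
itself is unchanged. A userspace sketch of the still-rejected call
(target pid 1 is illustrative):

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	siginfo_t si = { 0 };

	si.si_signo = SIGUSR1;
	si.si_code = SI_TKILL;	/* pretend to be tgkill(); kernel-only */
	if (syscall(SYS_rt_sigqueueinfo, 1, SIGUSR1, &si) < 0)
		perror("rt_sigqueueinfo");	/* EPERM, no kernel WARN */
	return 0;
}
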
3797 +diff --git a/kernel/sysctl.c b/kernel/sysctl.c
3798 +index 9469f4c61a30..4fd49fe1046d 100644
3799 +--- a/kernel/sysctl.c
3800 ++++ b/kernel/sysctl.c
3801 +@@ -1632,6 +1632,20 @@ static struct ctl_table fs_table[] = {
3802 + .proc_handler = &pipe_proc_fn,
3803 + .extra1 = &pipe_min_size,
3804 + },
3805 ++ {
3806 ++ .procname = "pipe-user-pages-hard",
3807 ++ .data = &pipe_user_pages_hard,
3808 ++ .maxlen = sizeof(pipe_user_pages_hard),
3809 ++ .mode = 0644,
3810 ++ .proc_handler = proc_doulongvec_minmax,
3811 ++ },
3812 ++ {
3813 ++ .procname = "pipe-user-pages-soft",
3814 ++ .data = &pipe_user_pages_soft,
3815 ++ .maxlen = sizeof(pipe_user_pages_soft),
3816 ++ .mode = 0644,
3817 ++ .proc_handler = proc_doulongvec_minmax,
3818 ++ },
3819 + { }
3820 + };
3821 +
3822 +diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
3823 +index fdb23e84b011..7be4d67cecbd 100644
3824 +--- a/kernel/trace/trace_printk.c
3825 ++++ b/kernel/trace/trace_printk.c
3826 +@@ -38,6 +38,10 @@ struct trace_bprintk_fmt {
3827 + static inline struct trace_bprintk_fmt *lookup_format(const char *fmt)
3828 + {
3829 + struct trace_bprintk_fmt *pos;
3830 ++
3831 ++ if (!fmt)
3832 ++ return ERR_PTR(-EINVAL);
3833 ++
3834 + list_for_each_entry(pos, &trace_bprintk_fmt_list, list) {
3835 + if (!strcmp(pos->fmt, fmt))
3836 + return pos;
3837 +@@ -59,7 +63,8 @@ void hold_module_trace_bprintk_format(const char **start, const char **end)
3838 + for (iter = start; iter < end; iter++) {
3839 + struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);
3840 + if (tb_fmt) {
3841 +- *iter = tb_fmt->fmt;
3842 ++ if (!IS_ERR(tb_fmt))
3843 ++ *iter = tb_fmt->fmt;
3844 + continue;
3845 + }
3846 +
3847 +diff --git a/lib/dma-debug.c b/lib/dma-debug.c
3848 +index eb43517bf261..c32437f6be61 100644
3849 +--- a/lib/dma-debug.c
3850 ++++ b/lib/dma-debug.c
3851 +@@ -445,9 +445,9 @@ static struct dma_debug_entry *dma_entry_alloc(void)
3852 + spin_lock_irqsave(&free_entries_lock, flags);
3853 +
3854 + if (list_empty(&free_entries)) {
3855 +- pr_err("DMA-API: debugging out of memory - disabling\n");
3856 + global_disable = true;
3857 + spin_unlock_irqrestore(&free_entries_lock, flags);
3858 ++ pr_err("DMA-API: debugging out of memory - disabling\n");
3859 + return NULL;
3860 + }
3861 +
3862 +diff --git a/mm/migrate.c b/mm/migrate.c
3863 +index a88c12f2235d..808f8abb1b8f 100644
3864 +--- a/mm/migrate.c
3865 ++++ b/mm/migrate.c
3866 +@@ -30,6 +30,7 @@
3867 + #include <linux/mempolicy.h>
3868 + #include <linux/vmalloc.h>
3869 + #include <linux/security.h>
3870 ++#include <linux/backing-dev.h>
3871 + #include <linux/memcontrol.h>
3872 + #include <linux/syscalls.h>
3873 + #include <linux/hugetlb.h>
3874 +@@ -307,10 +308,12 @@ static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
3875 + * 2 for pages with a mapping
3876 + * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
3877 + */
3878 +-static int migrate_page_move_mapping(struct address_space *mapping,
3879 ++int migrate_page_move_mapping(struct address_space *mapping,
3880 + struct page *newpage, struct page *page,
3881 + struct buffer_head *head, enum migrate_mode mode)
3882 + {
3883 ++ struct zone *oldzone, *newzone;
3884 ++ int dirty;
3885 + int expected_count = 0;
3886 + void **pslot;
3887 +
3888 +@@ -321,6 +324,9 @@ static int migrate_page_move_mapping(struct address_space *mapping,
3889 + return MIGRATEPAGE_SUCCESS;
3890 + }
3891 +
3892 ++ oldzone = page_zone(page);
3893 ++ newzone = page_zone(newpage);
3894 ++
3895 + spin_lock_irq(&mapping->tree_lock);
3896 +
3897 + pslot = radix_tree_lookup_slot(&mapping->page_tree,
3898 +@@ -361,6 +367,13 @@ static int migrate_page_move_mapping(struct address_space *mapping,
3899 + set_page_private(newpage, page_private(page));
3900 + }
3901 +
3902 ++ /* Move dirty while page refs frozen and newpage not yet exposed */
3903 ++ dirty = PageDirty(page);
3904 ++ if (dirty) {
3905 ++ ClearPageDirty(page);
3906 ++ SetPageDirty(newpage);
3907 ++ }
3908 ++
3909 + radix_tree_replace_slot(pslot, newpage);
3910 +
3911 + /*
3912 +@@ -370,6 +383,9 @@ static int migrate_page_move_mapping(struct address_space *mapping,
3913 + */
3914 + page_unfreeze_refs(page, expected_count - 1);
3915 +
3916 ++ spin_unlock(&mapping->tree_lock);
3917 ++ /* Leave irq disabled to prevent preemption while updating stats */
3918 ++
3919 + /*
3920 + * If moved to a different zone then also account
3921 + * the page for that zone. Other VM counters will be
3922 +@@ -380,16 +396,23 @@ static int migrate_page_move_mapping(struct address_space *mapping,
3923 + * via NR_FILE_PAGES and NR_ANON_PAGES if they
3924 + * are mapped to swap space.
3925 + */
3926 +- __dec_zone_page_state(page, NR_FILE_PAGES);
3927 +- __inc_zone_page_state(newpage, NR_FILE_PAGES);
3928 +- if (!PageSwapCache(page) && PageSwapBacked(page)) {
3929 +- __dec_zone_page_state(page, NR_SHMEM);
3930 +- __inc_zone_page_state(newpage, NR_SHMEM);
3931 ++ if (newzone != oldzone) {
3932 ++ __dec_zone_state(oldzone, NR_FILE_PAGES);
3933 ++ __inc_zone_state(newzone, NR_FILE_PAGES);
3934 ++ if (PageSwapBacked(page) && !PageSwapCache(page)) {
3935 ++ __dec_zone_state(oldzone, NR_SHMEM);
3936 ++ __inc_zone_state(newzone, NR_SHMEM);
3937 ++ }
3938 ++ if (dirty && mapping_cap_account_dirty(mapping)) {
3939 ++ __dec_zone_state(oldzone, NR_FILE_DIRTY);
3940 ++ __inc_zone_state(newzone, NR_FILE_DIRTY);
3941 ++ }
3942 + }
3943 +- spin_unlock_irq(&mapping->tree_lock);
3944 ++ local_irq_enable();
3945 +
3946 + return MIGRATEPAGE_SUCCESS;
3947 + }
3948 ++EXPORT_SYMBOL(migrate_page_move_mapping);
3949 +
3950 + /*
3951 + * The expected number of remaining references is the same as that
3952 +@@ -460,20 +483,9 @@ void migrate_page_copy(struct page *newpage, struct page *page)
3953 + if (PageMappedToDisk(page))
3954 + SetPageMappedToDisk(newpage);
3955 +
3956 +- if (PageDirty(page)) {
3957 +- clear_page_dirty_for_io(page);
3958 +- /*
3959 +- * Want to mark the page and the radix tree as dirty, and
3960 +- * redo the accounting that clear_page_dirty_for_io undid,
3961 +- * but we can't use set_page_dirty because that function
3962 +- * is actually a signal that all of the page has become dirty.
3963 +- * Whereas only part of our page may be dirty.
3964 +- */
3965 +- if (PageSwapBacked(page))
3966 +- SetPageDirty(newpage);
3967 +- else
3968 +- __set_page_dirty_nobuffers(newpage);
3969 +- }
3970 ++ /* Move dirty on pages not done by migrate_page_move_mapping() */
3971 ++ if (PageDirty(page))
3972 ++ SetPageDirty(newpage);
3973 +
3974 + mlock_migrate_page(newpage, page);
3975 + ksm_migrate_page(newpage, page);
3976 +@@ -492,6 +504,7 @@ void migrate_page_copy(struct page *newpage, struct page *page)
3977 + if (PageWriteback(newpage))
3978 + end_page_writeback(newpage);
3979 + }
3980 ++EXPORT_SYMBOL(migrate_page_copy);
3981 +
3982 + /************************************************************
3983 + * Migration functions
3984 +diff --git a/mm/shmem.c b/mm/shmem.c
3985 +index 4e4a7349c5cd..cc02b6c6eec4 100644
3986 +--- a/mm/shmem.c
3987 ++++ b/mm/shmem.c
3988 +@@ -1948,9 +1948,11 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
3989 + NULL);
3990 + if (error) {
3991 + /* Remove the !PageUptodate pages we added */
3992 +- shmem_undo_range(inode,
3993 +- (loff_t)start << PAGE_CACHE_SHIFT,
3994 +- (loff_t)index << PAGE_CACHE_SHIFT, true);
3995 ++ if (index > start) {
3996 ++ shmem_undo_range(inode,
3997 ++ (loff_t)start << PAGE_CACHE_SHIFT,
3998 ++ ((loff_t)index << PAGE_CACHE_SHIFT) - 1, true);
3999 ++ }
4000 + goto undone;
4001 + }
4002 +
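
shmem_undo_range() takes an inclusive byte range, and the pages
instantiated before the failure are start through index - 1, so the old
end of index << PAGE_CACHE_SHIFT reached into one page too many and was
meaningless when index == start (nothing to undo). A quick arithmetic
sketch, assuming 4 KiB pages:

#include <stdio.h>

int main(void)
{
	long page = 4096, start = 2, index = 5;	/* pages 2, 3, 4 added */

	/* old: end at 20480 is the first byte of page 5, never added here */
	printf("old [%ld, %ld]\n", start * page, index * page);
	/* new: end at 20479 is the last byte of page 4 */
	printf("new [%ld, %ld]\n", start * page, index * page - 1);
	return 0;
}
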
4003 +diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
4004 +index 5f36f70ce44d..4b966c6c0145 100644
4005 +--- a/net/bluetooth/l2cap_sock.c
4006 ++++ b/net/bluetooth/l2cap_sock.c
4007 +@@ -725,7 +725,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
4008 + break;
4009 + }
4010 +
4011 +- if (get_user(opt, (u32 __user *) optval)) {
4012 ++ if (get_user(opt, (u16 __user *) optval)) {
4013 + err = -EFAULT;
4014 + break;
4015 + }
4016 +diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
4017 +index 7ec4e0522215..c1de8d404c47 100644
4018 +--- a/net/ceph/osdmap.c
4019 ++++ b/net/ceph/osdmap.c
4020 +@@ -798,6 +798,110 @@ bad:
4021 + }
4022 +
4023 + /*
4024 ++ * Encoding order is (new_up_client, new_state, new_weight). Need to
4025 ++ * apply in the (new_weight, new_state, new_up_client) order, because
4026 ++ * an incremental map may look like e.g.
4027 ++ *
4028 ++ * new_up_client: { osd=6, addr=... } # set osd_state and addr
4029 ++ * new_state: { osd=6, xorstate=EXISTS } # clear osd_state
4030 ++ */
4031 ++static int decode_new_up_state_weight(void **p, void *end,
4032 ++ struct ceph_osdmap *map)
4033 ++{
4034 ++ void *new_up_client;
4035 ++ void *new_state;
4036 ++ void *new_weight_end;
4037 ++ u32 len;
4038 ++
4039 ++ new_up_client = *p;
4040 ++ ceph_decode_32_safe(p, end, len, e_inval);
4041 ++ len *= sizeof(u32) + sizeof(struct ceph_entity_addr);
4042 ++ ceph_decode_need(p, end, len, e_inval);
4043 ++ *p += len;
4044 ++
4045 ++ new_state = *p;
4046 ++ ceph_decode_32_safe(p, end, len, e_inval);
4047 ++ len *= sizeof(u32) + sizeof(u8);
4048 ++ ceph_decode_need(p, end, len, e_inval);
4049 ++ *p += len;
4050 ++
4051 ++ /* new_weight */
4052 ++ ceph_decode_32_safe(p, end, len, e_inval);
4053 ++ while (len--) {
4054 ++ s32 osd;
4055 ++ u32 w;
4056 ++
4057 ++ ceph_decode_need(p, end, 2*sizeof(u32), e_inval);
4058 ++ osd = ceph_decode_32(p);
4059 ++ w = ceph_decode_32(p);
4060 ++ BUG_ON(osd >= map->max_osd);
4061 ++ pr_info("osd%d weight 0x%x %s\n", osd, w,
4062 ++ w == CEPH_OSD_IN ? "(in)" :
4063 ++ (w == CEPH_OSD_OUT ? "(out)" : ""));
4064 ++ map->osd_weight[osd] = w;
4065 ++
4066 ++ /*
4067 ++ * If we are marking in, set the EXISTS, and clear the
4068 ++ * AUTOOUT and NEW bits.
4069 ++ */
4070 ++ if (w) {
4071 ++ map->osd_state[osd] |= CEPH_OSD_EXISTS;
4072 ++ map->osd_state[osd] &= ~(CEPH_OSD_AUTOOUT |
4073 ++ CEPH_OSD_NEW);
4074 ++ }
4075 ++ }
4076 ++ new_weight_end = *p;
4077 ++
4078 ++ /* new_state (up/down) */
4079 ++ *p = new_state;
4080 ++ len = ceph_decode_32(p);
4081 ++ while (len--) {
4082 ++ s32 osd;
4083 ++ u8 xorstate;
4084 ++
4085 ++ osd = ceph_decode_32(p);
4086 ++ xorstate = ceph_decode_8(p);
4087 ++ if (xorstate == 0)
4088 ++ xorstate = CEPH_OSD_UP;
4089 ++ BUG_ON(osd >= map->max_osd);
4090 ++ if ((map->osd_state[osd] & CEPH_OSD_UP) &&
4091 ++ (xorstate & CEPH_OSD_UP))
4092 ++ pr_info("osd%d down\n", osd);
4093 ++ if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
4094 ++ (xorstate & CEPH_OSD_EXISTS)) {
4095 ++ pr_info("osd%d does not exist\n", osd);
4096 ++ map->osd_weight[osd] = CEPH_OSD_IN;
4097 ++ memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr));
4098 ++ map->osd_state[osd] = 0;
4099 ++ } else {
4100 ++ map->osd_state[osd] ^= xorstate;
4101 ++ }
4102 ++ }
4103 ++
4104 ++ /* new_up_client */
4105 ++ *p = new_up_client;
4106 ++ len = ceph_decode_32(p);
4107 ++ while (len--) {
4108 ++ s32 osd;
4109 ++ struct ceph_entity_addr addr;
4110 ++
4111 ++ osd = ceph_decode_32(p);
4112 ++ ceph_decode_copy(p, &addr, sizeof(addr));
4113 ++ ceph_decode_addr(&addr);
4114 ++ BUG_ON(osd >= map->max_osd);
4115 ++ pr_info("osd%d up\n", osd);
4116 ++ map->osd_state[osd] |= CEPH_OSD_EXISTS | CEPH_OSD_UP;
4117 ++ map->osd_addr[osd] = addr;
4118 ++ }
4119 ++
4120 ++ *p = new_weight_end;
4121 ++ return 0;
4122 ++
4123 ++e_inval:
4124 ++ return -EINVAL;
4125 ++}
4126 ++
4127 ++/*
4128 + * decode and apply an incremental map update.
4129 + */
4130 + struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
4131 +@@ -912,50 +1016,10 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
4132 + __remove_pg_pool(&map->pg_pools, pi);
4133 + }
4134 +
4135 +- /* new_up */
4136 +- err = -EINVAL;
4137 +- ceph_decode_32_safe(p, end, len, bad);
4138 +- while (len--) {
4139 +- u32 osd;
4140 +- struct ceph_entity_addr addr;
4141 +- ceph_decode_32_safe(p, end, osd, bad);
4142 +- ceph_decode_copy_safe(p, end, &addr, sizeof(addr), bad);
4143 +- ceph_decode_addr(&addr);
4144 +- pr_info("osd%d up\n", osd);
4145 +- BUG_ON(osd >= map->max_osd);
4146 +- map->osd_state[osd] |= CEPH_OSD_UP;
4147 +- map->osd_addr[osd] = addr;
4148 +- }
4149 +-
4150 +- /* new_state */
4151 +- ceph_decode_32_safe(p, end, len, bad);
4152 +- while (len--) {
4153 +- u32 osd;
4154 +- u8 xorstate;
4155 +- ceph_decode_32_safe(p, end, osd, bad);
4156 +- xorstate = **(u8 **)p;
4157 +- (*p)++; /* clean flag */
4158 +- if (xorstate == 0)
4159 +- xorstate = CEPH_OSD_UP;
4160 +- if (xorstate & CEPH_OSD_UP)
4161 +- pr_info("osd%d down\n", osd);
4162 +- if (osd < map->max_osd)
4163 +- map->osd_state[osd] ^= xorstate;
4164 +- }
4165 +-
4166 +- /* new_weight */
4167 +- ceph_decode_32_safe(p, end, len, bad);
4168 +- while (len--) {
4169 +- u32 osd, off;
4170 +- ceph_decode_need(p, end, sizeof(u32)*2, bad);
4171 +- osd = ceph_decode_32(p);
4172 +- off = ceph_decode_32(p);
4173 +- pr_info("osd%d weight 0x%x %s\n", osd, off,
4174 +- off == CEPH_OSD_IN ? "(in)" :
4175 +- (off == CEPH_OSD_OUT ? "(out)" : ""));
4176 +- if (osd < map->max_osd)
4177 +- map->osd_weight[osd] = off;
4178 +- }
4179 ++ /* new_up_client, new_state, new_weight */
4180 ++ err = decode_new_up_state_weight(p, end, map);
4181 ++ if (err)
4182 ++ goto bad;
4183 +
4184 + /* new_pg_temp */
4185 + ceph_decode_32_safe(p, end, len, bad);
4186 +diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
4187 +index b31553d385bb..89570f070e0e 100644
4188 +--- a/net/ipv4/ipmr.c
4189 ++++ b/net/ipv4/ipmr.c
4190 +@@ -881,8 +881,10 @@ static struct mfc_cache *ipmr_cache_alloc(void)
4191 + {
4192 + struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
4193 +
4194 +- if (c)
4195 ++ if (c) {
4196 ++ c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
4197 + c->mfc_un.res.minvif = MAXVIFS;
4198 ++ }
4199 + return c;
4200 + }
4201 +
4202 +diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
4203 +index c8abe31961ed..95a5f261fe8a 100644
4204 +--- a/net/ipv4/netfilter/arp_tables.c
4205 ++++ b/net/ipv4/netfilter/arp_tables.c
4206 +@@ -350,11 +350,12 @@ unsigned int arpt_do_table(struct sk_buff *skb,
4207 + }
4208 +
4209 + /* All zeroes == unconditional rule. */
4210 +-static inline bool unconditional(const struct arpt_arp *arp)
4211 ++static inline bool unconditional(const struct arpt_entry *e)
4212 + {
4213 + static const struct arpt_arp uncond;
4214 +
4215 +- return memcmp(arp, &uncond, sizeof(uncond)) == 0;
4216 ++ return e->target_offset == sizeof(struct arpt_entry) &&
4217 ++ memcmp(&e->arp, &uncond, sizeof(uncond)) == 0;
4218 + }
4219 +
4220 + /* Figures out from what hook each rule can be called: returns 0 if
4221 +@@ -393,11 +394,10 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
4222 + |= ((1 << hook) | (1 << NF_ARP_NUMHOOKS));
4223 +
4224 + /* Unconditional return/END. */
4225 +- if ((e->target_offset == sizeof(struct arpt_entry) &&
4226 ++ if ((unconditional(e) &&
4227 + (strcmp(t->target.u.user.name,
4228 + XT_STANDARD_TARGET) == 0) &&
4229 +- t->verdict < 0 && unconditional(&e->arp)) ||
4230 +- visited) {
4231 ++ t->verdict < 0) || visited) {
4232 + unsigned int oldpos, size;
4233 +
4234 + if ((strcmp(t->target.u.user.name,
4235 +@@ -430,6 +430,8 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
4236 + size = e->next_offset;
4237 + e = (struct arpt_entry *)
4238 + (entry0 + pos + size);
4239 ++ if (pos + size >= newinfo->size)
4240 ++ return 0;
4241 + e->counters.pcnt = pos;
4242 + pos += size;
4243 + } else {
4244 +@@ -452,6 +454,8 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
4245 + } else {
4246 + /* ... this is a fallthru */
4247 + newpos = pos + e->next_offset;
4248 ++ if (newpos >= newinfo->size)
4249 ++ return 0;
4250 + }
4251 + e = (struct arpt_entry *)
4252 + (entry0 + newpos);
4253 +@@ -465,25 +469,6 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
4254 + return 1;
4255 + }
4256 +
4257 +-static inline int check_entry(const struct arpt_entry *e, const char *name)
4258 +-{
4259 +- const struct xt_entry_target *t;
4260 +-
4261 +- if (!arp_checkentry(&e->arp)) {
4262 +- duprintf("arp_tables: arp check failed %p %s.\n", e, name);
4263 +- return -EINVAL;
4264 +- }
4265 +-
4266 +- if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset)
4267 +- return -EINVAL;
4268 +-
4269 +- t = arpt_get_target_c(e);
4270 +- if (e->target_offset + t->u.target_size > e->next_offset)
4271 +- return -EINVAL;
4272 +-
4273 +- return 0;
4274 +-}
4275 +-
4276 + static inline int check_target(struct arpt_entry *e, const char *name)
4277 + {
4278 + struct xt_entry_target *t = arpt_get_target(e);
4279 +@@ -513,10 +498,6 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
4280 + struct xt_target *target;
4281 + int ret;
4282 +
4283 +- ret = check_entry(e, name);
4284 +- if (ret)
4285 +- return ret;
4286 +-
4287 + t = arpt_get_target(e);
4288 + target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
4289 + t->u.user.revision);
4290 +@@ -542,7 +523,7 @@ static bool check_underflow(const struct arpt_entry *e)
4291 + const struct xt_entry_target *t;
4292 + unsigned int verdict;
4293 +
4294 +- if (!unconditional(&e->arp))
4295 ++ if (!unconditional(e))
4296 + return false;
4297 + t = arpt_get_target_c(e);
4298 + if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
4299 +@@ -561,9 +542,11 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
4300 + unsigned int valid_hooks)
4301 + {
4302 + unsigned int h;
4303 ++ int err;
4304 +
4305 + if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 ||
4306 +- (unsigned char *)e + sizeof(struct arpt_entry) >= limit) {
4307 ++ (unsigned char *)e + sizeof(struct arpt_entry) >= limit ||
4308 ++ (unsigned char *)e + e->next_offset > limit) {
4309 + duprintf("Bad offset %p\n", e);
4310 + return -EINVAL;
4311 + }
4312 +@@ -575,6 +558,14 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
4313 + return -EINVAL;
4314 + }
4315 +
4316 ++ if (!arp_checkentry(&e->arp))
4317 ++ return -EINVAL;
4318 ++
4319 ++ err = xt_check_entry_offsets(e, e->elems, e->target_offset,
4320 ++ e->next_offset);
4321 ++ if (err)
4322 ++ return err;
4323 ++
4324 + /* Check hooks & underflows */
4325 + for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
4326 + if (!(valid_hooks & (1 << h)))
4327 +@@ -583,9 +574,9 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
4328 + newinfo->hook_entry[h] = hook_entries[h];
4329 + if ((unsigned char *)e - base == underflows[h]) {
4330 + if (!check_underflow(e)) {
4331 +- pr_err("Underflows must be unconditional and "
4332 +- "use the STANDARD target with "
4333 +- "ACCEPT/DROP\n");
4334 ++ pr_debug("Underflows must be unconditional and "
4335 ++ "use the STANDARD target with "
4336 ++ "ACCEPT/DROP\n");
4337 + return -EINVAL;
4338 + }
4339 + newinfo->underflow[h] = underflows[h];
4340 +@@ -675,10 +666,8 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
4341 + }
4342 + }
4343 +
4344 +- if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) {
4345 +- duprintf("Looping hook\n");
4346 ++ if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
4347 + return -ELOOP;
4348 +- }
4349 +
4350 + /* Finally, each sanity check must pass */
4351 + i = 0;
4352 +@@ -1071,6 +1060,9 @@ static int do_replace(struct net *net, const void __user *user,
4353 + /* overflow check */
4354 + if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
4355 + return -ENOMEM;
4356 ++ if (tmp.num_counters == 0)
4357 ++ return -EINVAL;
4358 ++
4359 + tmp.name[sizeof(tmp.name)-1] = 0;
4360 +
4361 + newinfo = xt_alloc_table_info(tmp.size);
4362 +@@ -1111,56 +1103,18 @@ static int do_add_counters(struct net *net, const void __user *user,
4363 + unsigned int i, curcpu;
4364 + struct xt_counters_info tmp;
4365 + struct xt_counters *paddc;
4366 +- unsigned int num_counters;
4367 +- const char *name;
4368 +- int size;
4369 +- void *ptmp;
4370 + struct xt_table *t;
4371 + const struct xt_table_info *private;
4372 + int ret = 0;
4373 + void *loc_cpu_entry;
4374 + struct arpt_entry *iter;
4375 + unsigned int addend;
4376 +-#ifdef CONFIG_COMPAT
4377 +- struct compat_xt_counters_info compat_tmp;
4378 +-
4379 +- if (compat) {
4380 +- ptmp = &compat_tmp;
4381 +- size = sizeof(struct compat_xt_counters_info);
4382 +- } else
4383 +-#endif
4384 +- {
4385 +- ptmp = &tmp;
4386 +- size = sizeof(struct xt_counters_info);
4387 +- }
4388 +
4389 +- if (copy_from_user(ptmp, user, size) != 0)
4390 +- return -EFAULT;
4391 ++ paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
4392 ++ if (IS_ERR(paddc))
4393 ++ return PTR_ERR(paddc);
4394 +
4395 +-#ifdef CONFIG_COMPAT
4396 +- if (compat) {
4397 +- num_counters = compat_tmp.num_counters;
4398 +- name = compat_tmp.name;
4399 +- } else
4400 +-#endif
4401 +- {
4402 +- num_counters = tmp.num_counters;
4403 +- name = tmp.name;
4404 +- }
4405 +-
4406 +- if (len != size + num_counters * sizeof(struct xt_counters))
4407 +- return -EINVAL;
4408 +-
4409 +- paddc = vmalloc(len - size);
4410 +- if (!paddc)
4411 +- return -ENOMEM;
4412 +-
4413 +- if (copy_from_user(paddc, user + size, len - size) != 0) {
4414 +- ret = -EFAULT;
4415 +- goto free;
4416 +- }
4417 +-
4418 +- t = xt_find_table_lock(net, NFPROTO_ARP, name);
4419 ++ t = xt_find_table_lock(net, NFPROTO_ARP, tmp.name);
4420 + if (IS_ERR_OR_NULL(t)) {
4421 + ret = t ? PTR_ERR(t) : -ENOENT;
4422 + goto free;
4423 +@@ -1168,7 +1122,7 @@ static int do_add_counters(struct net *net, const void __user *user,
4424 +
4425 + local_bh_disable();
4426 + private = t->private;
4427 +- if (private->number != num_counters) {
4428 ++ if (private->number != tmp.num_counters) {
4429 + ret = -EINVAL;
4430 + goto unlock_up_free;
4431 + }
4432 +@@ -1194,6 +1148,18 @@ static int do_add_counters(struct net *net, const void __user *user,
4433 + }
4434 +
4435 + #ifdef CONFIG_COMPAT
4436 ++struct compat_arpt_replace {
4437 ++ char name[XT_TABLE_MAXNAMELEN];
4438 ++ u32 valid_hooks;
4439 ++ u32 num_entries;
4440 ++ u32 size;
4441 ++ u32 hook_entry[NF_ARP_NUMHOOKS];
4442 ++ u32 underflow[NF_ARP_NUMHOOKS];
4443 ++ u32 num_counters;
4444 ++ compat_uptr_t counters;
4445 ++ struct compat_arpt_entry entries[0];
4446 ++};
4447 ++
4448 + static inline void compat_release_entry(struct compat_arpt_entry *e)
4449 + {
4450 + struct xt_entry_target *t;
4451 +@@ -1202,24 +1168,22 @@ static inline void compat_release_entry(struct compat_arpt_entry *e)
4452 + module_put(t->u.kernel.target->me);
4453 + }
4454 +
4455 +-static inline int
4456 ++static int
4457 + check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
4458 + struct xt_table_info *newinfo,
4459 + unsigned int *size,
4460 + const unsigned char *base,
4461 +- const unsigned char *limit,
4462 +- const unsigned int *hook_entries,
4463 +- const unsigned int *underflows,
4464 +- const char *name)
4465 ++ const unsigned char *limit)
4466 + {
4467 + struct xt_entry_target *t;
4468 + struct xt_target *target;
4469 + unsigned int entry_offset;
4470 +- int ret, off, h;
4471 ++ int ret, off;
4472 +
4473 + duprintf("check_compat_entry_size_and_hooks %p\n", e);
4474 + if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 ||
4475 +- (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit) {
4476 ++ (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit ||
4477 ++ (unsigned char *)e + e->next_offset > limit) {
4478 + duprintf("Bad offset %p, limit = %p\n", e, limit);
4479 + return -EINVAL;
4480 + }
4481 +@@ -1231,8 +1195,11 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
4482 + return -EINVAL;
4483 + }
4484 +
4485 +- /* For purposes of check_entry casting the compat entry is fine */
4486 +- ret = check_entry((struct arpt_entry *)e, name);
4487 ++ if (!arp_checkentry(&e->arp))
4488 ++ return -EINVAL;
4489 ++
4490 ++ ret = xt_compat_check_entry_offsets(e, e->elems, e->target_offset,
4491 ++ e->next_offset);
4492 + if (ret)
4493 + return ret;
4494 +
4495 +@@ -1256,17 +1223,6 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
4496 + if (ret)
4497 + goto release_target;
4498 +
4499 +- /* Check hooks & underflows */
4500 +- for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
4501 +- if ((unsigned char *)e - base == hook_entries[h])
4502 +- newinfo->hook_entry[h] = hook_entries[h];
4503 +- if ((unsigned char *)e - base == underflows[h])
4504 +- newinfo->underflow[h] = underflows[h];
4505 +- }
4506 +-
4507 +- /* Clear counters and comefrom */
4508 +- memset(&e->counters, 0, sizeof(e->counters));
4509 +- e->comefrom = 0;
4510 + return 0;
4511 +
4512 + release_target:
4513 +@@ -1275,18 +1231,17 @@ out:
4514 + return ret;
4515 + }
4516 +
4517 +-static int
4518 ++static void
4519 + compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
4520 +- unsigned int *size, const char *name,
4521 ++ unsigned int *size,
4522 + struct xt_table_info *newinfo, unsigned char *base)
4523 + {
4524 + struct xt_entry_target *t;
4525 + struct xt_target *target;
4526 + struct arpt_entry *de;
4527 + unsigned int origsize;
4528 +- int ret, h;
4529 ++ int h;
4530 +
4531 +- ret = 0;
4532 + origsize = *size;
4533 + de = (struct arpt_entry *)*dstptr;
4534 + memcpy(de, e, sizeof(struct arpt_entry));
4535 +@@ -1307,144 +1262,81 @@ compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
4536 + if ((unsigned char *)de - base < newinfo->underflow[h])
4537 + newinfo->underflow[h] -= origsize - *size;
4538 + }
4539 +- return ret;
4540 + }
4541 +
4542 +-static int translate_compat_table(const char *name,
4543 +- unsigned int valid_hooks,
4544 +- struct xt_table_info **pinfo,
4545 ++static int translate_compat_table(struct xt_table_info **pinfo,
4546 + void **pentry0,
4547 +- unsigned int total_size,
4548 +- unsigned int number,
4549 +- unsigned int *hook_entries,
4550 +- unsigned int *underflows)
4551 ++ const struct compat_arpt_replace *compatr)
4552 + {
4553 + unsigned int i, j;
4554 + struct xt_table_info *newinfo, *info;
4555 + void *pos, *entry0, *entry1;
4556 + struct compat_arpt_entry *iter0;
4557 +- struct arpt_entry *iter1;
4558 ++ struct arpt_replace repl;
4559 + unsigned int size;
4560 + int ret = 0;
4561 +
4562 + info = *pinfo;
4563 + entry0 = *pentry0;
4564 +- size = total_size;
4565 +- info->number = number;
4566 +-
4567 +- /* Init all hooks to impossible value. */
4568 +- for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
4569 +- info->hook_entry[i] = 0xFFFFFFFF;
4570 +- info->underflow[i] = 0xFFFFFFFF;
4571 +- }
4572 ++ size = compatr->size;
4573 ++ info->number = compatr->num_entries;
4574 +
4575 + duprintf("translate_compat_table: size %u\n", info->size);
4576 + j = 0;
4577 + xt_compat_lock(NFPROTO_ARP);
4578 +- xt_compat_init_offsets(NFPROTO_ARP, number);
4579 ++ xt_compat_init_offsets(NFPROTO_ARP, compatr->num_entries);
4580 + /* Walk through entries, checking offsets. */
4581 +- xt_entry_foreach(iter0, entry0, total_size) {
4582 ++ xt_entry_foreach(iter0, entry0, compatr->size) {
4583 + ret = check_compat_entry_size_and_hooks(iter0, info, &size,
4584 + entry0,
4585 +- entry0 + total_size,
4586 +- hook_entries,
4587 +- underflows,
4588 +- name);
4589 ++ entry0 + compatr->size);
4590 + if (ret != 0)
4591 + goto out_unlock;
4592 + ++j;
4593 + }
4594 +
4595 + ret = -EINVAL;
4596 +- if (j != number) {
4597 ++ if (j != compatr->num_entries) {
4598 + duprintf("translate_compat_table: %u not %u entries\n",
4599 +- j, number);
4600 ++ j, compatr->num_entries);
4601 + goto out_unlock;
4602 + }
4603 +
4604 +- /* Check hooks all assigned */
4605 +- for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
4606 +- /* Only hooks which are valid */
4607 +- if (!(valid_hooks & (1 << i)))
4608 +- continue;
4609 +- if (info->hook_entry[i] == 0xFFFFFFFF) {
4610 +- duprintf("Invalid hook entry %u %u\n",
4611 +- i, hook_entries[i]);
4612 +- goto out_unlock;
4613 +- }
4614 +- if (info->underflow[i] == 0xFFFFFFFF) {
4615 +- duprintf("Invalid underflow %u %u\n",
4616 +- i, underflows[i]);
4617 +- goto out_unlock;
4618 +- }
4619 +- }
4620 +-
4621 + ret = -ENOMEM;
4622 + newinfo = xt_alloc_table_info(size);
4623 + if (!newinfo)
4624 + goto out_unlock;
4625 +
4626 +- newinfo->number = number;
4627 ++ newinfo->number = compatr->num_entries;
4628 + for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
4629 + newinfo->hook_entry[i] = info->hook_entry[i];
4630 + newinfo->underflow[i] = info->underflow[i];
4631 + }
4632 + entry1 = newinfo->entries[raw_smp_processor_id()];
4633 + pos = entry1;
4634 +- size = total_size;
4635 +- xt_entry_foreach(iter0, entry0, total_size) {
4636 +- ret = compat_copy_entry_from_user(iter0, &pos, &size,
4637 +- name, newinfo, entry1);
4638 +- if (ret != 0)
4639 +- break;
4640 +- }
4641 ++ size = compatr->size;
4642 ++ xt_entry_foreach(iter0, entry0, compatr->size)
4643 ++ compat_copy_entry_from_user(iter0, &pos, &size,
4644 ++ newinfo, entry1);
4645 ++
4646 ++ /* all module references in entry0 are now gone */
4647 ++
4648 + xt_compat_flush_offsets(NFPROTO_ARP);
4649 + xt_compat_unlock(NFPROTO_ARP);
4650 +- if (ret)
4651 +- goto free_newinfo;
4652 +
4653 +- ret = -ELOOP;
4654 +- if (!mark_source_chains(newinfo, valid_hooks, entry1))
4655 +- goto free_newinfo;
4656 ++ memcpy(&repl, compatr, sizeof(*compatr));
4657 +
4658 +- i = 0;
4659 +- xt_entry_foreach(iter1, entry1, newinfo->size) {
4660 +- ret = check_target(iter1, name);
4661 +- if (ret != 0)
4662 +- break;
4663 +- ++i;
4664 +- if (strcmp(arpt_get_target(iter1)->u.user.name,
4665 +- XT_ERROR_TARGET) == 0)
4666 +- ++newinfo->stacksize;
4667 +- }
4668 +- if (ret) {
4669 +- /*
4670 +- * The first i matches need cleanup_entry (calls ->destroy)
4671 +- * because they had called ->check already. The other j-i
4672 +- * entries need only release.
4673 +- */
4674 +- int skip = i;
4675 +- j -= i;
4676 +- xt_entry_foreach(iter0, entry0, newinfo->size) {
4677 +- if (skip-- > 0)
4678 +- continue;
4679 +- if (j-- == 0)
4680 +- break;
4681 +- compat_release_entry(iter0);
4682 +- }
4683 +- xt_entry_foreach(iter1, entry1, newinfo->size) {
4684 +- if (i-- == 0)
4685 +- break;
4686 +- cleanup_entry(iter1);
4687 +- }
4688 +- xt_free_table_info(newinfo);
4689 +- return ret;
4690 ++ for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
4691 ++ repl.hook_entry[i] = newinfo->hook_entry[i];
4692 ++ repl.underflow[i] = newinfo->underflow[i];
4693 + }
4694 +
4695 +- /* And one copy for every other CPU */
4696 +- for_each_possible_cpu(i)
4697 +- if (newinfo->entries[i] && newinfo->entries[i] != entry1)
4698 +- memcpy(newinfo->entries[i], entry1, newinfo->size);
4699 ++ repl.num_counters = 0;
4700 ++ repl.counters = NULL;
4701 ++ repl.size = newinfo->size;
4702 ++ ret = translate_table(newinfo, entry1, &repl);
4703 ++ if (ret)
4704 ++ goto free_newinfo;
4705 +
4706 + *pinfo = newinfo;
4707 + *pentry0 = entry1;
4708 +@@ -1453,31 +1345,18 @@ static int translate_compat_table(const char *name,
4709 +
4710 + free_newinfo:
4711 + xt_free_table_info(newinfo);
4712 +-out:
4713 +- xt_entry_foreach(iter0, entry0, total_size) {
4714 ++ return ret;
4715 ++out_unlock:
4716 ++ xt_compat_flush_offsets(NFPROTO_ARP);
4717 ++ xt_compat_unlock(NFPROTO_ARP);
4718 ++ xt_entry_foreach(iter0, entry0, compatr->size) {
4719 + if (j-- == 0)
4720 + break;
4721 + compat_release_entry(iter0);
4722 + }
4723 + return ret;
4724 +-out_unlock:
4725 +- xt_compat_flush_offsets(NFPROTO_ARP);
4726 +- xt_compat_unlock(NFPROTO_ARP);
4727 +- goto out;
4728 + }
4729 +
4730 +-struct compat_arpt_replace {
4731 +- char name[XT_TABLE_MAXNAMELEN];
4732 +- u32 valid_hooks;
4733 +- u32 num_entries;
4734 +- u32 size;
4735 +- u32 hook_entry[NF_ARP_NUMHOOKS];
4736 +- u32 underflow[NF_ARP_NUMHOOKS];
4737 +- u32 num_counters;
4738 +- compat_uptr_t counters;
4739 +- struct compat_arpt_entry entries[0];
4740 +-};
4741 +-
4742 + static int compat_do_replace(struct net *net, void __user *user,
4743 + unsigned int len)
4744 + {
4745 +@@ -1495,6 +1374,9 @@ static int compat_do_replace(struct net *net, void __user *user,
4746 + return -ENOMEM;
4747 + if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
4748 + return -ENOMEM;
4749 ++ if (tmp.num_counters == 0)
4750 ++ return -EINVAL;
4751 ++
4752 + tmp.name[sizeof(tmp.name)-1] = 0;
4753 +
4754 + newinfo = xt_alloc_table_info(tmp.size);
4755 +@@ -1508,10 +1390,7 @@ static int compat_do_replace(struct net *net, void __user *user,
4756 + goto free_newinfo;
4757 + }
4758 +
4759 +- ret = translate_compat_table(tmp.name, tmp.valid_hooks,
4760 +- &newinfo, &loc_cpu_entry, tmp.size,
4761 +- tmp.num_entries, tmp.hook_entry,
4762 +- tmp.underflow);
4763 ++ ret = translate_compat_table(&newinfo, &loc_cpu_entry, &tmp);
4764 + if (ret != 0)
4765 + goto free_newinfo;
4766 +
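
The check_*entry_size_and_hooks() hunks in this patch (arp_tables here, ip_tables and ip6_tables below) all add the same hardening: besides the existing alignment and minimum-size tests, an entry's self-declared next_offset must now also land inside the ruleset blob, so a crafted offset cannot make the walker read past the end. A minimal userspace sketch of that walk; struct blob_entry and validate_blob are illustrative stand-ins, not kernel API, and the exact boundary conditions differ slightly from the kernel's:

	#include <stdint.h>

	/* Illustrative stand-in for an arpt/ipt/ip6t entry header. */
	struct blob_entry {
		uint16_t target_offset;	/* where the target record starts */
		uint16_t next_offset;	/* total size of this entry */
		/* variable-size match/target payload follows */
	};

	/* Walk a buffer of variable-size entries; reject any entry whose
	 * header or declared size would step past the end of the buffer.
	 * Short-circuit order matters: next_offset is only read once the
	 * header is known to be in bounds.
	 */
	static int validate_blob(const unsigned char *base, unsigned int size)
	{
		const unsigned char *limit = base + size;
		const unsigned char *p = base;

		while (p < limit) {
			const struct blob_entry *e = (const void *)p;

			if ((uintptr_t)p % __alignof__(struct blob_entry) ||
			    p + sizeof(*e) > limit ||	   /* header fits? */
			    e->next_offset < sizeof(*e) || /* no short entries */
			    p + e->next_offset > limit)	   /* size in bounds? */
				return -1;
			p += e->next_offset;
		}
		return 0;
	}
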
4767 +diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
4768 +index 651c10774d58..92c8f2727ee9 100644
4769 +--- a/net/ipv4/netfilter/ip_tables.c
4770 ++++ b/net/ipv4/netfilter/ip_tables.c
4771 +@@ -168,11 +168,12 @@ get_entry(const void *base, unsigned int offset)
4772 +
4773 + /* All zeroes == unconditional rule. */
4774 + /* Mildly perf critical (only if packet tracing is on) */
4775 +-static inline bool unconditional(const struct ipt_ip *ip)
4776 ++static inline bool unconditional(const struct ipt_entry *e)
4777 + {
4778 + static const struct ipt_ip uncond;
4779 +
4780 +- return memcmp(ip, &uncond, sizeof(uncond)) == 0;
4781 ++ return e->target_offset == sizeof(struct ipt_entry) &&
4782 ++ memcmp(&e->ip, &uncond, sizeof(uncond)) == 0;
4783 + #undef FWINV
4784 + }
4785 +
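
The rewritten unconditional() folds two facts into one test: the rule carries no matches (target_offset equals the bare entry size, i.e. the target immediately follows the header) and its IP part is all zeroes. Testing "all fields zero" with one memcmp against a static zero-initialized struct is a common C idiom; a self-contained illustration, where struct ip_part is a stand-in and not the real kernel layout:

	#include <string.h>
	#include <stdbool.h>
	#include <stdint.h>

	struct ip_part { uint32_t src, dst, smsk, dmsk; }; /* stand-in fields */
	struct entry   { struct ip_part ip; uint16_t target_offset; };

	static bool unconditional(const struct entry *e)
	{
		/* static => zero-initialized; one memcmp tests all fields */
		static const struct ip_part uncond;

		/* target right after the header means no match blobs */
		return e->target_offset == sizeof(struct entry) &&
		       memcmp(&e->ip, &uncond, sizeof(uncond)) == 0;
	}
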
4786 +@@ -229,11 +230,10 @@ get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
4787 + } else if (s == e) {
4788 + (*rulenum)++;
4789 +
4790 +- if (s->target_offset == sizeof(struct ipt_entry) &&
4791 ++ if (unconditional(s) &&
4792 + strcmp(t->target.u.kernel.target->name,
4793 + XT_STANDARD_TARGET) == 0 &&
4794 +- t->verdict < 0 &&
4795 +- unconditional(&s->ip)) {
4796 ++ t->verdict < 0) {
4797 + /* Tail of chains: STANDARD target (return/policy) */
4798 + *comment = *chainname == hookname
4799 + ? comments[NF_IP_TRACE_COMMENT_POLICY]
4800 +@@ -467,11 +467,10 @@ mark_source_chains(const struct xt_table_info *newinfo,
4801 + e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
4802 +
4803 + /* Unconditional return/END. */
4804 +- if ((e->target_offset == sizeof(struct ipt_entry) &&
4805 ++ if ((unconditional(e) &&
4806 + (strcmp(t->target.u.user.name,
4807 + XT_STANDARD_TARGET) == 0) &&
4808 +- t->verdict < 0 && unconditional(&e->ip)) ||
4809 +- visited) {
4810 ++ t->verdict < 0) || visited) {
4811 + unsigned int oldpos, size;
4812 +
4813 + if ((strcmp(t->target.u.user.name,
4814 +@@ -512,6 +511,8 @@ mark_source_chains(const struct xt_table_info *newinfo,
4815 + size = e->next_offset;
4816 + e = (struct ipt_entry *)
4817 + (entry0 + pos + size);
4818 ++ if (pos + size >= newinfo->size)
4819 ++ return 0;
4820 + e->counters.pcnt = pos;
4821 + pos += size;
4822 + } else {
4823 +@@ -533,6 +534,8 @@ mark_source_chains(const struct xt_table_info *newinfo,
4824 + } else {
4825 + /* ... this is a fallthru */
4826 + newpos = pos + e->next_offset;
4827 ++ if (newpos >= newinfo->size)
4828 ++ return 0;
4829 + }
4830 + e = (struct ipt_entry *)
4831 + (entry0 + newpos);
4832 +@@ -560,27 +563,6 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net)
4833 + }
4834 +
4835 + static int
4836 +-check_entry(const struct ipt_entry *e, const char *name)
4837 +-{
4838 +- const struct xt_entry_target *t;
4839 +-
4840 +- if (!ip_checkentry(&e->ip)) {
4841 +- duprintf("ip check failed %p %s.\n", e, name);
4842 +- return -EINVAL;
4843 +- }
4844 +-
4845 +- if (e->target_offset + sizeof(struct xt_entry_target) >
4846 +- e->next_offset)
4847 +- return -EINVAL;
4848 +-
4849 +- t = ipt_get_target_c(e);
4850 +- if (e->target_offset + t->u.target_size > e->next_offset)
4851 +- return -EINVAL;
4852 +-
4853 +- return 0;
4854 +-}
4855 +-
4856 +-static int
4857 + check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
4858 + {
4859 + const struct ipt_ip *ip = par->entryinfo;
4860 +@@ -657,10 +639,6 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
4861 + struct xt_mtchk_param mtpar;
4862 + struct xt_entry_match *ematch;
4863 +
4864 +- ret = check_entry(e, name);
4865 +- if (ret)
4866 +- return ret;
4867 +-
4868 + j = 0;
4869 + mtpar.net = net;
4870 + mtpar.table = name;
4871 +@@ -704,7 +682,7 @@ static bool check_underflow(const struct ipt_entry *e)
4872 + const struct xt_entry_target *t;
4873 + unsigned int verdict;
4874 +
4875 +- if (!unconditional(&e->ip))
4876 ++ if (!unconditional(e))
4877 + return false;
4878 + t = ipt_get_target_c(e);
4879 + if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
4880 +@@ -724,9 +702,11 @@ check_entry_size_and_hooks(struct ipt_entry *e,
4881 + unsigned int valid_hooks)
4882 + {
4883 + unsigned int h;
4884 ++ int err;
4885 +
4886 + if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
4887 +- (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
4888 ++ (unsigned char *)e + sizeof(struct ipt_entry) >= limit ||
4889 ++ (unsigned char *)e + e->next_offset > limit) {
4890 + duprintf("Bad offset %p\n", e);
4891 + return -EINVAL;
4892 + }
4893 +@@ -738,6 +718,14 @@ check_entry_size_and_hooks(struct ipt_entry *e,
4894 + return -EINVAL;
4895 + }
4896 +
4897 ++ if (!ip_checkentry(&e->ip))
4898 ++ return -EINVAL;
4899 ++
4900 ++ err = xt_check_entry_offsets(e, e->elems, e->target_offset,
4901 ++ e->next_offset);
4902 ++ if (err)
4903 ++ return err;
4904 ++
4905 + /* Check hooks & underflows */
4906 + for (h = 0; h < NF_INET_NUMHOOKS; h++) {
4907 + if (!(valid_hooks & (1 << h)))
4908 +@@ -746,9 +734,9 @@ check_entry_size_and_hooks(struct ipt_entry *e,
4909 + newinfo->hook_entry[h] = hook_entries[h];
4910 + if ((unsigned char *)e - base == underflows[h]) {
4911 + if (!check_underflow(e)) {
4912 +- pr_err("Underflows must be unconditional and "
4913 +- "use the STANDARD target with "
4914 +- "ACCEPT/DROP\n");
4915 ++ pr_debug("Underflows must be unconditional and "
4916 ++ "use the STANDARD target with "
4917 ++ "ACCEPT/DROP\n");
4918 + return -EINVAL;
4919 + }
4920 + newinfo->underflow[h] = underflows[h];
4921 +@@ -1258,6 +1246,9 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
4922 + /* overflow check */
4923 + if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
4924 + return -ENOMEM;
4925 ++ if (tmp.num_counters == 0)
4926 ++ return -EINVAL;
4927 ++
4928 + tmp.name[sizeof(tmp.name)-1] = 0;
4929 +
4930 + newinfo = xt_alloc_table_info(tmp.size);
4931 +@@ -1299,56 +1290,18 @@ do_add_counters(struct net *net, const void __user *user,
4932 + unsigned int i, curcpu;
4933 + struct xt_counters_info tmp;
4934 + struct xt_counters *paddc;
4935 +- unsigned int num_counters;
4936 +- const char *name;
4937 +- int size;
4938 +- void *ptmp;
4939 + struct xt_table *t;
4940 + const struct xt_table_info *private;
4941 + int ret = 0;
4942 + void *loc_cpu_entry;
4943 + struct ipt_entry *iter;
4944 + unsigned int addend;
4945 +-#ifdef CONFIG_COMPAT
4946 +- struct compat_xt_counters_info compat_tmp;
4947 +-
4948 +- if (compat) {
4949 +- ptmp = &compat_tmp;
4950 +- size = sizeof(struct compat_xt_counters_info);
4951 +- } else
4952 +-#endif
4953 +- {
4954 +- ptmp = &tmp;
4955 +- size = sizeof(struct xt_counters_info);
4956 +- }
4957 +-
4958 +- if (copy_from_user(ptmp, user, size) != 0)
4959 +- return -EFAULT;
4960 +-
4961 +-#ifdef CONFIG_COMPAT
4962 +- if (compat) {
4963 +- num_counters = compat_tmp.num_counters;
4964 +- name = compat_tmp.name;
4965 +- } else
4966 +-#endif
4967 +- {
4968 +- num_counters = tmp.num_counters;
4969 +- name = tmp.name;
4970 +- }
4971 +
4972 +- if (len != size + num_counters * sizeof(struct xt_counters))
4973 +- return -EINVAL;
4974 +-
4975 +- paddc = vmalloc(len - size);
4976 +- if (!paddc)
4977 +- return -ENOMEM;
4978 +-
4979 +- if (copy_from_user(paddc, user + size, len - size) != 0) {
4980 +- ret = -EFAULT;
4981 +- goto free;
4982 +- }
4983 ++ paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
4984 ++ if (IS_ERR(paddc))
4985 ++ return PTR_ERR(paddc);
4986 +
4987 +- t = xt_find_table_lock(net, AF_INET, name);
4988 ++ t = xt_find_table_lock(net, AF_INET, tmp.name);
4989 + if (IS_ERR_OR_NULL(t)) {
4990 + ret = t ? PTR_ERR(t) : -ENOENT;
4991 + goto free;
4992 +@@ -1356,7 +1309,7 @@ do_add_counters(struct net *net, const void __user *user,
4993 +
4994 + local_bh_disable();
4995 + private = t->private;
4996 +- if (private->number != num_counters) {
4997 ++ if (private->number != tmp.num_counters) {
4998 + ret = -EINVAL;
4999 + goto unlock_up_free;
5000 + }
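
do_add_counters() now gets its counter array from a shared helper and distinguishes failure with the kernel's ERR_PTR convention: a small negative errno encoded in an otherwise-invalid pointer, so one return value carries either a buffer or an error. A minimal userspace mimic of the pattern, for illustration only (copy_counters is hypothetical, loosely shaped like xt_copy_counters_from_user):

	#include <stdlib.h>
	#include <stdio.h>
	#include <errno.h>
	#include <stdbool.h>
	#include <stdint.h>

	#define MAX_ERRNO 4095

	static inline void *ERR_PTR(long err) { return (void *)err; }
	static inline long  PTR_ERR(const void *p) { return (long)p; }
	static inline bool  IS_ERR(const void *p)
	{
		return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
	}

	/* Return a buffer on success or an encoded errno on failure. */
	static void *copy_counters(size_t len)
	{
		void *buf;

		if (len == 0)
			return ERR_PTR(-EINVAL);
		buf = malloc(len);
		if (!buf)
			return ERR_PTR(-ENOMEM);
		return buf;
	}

	int main(void)
	{
		void *paddc = copy_counters(0);

		if (IS_ERR(paddc)) {	/* one test covers every errno */
			printf("error: %ld\n", PTR_ERR(paddc));
			return 1;
		}
		free(paddc);
		return 0;
	}
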
5001 +@@ -1435,7 +1388,6 @@ compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
5002 +
5003 + static int
5004 + compat_find_calc_match(struct xt_entry_match *m,
5005 +- const char *name,
5006 + const struct ipt_ip *ip,
5007 + unsigned int hookmask,
5008 + int *size)
5009 +@@ -1471,21 +1423,19 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
5010 + struct xt_table_info *newinfo,
5011 + unsigned int *size,
5012 + const unsigned char *base,
5013 +- const unsigned char *limit,
5014 +- const unsigned int *hook_entries,
5015 +- const unsigned int *underflows,
5016 +- const char *name)
5017 ++ const unsigned char *limit)
5018 + {
5019 + struct xt_entry_match *ematch;
5020 + struct xt_entry_target *t;
5021 + struct xt_target *target;
5022 + unsigned int entry_offset;
5023 + unsigned int j;
5024 +- int ret, off, h;
5025 ++ int ret, off;
5026 +
5027 + duprintf("check_compat_entry_size_and_hooks %p\n", e);
5028 + if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
5029 +- (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
5030 ++ (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit ||
5031 ++ (unsigned char *)e + e->next_offset > limit) {
5032 + duprintf("Bad offset %p, limit = %p\n", e, limit);
5033 + return -EINVAL;
5034 + }
5035 +@@ -1497,8 +1447,11 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
5036 + return -EINVAL;
5037 + }
5038 +
5039 +- /* For purposes of check_entry casting the compat entry is fine */
5040 +- ret = check_entry((struct ipt_entry *)e, name);
5041 ++ if (!ip_checkentry(&e->ip))
5042 ++ return -EINVAL;
5043 ++
5044 ++ ret = xt_compat_check_entry_offsets(e, e->elems,
5045 ++ e->target_offset, e->next_offset);
5046 + if (ret)
5047 + return ret;
5048 +
5049 +@@ -1506,8 +1459,8 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
5050 + entry_offset = (void *)e - (void *)base;
5051 + j = 0;
5052 + xt_ematch_foreach(ematch, e) {
5053 +- ret = compat_find_calc_match(ematch, name,
5054 +- &e->ip, e->comefrom, &off);
5055 ++ ret = compat_find_calc_match(ematch, &e->ip, e->comefrom,
5056 ++ &off);
5057 + if (ret != 0)
5058 + goto release_matches;
5059 + ++j;
5060 +@@ -1530,17 +1483,6 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
5061 + if (ret)
5062 + goto out;
5063 +
5064 +- /* Check hooks & underflows */
5065 +- for (h = 0; h < NF_INET_NUMHOOKS; h++) {
5066 +- if ((unsigned char *)e - base == hook_entries[h])
5067 +- newinfo->hook_entry[h] = hook_entries[h];
5068 +- if ((unsigned char *)e - base == underflows[h])
5069 +- newinfo->underflow[h] = underflows[h];
5070 +- }
5071 +-
5072 +- /* Clear counters and comefrom */
5073 +- memset(&e->counters, 0, sizeof(e->counters));
5074 +- e->comefrom = 0;
5075 + return 0;
5076 +
5077 + out:
5078 +@@ -1554,19 +1496,18 @@ release_matches:
5079 + return ret;
5080 + }
5081 +
5082 +-static int
5083 ++static void
5084 + compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
5085 +- unsigned int *size, const char *name,
5086 ++ unsigned int *size,
5087 + struct xt_table_info *newinfo, unsigned char *base)
5088 + {
5089 + struct xt_entry_target *t;
5090 + struct xt_target *target;
5091 + struct ipt_entry *de;
5092 + unsigned int origsize;
5093 +- int ret, h;
5094 ++ int h;
5095 + struct xt_entry_match *ematch;
5096 +
5097 +- ret = 0;
5098 + origsize = *size;
5099 + de = (struct ipt_entry *)*dstptr;
5100 + memcpy(de, e, sizeof(struct ipt_entry));
5101 +@@ -1575,198 +1516,104 @@ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
5102 + *dstptr += sizeof(struct ipt_entry);
5103 + *size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
5104 +
5105 +- xt_ematch_foreach(ematch, e) {
5106 +- ret = xt_compat_match_from_user(ematch, dstptr, size);
5107 +- if (ret != 0)
5108 +- return ret;
5109 +- }
5110 ++ xt_ematch_foreach(ematch, e)
5111 ++ xt_compat_match_from_user(ematch, dstptr, size);
5112 ++
5113 + de->target_offset = e->target_offset - (origsize - *size);
5114 + t = compat_ipt_get_target(e);
5115 + target = t->u.kernel.target;
5116 + xt_compat_target_from_user(t, dstptr, size);
5117 +
5118 + de->next_offset = e->next_offset - (origsize - *size);
5119 ++
5120 + for (h = 0; h < NF_INET_NUMHOOKS; h++) {
5121 + if ((unsigned char *)de - base < newinfo->hook_entry[h])
5122 + newinfo->hook_entry[h] -= origsize - *size;
5123 + if ((unsigned char *)de - base < newinfo->underflow[h])
5124 + newinfo->underflow[h] -= origsize - *size;
5125 + }
5126 +- return ret;
5127 +-}
5128 +-
5129 +-static int
5130 +-compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
5131 +-{
5132 +- struct xt_entry_match *ematch;
5133 +- struct xt_mtchk_param mtpar;
5134 +- unsigned int j;
5135 +- int ret = 0;
5136 +-
5137 +- j = 0;
5138 +- mtpar.net = net;
5139 +- mtpar.table = name;
5140 +- mtpar.entryinfo = &e->ip;
5141 +- mtpar.hook_mask = e->comefrom;
5142 +- mtpar.family = NFPROTO_IPV4;
5143 +- xt_ematch_foreach(ematch, e) {
5144 +- ret = check_match(ematch, &mtpar);
5145 +- if (ret != 0)
5146 +- goto cleanup_matches;
5147 +- ++j;
5148 +- }
5149 +-
5150 +- ret = check_target(e, net, name);
5151 +- if (ret)
5152 +- goto cleanup_matches;
5153 +- return 0;
5154 +-
5155 +- cleanup_matches:
5156 +- xt_ematch_foreach(ematch, e) {
5157 +- if (j-- == 0)
5158 +- break;
5159 +- cleanup_match(ematch, net);
5160 +- }
5161 +- return ret;
5162 + }
5163 +
5164 + static int
5165 + translate_compat_table(struct net *net,
5166 +- const char *name,
5167 +- unsigned int valid_hooks,
5168 + struct xt_table_info **pinfo,
5169 + void **pentry0,
5170 +- unsigned int total_size,
5171 +- unsigned int number,
5172 +- unsigned int *hook_entries,
5173 +- unsigned int *underflows)
5174 ++ const struct compat_ipt_replace *compatr)
5175 + {
5176 + unsigned int i, j;
5177 + struct xt_table_info *newinfo, *info;
5178 + void *pos, *entry0, *entry1;
5179 + struct compat_ipt_entry *iter0;
5180 +- struct ipt_entry *iter1;
5181 ++ struct ipt_replace repl;
5182 + unsigned int size;
5183 + int ret;
5184 +
5185 + info = *pinfo;
5186 + entry0 = *pentry0;
5187 +- size = total_size;
5188 +- info->number = number;
5189 +-
5190 +- /* Init all hooks to impossible value. */
5191 +- for (i = 0; i < NF_INET_NUMHOOKS; i++) {
5192 +- info->hook_entry[i] = 0xFFFFFFFF;
5193 +- info->underflow[i] = 0xFFFFFFFF;
5194 +- }
5195 ++ size = compatr->size;
5196 ++ info->number = compatr->num_entries;
5197 +
5198 + duprintf("translate_compat_table: size %u\n", info->size);
5199 + j = 0;
5200 + xt_compat_lock(AF_INET);
5201 +- xt_compat_init_offsets(AF_INET, number);
5202 ++ xt_compat_init_offsets(AF_INET, compatr->num_entries);
5203 + /* Walk through entries, checking offsets. */
5204 +- xt_entry_foreach(iter0, entry0, total_size) {
5205 ++ xt_entry_foreach(iter0, entry0, compatr->size) {
5206 + ret = check_compat_entry_size_and_hooks(iter0, info, &size,
5207 + entry0,
5208 +- entry0 + total_size,
5209 +- hook_entries,
5210 +- underflows,
5211 +- name);
5212 ++ entry0 + compatr->size);
5213 + if (ret != 0)
5214 + goto out_unlock;
5215 + ++j;
5216 + }
5217 +
5218 + ret = -EINVAL;
5219 +- if (j != number) {
5220 ++ if (j != compatr->num_entries) {
5221 + duprintf("translate_compat_table: %u not %u entries\n",
5222 +- j, number);
5223 ++ j, compatr->num_entries);
5224 + goto out_unlock;
5225 + }
5226 +
5227 +- /* Check hooks all assigned */
5228 +- for (i = 0; i < NF_INET_NUMHOOKS; i++) {
5229 +- /* Only hooks which are valid */
5230 +- if (!(valid_hooks & (1 << i)))
5231 +- continue;
5232 +- if (info->hook_entry[i] == 0xFFFFFFFF) {
5233 +- duprintf("Invalid hook entry %u %u\n",
5234 +- i, hook_entries[i]);
5235 +- goto out_unlock;
5236 +- }
5237 +- if (info->underflow[i] == 0xFFFFFFFF) {
5238 +- duprintf("Invalid underflow %u %u\n",
5239 +- i, underflows[i]);
5240 +- goto out_unlock;
5241 +- }
5242 +- }
5243 +-
5244 + ret = -ENOMEM;
5245 + newinfo = xt_alloc_table_info(size);
5246 + if (!newinfo)
5247 + goto out_unlock;
5248 +
5249 +- newinfo->number = number;
5250 ++ newinfo->number = compatr->num_entries;
5251 + for (i = 0; i < NF_INET_NUMHOOKS; i++) {
5252 +- newinfo->hook_entry[i] = info->hook_entry[i];
5253 +- newinfo->underflow[i] = info->underflow[i];
5254 ++ newinfo->hook_entry[i] = compatr->hook_entry[i];
5255 ++ newinfo->underflow[i] = compatr->underflow[i];
5256 + }
5257 + entry1 = newinfo->entries[raw_smp_processor_id()];
5258 + pos = entry1;
5259 +- size = total_size;
5260 +- xt_entry_foreach(iter0, entry0, total_size) {
5261 +- ret = compat_copy_entry_from_user(iter0, &pos, &size,
5262 +- name, newinfo, entry1);
5263 +- if (ret != 0)
5264 +- break;
5265 +- }
5266 ++ size = compatr->size;
5267 ++ xt_entry_foreach(iter0, entry0, compatr->size)
5268 ++ compat_copy_entry_from_user(iter0, &pos, &size,
5269 ++ newinfo, entry1);
5270 ++
5271 ++ /* all module references in entry0 are now gone.
5272 ++ * entry1/newinfo contains a 64bit ruleset that looks exactly as
5273 ++ * generated by 64bit userspace.
5274 ++ *
5275 ++	 * Call standard translate_table() to validate all hook_entry offsets,
5276 ++ * underflows, check for loops, etc.
5277 ++ */
5278 + xt_compat_flush_offsets(AF_INET);
5279 + xt_compat_unlock(AF_INET);
5280 +- if (ret)
5281 +- goto free_newinfo;
5282 +
5283 +- ret = -ELOOP;
5284 +- if (!mark_source_chains(newinfo, valid_hooks, entry1))
5285 +- goto free_newinfo;
5286 ++ memcpy(&repl, compatr, sizeof(*compatr));
5287 +
5288 +- i = 0;
5289 +- xt_entry_foreach(iter1, entry1, newinfo->size) {
5290 +- ret = compat_check_entry(iter1, net, name);
5291 +- if (ret != 0)
5292 +- break;
5293 +- ++i;
5294 +- if (strcmp(ipt_get_target(iter1)->u.user.name,
5295 +- XT_ERROR_TARGET) == 0)
5296 +- ++newinfo->stacksize;
5297 +- }
5298 +- if (ret) {
5299 +- /*
5300 +- * The first i matches need cleanup_entry (calls ->destroy)
5301 +- * because they had called ->check already. The other j-i
5302 +- * entries need only release.
5303 +- */
5304 +- int skip = i;
5305 +- j -= i;
5306 +- xt_entry_foreach(iter0, entry0, newinfo->size) {
5307 +- if (skip-- > 0)
5308 +- continue;
5309 +- if (j-- == 0)
5310 +- break;
5311 +- compat_release_entry(iter0);
5312 +- }
5313 +- xt_entry_foreach(iter1, entry1, newinfo->size) {
5314 +- if (i-- == 0)
5315 +- break;
5316 +- cleanup_entry(iter1, net);
5317 +- }
5318 +- xt_free_table_info(newinfo);
5319 +- return ret;
5320 ++ for (i = 0; i < NF_INET_NUMHOOKS; i++) {
5321 ++ repl.hook_entry[i] = newinfo->hook_entry[i];
5322 ++ repl.underflow[i] = newinfo->underflow[i];
5323 + }
5324 +
5325 +- /* And one copy for every other CPU */
5326 +- for_each_possible_cpu(i)
5327 +- if (newinfo->entries[i] && newinfo->entries[i] != entry1)
5328 +- memcpy(newinfo->entries[i], entry1, newinfo->size);
5329 ++ repl.num_counters = 0;
5330 ++ repl.counters = NULL;
5331 ++ repl.size = newinfo->size;
5332 ++ ret = translate_table(net, newinfo, entry1, &repl);
5333 ++ if (ret)
5334 ++ goto free_newinfo;
5335 +
5336 + *pinfo = newinfo;
5337 + *pentry0 = entry1;
5338 +@@ -1775,17 +1622,16 @@ translate_compat_table(struct net *net,
5339 +
5340 + free_newinfo:
5341 + xt_free_table_info(newinfo);
5342 +-out:
5343 +- xt_entry_foreach(iter0, entry0, total_size) {
5344 ++ return ret;
5345 ++out_unlock:
5346 ++ xt_compat_flush_offsets(AF_INET);
5347 ++ xt_compat_unlock(AF_INET);
5348 ++ xt_entry_foreach(iter0, entry0, compatr->size) {
5349 + if (j-- == 0)
5350 + break;
5351 + compat_release_entry(iter0);
5352 + }
5353 + return ret;
5354 +-out_unlock:
5355 +- xt_compat_flush_offsets(AF_INET);
5356 +- xt_compat_unlock(AF_INET);
5357 +- goto out;
5358 + }
5359 +
5360 + static int
5361 +@@ -1805,6 +1651,9 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
5362 + return -ENOMEM;
5363 + if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
5364 + return -ENOMEM;
5365 ++ if (tmp.num_counters == 0)
5366 ++ return -EINVAL;
5367 ++
5368 + tmp.name[sizeof(tmp.name)-1] = 0;
5369 +
5370 + newinfo = xt_alloc_table_info(tmp.size);
5371 +@@ -1819,10 +1668,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
5372 + goto free_newinfo;
5373 + }
5374 +
5375 +- ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
5376 +- &newinfo, &loc_cpu_entry, tmp.size,
5377 +- tmp.num_entries, tmp.hook_entry,
5378 +- tmp.underflow);
5379 ++ ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
5380 + if (ret != 0)
5381 + goto free_newinfo;
5382 +
5383 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
5384 +index f89087c3cfc8..f3b15bb7fbec 100644
5385 +--- a/net/ipv4/tcp_input.c
5386 ++++ b/net/ipv4/tcp_input.c
5387 +@@ -68,6 +68,7 @@
5388 + #include <linux/module.h>
5389 + #include <linux/sysctl.h>
5390 + #include <linux/kernel.h>
5391 ++#include <linux/reciprocal_div.h>
5392 + #include <net/dst.h>
5393 + #include <net/tcp.h>
5394 + #include <net/inet_common.h>
5395 +@@ -87,7 +88,7 @@ int sysctl_tcp_adv_win_scale __read_mostly = 1;
5396 + EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
5397 +
5398 + /* rfc5961 challenge ack rate limiting */
5399 +-int sysctl_tcp_challenge_ack_limit = 100;
5400 ++int sysctl_tcp_challenge_ack_limit = 1000;
5401 +
5402 + int sysctl_tcp_stdurg __read_mostly;
5403 + int sysctl_tcp_rfc1337 __read_mostly;
5404 +@@ -3288,12 +3289,19 @@ static void tcp_send_challenge_ack(struct sock *sk)
5405 + static u32 challenge_timestamp;
5406 + static unsigned int challenge_count;
5407 + u32 now = jiffies / HZ;
5408 ++ u32 count;
5409 +
5410 + if (now != challenge_timestamp) {
5411 ++ u32 half = (sysctl_tcp_challenge_ack_limit + 1) >> 1;
5412 ++
5413 + challenge_timestamp = now;
5414 +- challenge_count = 0;
5415 ++ ACCESS_ONCE(challenge_count) = half +
5416 ++ reciprocal_divide(prandom_u32(),
5417 ++ sysctl_tcp_challenge_ack_limit);
5418 + }
5419 +- if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
5420 ++ count = ACCESS_ONCE(challenge_count);
5421 ++ if (count > 0) {
5422 ++ ACCESS_ONCE(challenge_count) = count - 1;
5423 + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
5424 + tcp_send_ack(sk);
5425 + }
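
The tcp_input.c hunk replaces the fixed per-second challenge-ACK counter with a randomized budget: once per second the counter is reseeded to half the limit plus a random share of the limit, and each challenge ACK consumes one token. The effective rate stays near the sysctl limit, but an attacker no longer observes an exact, predictable threshold. A compressed userspace sketch of the same logic; rand() and the modulo stand in for prandom_u32()/reciprocal_divide(), and there is no SMP or ACCESS_ONCE story here:

	#include <stdlib.h>
	#include <stdbool.h>

	static unsigned int challenge_ack_limit = 1000;

	/* Return true if a challenge ACK may be sent "now" (in seconds). */
	static bool challenge_ack_allowed(unsigned long now)
	{
		static unsigned long stamp;
		static unsigned int count;

		if (now != stamp) {	/* new 1s window: reseed the budget */
			unsigned int half = (challenge_ack_limit + 1) / 2;

			stamp = now;
			count = half + (unsigned int)rand() % challenge_ack_limit;
		}
		if (count > 0) {
			count--;
			return true;
		}
		return false;
	}
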
5426 +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
5427 +index 76c80b59e80f..276b28301a6b 100644
5428 +--- a/net/ipv4/tcp_output.c
5429 ++++ b/net/ipv4/tcp_output.c
5430 +@@ -222,7 +222,8 @@ void tcp_select_initial_window(int __space, __u32 mss,
5431 + /* Set window scaling on max possible window
5432 + * See RFC1323 for an explanation of the limit to 14
5433 + */
5434 +- space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
5435 ++ space = max_t(u32, space, sysctl_tcp_rmem[2]);
5436 ++ space = max_t(u32, space, sysctl_rmem_max);
5437 + space = min_t(u32, space, *window_clamp);
5438 + while (space > 65535 && (*rcv_wscale) < 14) {
5439 + space >>= 1;
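
The tcp_output.c fix keeps the caller-supplied space in play instead of overwriting it with the sysctl maxima, so the scaling loop that follows sizes the window from the largest of the three values. That loop, visible at the end of the hunk, derives the scale factor by shifting until the window fits the 16-bit TCP window field, capped at 14 per RFC 1323. A small standalone version of the computation:

	#include <stdint.h>

	/* Derive the receive window scale: shift until the window fits in
	 * the 16-bit TCP window field, but never beyond 14 (RFC 1323).
	 * e.g. rcv_wscale_for(6291456) == 7, since 6 MiB >> 7 <= 65535.
	 */
	static uint8_t rcv_wscale_for(uint32_t space)
	{
		uint8_t wscale = 0;

		while (space > 65535 && wscale < 14) {
			space >>= 1;
			wscale++;
		}
		return wscale;
	}
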
5440 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
5441 +index 63b536bbf0b0..68174e4d88c7 100644
5442 +--- a/net/ipv4/udp.c
5443 ++++ b/net/ipv4/udp.c
5444 +@@ -1208,6 +1208,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
5445 + int peeked, off = 0;
5446 + int err;
5447 + int is_udplite = IS_UDPLITE(sk);
5448 ++ bool checksum_valid = false;
5449 + bool slow;
5450 +
5451 + if (flags & MSG_ERRQUEUE)
5452 +@@ -1233,11 +1234,12 @@ try_again:
5453 + */
5454 +
5455 + if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
5456 +- if (udp_lib_checksum_complete(skb))
5457 ++ checksum_valid = !udp_lib_checksum_complete(skb);
5458 ++ if (!checksum_valid)
5459 + goto csum_copy_err;
5460 + }
5461 +
5462 +- if (skb_csum_unnecessary(skb))
5463 ++ if (checksum_valid || skb_csum_unnecessary(skb))
5464 + err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
5465 + msg->msg_iov, copied);
5466 + else {
5467 +diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
5468 +index 8d69df16f6a8..107f75283b1b 100644
5469 +--- a/net/ipv6/ip6mr.c
5470 ++++ b/net/ipv6/ip6mr.c
5471 +@@ -1077,6 +1077,7 @@ static struct mfc6_cache *ip6mr_cache_alloc(void)
5472 + struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
5473 + if (c == NULL)
5474 + return NULL;
5475 ++ c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
5476 + c->mfc_un.res.minvif = MAXMIFS;
5477 + return c;
5478 + }
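
The ip6mr.c one-liner seeds last_assert far enough in the past that the very first assert for a fresh cache entry is not rate-limited: with the zero value left by kmem_cache_zalloc(), an early jiffies reading can appear to fall inside the threshold and silently suppress the first event. A sketch of the idiom, with ASSERT_THRESH as a stand-in constant:

	#include <stdbool.h>

	#define ASSERT_THRESH 3000UL	/* stand-in for MFC_ASSERT_THRESH */

	struct cache_entry { unsigned long last_assert; };

	/* Classic rate limit: at most one assert per ASSERT_THRESH ticks. */
	static bool assert_allowed(struct cache_entry *c, unsigned long jiffies)
	{
		if (jiffies - c->last_assert < ASSERT_THRESH)
			return false;
		c->last_assert = jiffies;
		return true;
	}

	static void cache_entry_init(struct cache_entry *c, unsigned long jiffies)
	{
		/* Seed the stamp just past the threshold in the past so
		 * the first real event is never suppressed.
		 */
		c->last_assert = jiffies - ASSERT_THRESH - 1;
	}
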
5479 +diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
5480 +index 89a4e4ddd8bb..e214222cd06f 100644
5481 +--- a/net/ipv6/netfilter/ip6_tables.c
5482 ++++ b/net/ipv6/netfilter/ip6_tables.c
5483 +@@ -195,11 +195,12 @@ get_entry(const void *base, unsigned int offset)
5484 +
5485 + /* All zeroes == unconditional rule. */
5486 + /* Mildly perf critical (only if packet tracing is on) */
5487 +-static inline bool unconditional(const struct ip6t_ip6 *ipv6)
5488 ++static inline bool unconditional(const struct ip6t_entry *e)
5489 + {
5490 + static const struct ip6t_ip6 uncond;
5491 +
5492 +- return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
5493 ++ return e->target_offset == sizeof(struct ip6t_entry) &&
5494 ++ memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0;
5495 + }
5496 +
5497 + static inline const struct xt_entry_target *
5498 +@@ -255,11 +256,10 @@ get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
5499 + } else if (s == e) {
5500 + (*rulenum)++;
5501 +
5502 +- if (s->target_offset == sizeof(struct ip6t_entry) &&
5503 ++ if (unconditional(s) &&
5504 + strcmp(t->target.u.kernel.target->name,
5505 + XT_STANDARD_TARGET) == 0 &&
5506 +- t->verdict < 0 &&
5507 +- unconditional(&s->ipv6)) {
5508 ++ t->verdict < 0) {
5509 + /* Tail of chains: STANDARD target (return/policy) */
5510 + *comment = *chainname == hookname
5511 + ? comments[NF_IP6_TRACE_COMMENT_POLICY]
5512 +@@ -477,11 +477,10 @@ mark_source_chains(const struct xt_table_info *newinfo,
5513 + e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
5514 +
5515 + /* Unconditional return/END. */
5516 +- if ((e->target_offset == sizeof(struct ip6t_entry) &&
5517 ++ if ((unconditional(e) &&
5518 + (strcmp(t->target.u.user.name,
5519 + XT_STANDARD_TARGET) == 0) &&
5520 +- t->verdict < 0 &&
5521 +- unconditional(&e->ipv6)) || visited) {
5522 ++ t->verdict < 0) || visited) {
5523 + unsigned int oldpos, size;
5524 +
5525 + if ((strcmp(t->target.u.user.name,
5526 +@@ -522,6 +521,8 @@ mark_source_chains(const struct xt_table_info *newinfo,
5527 + size = e->next_offset;
5528 + e = (struct ip6t_entry *)
5529 + (entry0 + pos + size);
5530 ++ if (pos + size >= newinfo->size)
5531 ++ return 0;
5532 + e->counters.pcnt = pos;
5533 + pos += size;
5534 + } else {
5535 +@@ -543,6 +544,8 @@ mark_source_chains(const struct xt_table_info *newinfo,
5536 + } else {
5537 + /* ... this is a fallthru */
5538 + newpos = pos + e->next_offset;
5539 ++ if (newpos >= newinfo->size)
5540 ++ return 0;
5541 + }
5542 + e = (struct ip6t_entry *)
5543 + (entry0 + newpos);
5544 +@@ -569,27 +572,6 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net)
5545 + module_put(par.match->me);
5546 + }
5547 +
5548 +-static int
5549 +-check_entry(const struct ip6t_entry *e, const char *name)
5550 +-{
5551 +- const struct xt_entry_target *t;
5552 +-
5553 +- if (!ip6_checkentry(&e->ipv6)) {
5554 +- duprintf("ip_tables: ip check failed %p %s.\n", e, name);
5555 +- return -EINVAL;
5556 +- }
5557 +-
5558 +- if (e->target_offset + sizeof(struct xt_entry_target) >
5559 +- e->next_offset)
5560 +- return -EINVAL;
5561 +-
5562 +- t = ip6t_get_target_c(e);
5563 +- if (e->target_offset + t->u.target_size > e->next_offset)
5564 +- return -EINVAL;
5565 +-
5566 +- return 0;
5567 +-}
5568 +-
5569 + static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
5570 + {
5571 + const struct ip6t_ip6 *ipv6 = par->entryinfo;
5572 +@@ -668,10 +650,6 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
5573 + struct xt_mtchk_param mtpar;
5574 + struct xt_entry_match *ematch;
5575 +
5576 +- ret = check_entry(e, name);
5577 +- if (ret)
5578 +- return ret;
5579 +-
5580 + j = 0;
5581 + mtpar.net = net;
5582 + mtpar.table = name;
5583 +@@ -715,7 +693,7 @@ static bool check_underflow(const struct ip6t_entry *e)
5584 + const struct xt_entry_target *t;
5585 + unsigned int verdict;
5586 +
5587 +- if (!unconditional(&e->ipv6))
5588 ++ if (!unconditional(e))
5589 + return false;
5590 + t = ip6t_get_target_c(e);
5591 + if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
5592 +@@ -735,9 +713,11 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
5593 + unsigned int valid_hooks)
5594 + {
5595 + unsigned int h;
5596 ++ int err;
5597 +
5598 + if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
5599 +- (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
5600 ++ (unsigned char *)e + sizeof(struct ip6t_entry) >= limit ||
5601 ++ (unsigned char *)e + e->next_offset > limit) {
5602 + duprintf("Bad offset %p\n", e);
5603 + return -EINVAL;
5604 + }
5605 +@@ -749,6 +729,14 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
5606 + return -EINVAL;
5607 + }
5608 +
5609 ++ if (!ip6_checkentry(&e->ipv6))
5610 ++ return -EINVAL;
5611 ++
5612 ++ err = xt_check_entry_offsets(e, e->elems, e->target_offset,
5613 ++ e->next_offset);
5614 ++ if (err)
5615 ++ return err;
5616 ++
5617 + /* Check hooks & underflows */
5618 + for (h = 0; h < NF_INET_NUMHOOKS; h++) {
5619 + if (!(valid_hooks & (1 << h)))
5620 +@@ -757,9 +745,9 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
5621 + newinfo->hook_entry[h] = hook_entries[h];
5622 + if ((unsigned char *)e - base == underflows[h]) {
5623 + if (!check_underflow(e)) {
5624 +- pr_err("Underflows must be unconditional and "
5625 +- "use the STANDARD target with "
5626 +- "ACCEPT/DROP\n");
5627 ++ pr_debug("Underflows must be unconditional and "
5628 ++ "use the STANDARD target with "
5629 ++ "ACCEPT/DROP\n");
5630 + return -EINVAL;
5631 + }
5632 + newinfo->underflow[h] = underflows[h];
5633 +@@ -1268,6 +1256,9 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
5634 + /* overflow check */
5635 + if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
5636 + return -ENOMEM;
5637 ++ if (tmp.num_counters == 0)
5638 ++ return -EINVAL;
5639 ++
5640 + tmp.name[sizeof(tmp.name)-1] = 0;
5641 +
5642 + newinfo = xt_alloc_table_info(tmp.size);
5643 +@@ -1309,56 +1300,17 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
5644 + unsigned int i, curcpu;
5645 + struct xt_counters_info tmp;
5646 + struct xt_counters *paddc;
5647 +- unsigned int num_counters;
5648 +- char *name;
5649 +- int size;
5650 +- void *ptmp;
5651 + struct xt_table *t;
5652 + const struct xt_table_info *private;
5653 + int ret = 0;
5654 + const void *loc_cpu_entry;
5655 + struct ip6t_entry *iter;
5656 + unsigned int addend;
5657 +-#ifdef CONFIG_COMPAT
5658 +- struct compat_xt_counters_info compat_tmp;
5659 +-
5660 +- if (compat) {
5661 +- ptmp = &compat_tmp;
5662 +- size = sizeof(struct compat_xt_counters_info);
5663 +- } else
5664 +-#endif
5665 +- {
5666 +- ptmp = &tmp;
5667 +- size = sizeof(struct xt_counters_info);
5668 +- }
5669 +-
5670 +- if (copy_from_user(ptmp, user, size) != 0)
5671 +- return -EFAULT;
5672 +-
5673 +-#ifdef CONFIG_COMPAT
5674 +- if (compat) {
5675 +- num_counters = compat_tmp.num_counters;
5676 +- name = compat_tmp.name;
5677 +- } else
5678 +-#endif
5679 +- {
5680 +- num_counters = tmp.num_counters;
5681 +- name = tmp.name;
5682 +- }
5683 +
5684 +- if (len != size + num_counters * sizeof(struct xt_counters))
5685 +- return -EINVAL;
5686 +-
5687 +- paddc = vmalloc(len - size);
5688 +- if (!paddc)
5689 +- return -ENOMEM;
5690 +-
5691 +- if (copy_from_user(paddc, user + size, len - size) != 0) {
5692 +- ret = -EFAULT;
5693 +- goto free;
5694 +- }
5695 +-
5696 +- t = xt_find_table_lock(net, AF_INET6, name);
5697 ++ paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
5698 ++ if (IS_ERR(paddc))
5699 ++ return PTR_ERR(paddc);
5700 ++ t = xt_find_table_lock(net, AF_INET6, tmp.name);
5701 + if (IS_ERR_OR_NULL(t)) {
5702 + ret = t ? PTR_ERR(t) : -ENOENT;
5703 + goto free;
5704 +@@ -1367,7 +1319,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
5705 +
5706 + local_bh_disable();
5707 + private = t->private;
5708 +- if (private->number != num_counters) {
5709 ++ if (private->number != tmp.num_counters) {
5710 + ret = -EINVAL;
5711 + goto unlock_up_free;
5712 + }
5713 +@@ -1447,7 +1399,6 @@ compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
5714 +
5715 + static int
5716 + compat_find_calc_match(struct xt_entry_match *m,
5717 +- const char *name,
5718 + const struct ip6t_ip6 *ipv6,
5719 + unsigned int hookmask,
5720 + int *size)
5721 +@@ -1483,21 +1434,19 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
5722 + struct xt_table_info *newinfo,
5723 + unsigned int *size,
5724 + const unsigned char *base,
5725 +- const unsigned char *limit,
5726 +- const unsigned int *hook_entries,
5727 +- const unsigned int *underflows,
5728 +- const char *name)
5729 ++ const unsigned char *limit)
5730 + {
5731 + struct xt_entry_match *ematch;
5732 + struct xt_entry_target *t;
5733 + struct xt_target *target;
5734 + unsigned int entry_offset;
5735 + unsigned int j;
5736 +- int ret, off, h;
5737 ++ int ret, off;
5738 +
5739 + duprintf("check_compat_entry_size_and_hooks %p\n", e);
5740 + if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
5741 +- (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
5742 ++ (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit ||
5743 ++ (unsigned char *)e + e->next_offset > limit) {
5744 + duprintf("Bad offset %p, limit = %p\n", e, limit);
5745 + return -EINVAL;
5746 + }
5747 +@@ -1509,8 +1458,11 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
5748 + return -EINVAL;
5749 + }
5750 +
5751 +- /* For purposes of check_entry casting the compat entry is fine */
5752 +- ret = check_entry((struct ip6t_entry *)e, name);
5753 ++ if (!ip6_checkentry(&e->ipv6))
5754 ++ return -EINVAL;
5755 ++
5756 ++ ret = xt_compat_check_entry_offsets(e, e->elems,
5757 ++ e->target_offset, e->next_offset);
5758 + if (ret)
5759 + return ret;
5760 +
5761 +@@ -1518,8 +1470,8 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
5762 + entry_offset = (void *)e - (void *)base;
5763 + j = 0;
5764 + xt_ematch_foreach(ematch, e) {
5765 +- ret = compat_find_calc_match(ematch, name,
5766 +- &e->ipv6, e->comefrom, &off);
5767 ++ ret = compat_find_calc_match(ematch, &e->ipv6, e->comefrom,
5768 ++ &off);
5769 + if (ret != 0)
5770 + goto release_matches;
5771 + ++j;
5772 +@@ -1542,17 +1494,6 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
5773 + if (ret)
5774 + goto out;
5775 +
5776 +- /* Check hooks & underflows */
5777 +- for (h = 0; h < NF_INET_NUMHOOKS; h++) {
5778 +- if ((unsigned char *)e - base == hook_entries[h])
5779 +- newinfo->hook_entry[h] = hook_entries[h];
5780 +- if ((unsigned char *)e - base == underflows[h])
5781 +- newinfo->underflow[h] = underflows[h];
5782 +- }
5783 +-
5784 +- /* Clear counters and comefrom */
5785 +- memset(&e->counters, 0, sizeof(e->counters));
5786 +- e->comefrom = 0;
5787 + return 0;
5788 +
5789 + out:
5790 +@@ -1566,18 +1507,17 @@ release_matches:
5791 + return ret;
5792 + }
5793 +
5794 +-static int
5795 ++static void
5796 + compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
5797 +- unsigned int *size, const char *name,
5798 ++ unsigned int *size,
5799 + struct xt_table_info *newinfo, unsigned char *base)
5800 + {
5801 + struct xt_entry_target *t;
5802 + struct ip6t_entry *de;
5803 + unsigned int origsize;
5804 +- int ret, h;
5805 ++ int h;
5806 + struct xt_entry_match *ematch;
5807 +
5808 +- ret = 0;
5809 + origsize = *size;
5810 + de = (struct ip6t_entry *)*dstptr;
5811 + memcpy(de, e, sizeof(struct ip6t_entry));
5812 +@@ -1586,11 +1526,9 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
5813 + *dstptr += sizeof(struct ip6t_entry);
5814 + *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
5815 +
5816 +- xt_ematch_foreach(ematch, e) {
5817 +- ret = xt_compat_match_from_user(ematch, dstptr, size);
5818 +- if (ret != 0)
5819 +- return ret;
5820 +- }
5821 ++ xt_ematch_foreach(ematch, e)
5822 ++ xt_compat_match_from_user(ematch, dstptr, size);
5823 ++
5824 + de->target_offset = e->target_offset - (origsize - *size);
5825 + t = compat_ip6t_get_target(e);
5826 + xt_compat_target_from_user(t, dstptr, size);
5827 +@@ -1602,181 +1540,82 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
5828 + if ((unsigned char *)de - base < newinfo->underflow[h])
5829 + newinfo->underflow[h] -= origsize - *size;
5830 + }
5831 +- return ret;
5832 +-}
5833 +-
5834 +-static int compat_check_entry(struct ip6t_entry *e, struct net *net,
5835 +- const char *name)
5836 +-{
5837 +- unsigned int j;
5838 +- int ret = 0;
5839 +- struct xt_mtchk_param mtpar;
5840 +- struct xt_entry_match *ematch;
5841 +-
5842 +- j = 0;
5843 +- mtpar.net = net;
5844 +- mtpar.table = name;
5845 +- mtpar.entryinfo = &e->ipv6;
5846 +- mtpar.hook_mask = e->comefrom;
5847 +- mtpar.family = NFPROTO_IPV6;
5848 +- xt_ematch_foreach(ematch, e) {
5849 +- ret = check_match(ematch, &mtpar);
5850 +- if (ret != 0)
5851 +- goto cleanup_matches;
5852 +- ++j;
5853 +- }
5854 +-
5855 +- ret = check_target(e, net, name);
5856 +- if (ret)
5857 +- goto cleanup_matches;
5858 +- return 0;
5859 +-
5860 +- cleanup_matches:
5861 +- xt_ematch_foreach(ematch, e) {
5862 +- if (j-- == 0)
5863 +- break;
5864 +- cleanup_match(ematch, net);
5865 +- }
5866 +- return ret;
5867 + }
5868 +
5869 + static int
5870 + translate_compat_table(struct net *net,
5871 +- const char *name,
5872 +- unsigned int valid_hooks,
5873 + struct xt_table_info **pinfo,
5874 + void **pentry0,
5875 +- unsigned int total_size,
5876 +- unsigned int number,
5877 +- unsigned int *hook_entries,
5878 +- unsigned int *underflows)
5879 ++ const struct compat_ip6t_replace *compatr)
5880 + {
5881 + unsigned int i, j;
5882 + struct xt_table_info *newinfo, *info;
5883 + void *pos, *entry0, *entry1;
5884 + struct compat_ip6t_entry *iter0;
5885 +- struct ip6t_entry *iter1;
5886 ++ struct ip6t_replace repl;
5887 + unsigned int size;
5888 + int ret = 0;
5889 +
5890 + info = *pinfo;
5891 + entry0 = *pentry0;
5892 +- size = total_size;
5893 +- info->number = number;
5894 +-
5895 +- /* Init all hooks to impossible value. */
5896 +- for (i = 0; i < NF_INET_NUMHOOKS; i++) {
5897 +- info->hook_entry[i] = 0xFFFFFFFF;
5898 +- info->underflow[i] = 0xFFFFFFFF;
5899 +- }
5900 ++ size = compatr->size;
5901 ++ info->number = compatr->num_entries;
5902 +
5903 + duprintf("translate_compat_table: size %u\n", info->size);
5904 + j = 0;
5905 + xt_compat_lock(AF_INET6);
5906 +- xt_compat_init_offsets(AF_INET6, number);
5907 ++ xt_compat_init_offsets(AF_INET6, compatr->num_entries);
5908 + /* Walk through entries, checking offsets. */
5909 +- xt_entry_foreach(iter0, entry0, total_size) {
5910 ++ xt_entry_foreach(iter0, entry0, compatr->size) {
5911 + ret = check_compat_entry_size_and_hooks(iter0, info, &size,
5912 + entry0,
5913 +- entry0 + total_size,
5914 +- hook_entries,
5915 +- underflows,
5916 +- name);
5917 ++ entry0 + compatr->size);
5918 + if (ret != 0)
5919 + goto out_unlock;
5920 + ++j;
5921 + }
5922 +
5923 + ret = -EINVAL;
5924 +- if (j != number) {
5925 ++ if (j != compatr->num_entries) {
5926 + duprintf("translate_compat_table: %u not %u entries\n",
5927 +- j, number);
5928 ++ j, compatr->num_entries);
5929 + goto out_unlock;
5930 + }
5931 +
5932 +- /* Check hooks all assigned */
5933 +- for (i = 0; i < NF_INET_NUMHOOKS; i++) {
5934 +- /* Only hooks which are valid */
5935 +- if (!(valid_hooks & (1 << i)))
5936 +- continue;
5937 +- if (info->hook_entry[i] == 0xFFFFFFFF) {
5938 +- duprintf("Invalid hook entry %u %u\n",
5939 +- i, hook_entries[i]);
5940 +- goto out_unlock;
5941 +- }
5942 +- if (info->underflow[i] == 0xFFFFFFFF) {
5943 +- duprintf("Invalid underflow %u %u\n",
5944 +- i, underflows[i]);
5945 +- goto out_unlock;
5946 +- }
5947 +- }
5948 +-
5949 + ret = -ENOMEM;
5950 + newinfo = xt_alloc_table_info(size);
5951 + if (!newinfo)
5952 + goto out_unlock;
5953 +
5954 +- newinfo->number = number;
5955 ++ newinfo->number = compatr->num_entries;
5956 + for (i = 0; i < NF_INET_NUMHOOKS; i++) {
5957 +- newinfo->hook_entry[i] = info->hook_entry[i];
5958 +- newinfo->underflow[i] = info->underflow[i];
5959 ++ newinfo->hook_entry[i] = compatr->hook_entry[i];
5960 ++ newinfo->underflow[i] = compatr->underflow[i];
5961 + }
5962 + entry1 = newinfo->entries[raw_smp_processor_id()];
5963 + pos = entry1;
5964 +- size = total_size;
5965 +- xt_entry_foreach(iter0, entry0, total_size) {
5966 +- ret = compat_copy_entry_from_user(iter0, &pos, &size,
5967 +- name, newinfo, entry1);
5968 +- if (ret != 0)
5969 +- break;
5970 +- }
5971 ++ size = compatr->size;
5972 ++ xt_entry_foreach(iter0, entry0, compatr->size)
5973 ++ compat_copy_entry_from_user(iter0, &pos, &size,
5974 ++ newinfo, entry1);
5975 ++
5976 ++ /* all module references in entry0 are now gone. */
5977 + xt_compat_flush_offsets(AF_INET6);
5978 + xt_compat_unlock(AF_INET6);
5979 +- if (ret)
5980 +- goto free_newinfo;
5981 +
5982 +- ret = -ELOOP;
5983 +- if (!mark_source_chains(newinfo, valid_hooks, entry1))
5984 +- goto free_newinfo;
5985 ++ memcpy(&repl, compatr, sizeof(*compatr));
5986 +
5987 +- i = 0;
5988 +- xt_entry_foreach(iter1, entry1, newinfo->size) {
5989 +- ret = compat_check_entry(iter1, net, name);
5990 +- if (ret != 0)
5991 +- break;
5992 +- ++i;
5993 +- if (strcmp(ip6t_get_target(iter1)->u.user.name,
5994 +- XT_ERROR_TARGET) == 0)
5995 +- ++newinfo->stacksize;
5996 +- }
5997 +- if (ret) {
5998 +- /*
5999 +- * The first i matches need cleanup_entry (calls ->destroy)
6000 +- * because they had called ->check already. The other j-i
6001 +- * entries need only release.
6002 +- */
6003 +- int skip = i;
6004 +- j -= i;
6005 +- xt_entry_foreach(iter0, entry0, newinfo->size) {
6006 +- if (skip-- > 0)
6007 +- continue;
6008 +- if (j-- == 0)
6009 +- break;
6010 +- compat_release_entry(iter0);
6011 +- }
6012 +- xt_entry_foreach(iter1, entry1, newinfo->size) {
6013 +- if (i-- == 0)
6014 +- break;
6015 +- cleanup_entry(iter1, net);
6016 +- }
6017 +- xt_free_table_info(newinfo);
6018 +- return ret;
6019 ++ for (i = 0; i < NF_INET_NUMHOOKS; i++) {
6020 ++ repl.hook_entry[i] = newinfo->hook_entry[i];
6021 ++ repl.underflow[i] = newinfo->underflow[i];
6022 + }
6023 +
6024 +- /* And one copy for every other CPU */
6025 +- for_each_possible_cpu(i)
6026 +- if (newinfo->entries[i] && newinfo->entries[i] != entry1)
6027 +- memcpy(newinfo->entries[i], entry1, newinfo->size);
6028 ++ repl.num_counters = 0;
6029 ++ repl.counters = NULL;
6030 ++ repl.size = newinfo->size;
6031 ++ ret = translate_table(net, newinfo, entry1, &repl);
6032 ++ if (ret)
6033 ++ goto free_newinfo;
6034 +
6035 + *pinfo = newinfo;
6036 + *pentry0 = entry1;
6037 +@@ -1785,17 +1624,16 @@ translate_compat_table(struct net *net,
6038 +
6039 + free_newinfo:
6040 + xt_free_table_info(newinfo);
6041 +-out:
6042 +- xt_entry_foreach(iter0, entry0, total_size) {
6043 ++ return ret;
6044 ++out_unlock:
6045 ++ xt_compat_flush_offsets(AF_INET6);
6046 ++ xt_compat_unlock(AF_INET6);
6047 ++ xt_entry_foreach(iter0, entry0, compatr->size) {
6048 + if (j-- == 0)
6049 + break;
6050 + compat_release_entry(iter0);
6051 + }
6052 + return ret;
6053 +-out_unlock:
6054 +- xt_compat_flush_offsets(AF_INET6);
6055 +- xt_compat_unlock(AF_INET6);
6056 +- goto out;
6057 + }
6058 +
6059 + static int
6060 +@@ -1815,6 +1653,9 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
6061 + return -ENOMEM;
6062 + if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
6063 + return -ENOMEM;
6064 ++ if (tmp.num_counters == 0)
6065 ++ return -EINVAL;
6066 ++
6067 + tmp.name[sizeof(tmp.name)-1] = 0;
6068 +
6069 + newinfo = xt_alloc_table_info(tmp.size);
6070 +@@ -1829,10 +1670,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
6071 + goto free_newinfo;
6072 + }
6073 +
6074 +- ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
6075 +- &newinfo, &loc_cpu_entry, tmp.size,
6076 +- tmp.num_entries, tmp.hook_entry,
6077 +- tmp.underflow);
6078 ++ ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
6079 + if (ret != 0)
6080 + goto free_newinfo;
6081 +
6082 +diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
6083 +index 4ddf67c6355b..d9535bb8fe2e 100644
6084 +--- a/net/ipv6/sit.c
6085 ++++ b/net/ipv6/sit.c
6086 +@@ -530,13 +530,13 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
6087 +
6088 + if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
6089 + ipv4_update_pmtu(skb, dev_net(skb->dev), info,
6090 +- t->parms.link, 0, IPPROTO_IPV6, 0);
6091 ++ t->parms.link, 0, iph->protocol, 0);
6092 + err = 0;
6093 + goto out;
6094 + }
6095 + if (type == ICMP_REDIRECT) {
6096 + ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
6097 +- IPPROTO_IPV6, 0);
6098 ++ iph->protocol, 0);
6099 + err = 0;
6100 + goto out;
6101 + }
6102 +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
6103 +index 4659b8ab55d9..41c026f11edc 100644
6104 +--- a/net/ipv6/tcp_ipv6.c
6105 ++++ b/net/ipv6/tcp_ipv6.c
6106 +@@ -1767,7 +1767,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
6107 + destp = ntohs(inet->inet_dport);
6108 + srcp = ntohs(inet->inet_sport);
6109 +
6110 +- if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
6111 ++ if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
6112 ++ icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
6113 ++ icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
6114 + timer_active = 1;
6115 + timer_expires = icsk->icsk_timeout;
6116 + } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
6117 +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
6118 +index 3046d0244393..d234e6f80570 100644
6119 +--- a/net/ipv6/udp.c
6120 ++++ b/net/ipv6/udp.c
6121 +@@ -370,6 +370,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
6122 + int peeked, off = 0;
6123 + int err;
6124 + int is_udplite = IS_UDPLITE(sk);
6125 ++ bool checksum_valid = false;
6126 + int is_udp4;
6127 + bool slow;
6128 +
6129 +@@ -401,11 +402,12 @@ try_again:
6130 + */
6131 +
6132 + if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
6133 +- if (udp_lib_checksum_complete(skb))
6134 ++ checksum_valid = !udp_lib_checksum_complete(skb);
6135 ++ if (!checksum_valid)
6136 + goto csum_copy_err;
6137 + }
6138 +
6139 +- if (skb_csum_unnecessary(skb))
6140 ++ if (checksum_valid || skb_csum_unnecessary(skb))
6141 + err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
6142 + msg->msg_iov, copied);
6143 + else {
6144 +diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
6145 +index f8133ff5b081..c95bafa65f5b 100644
6146 +--- a/net/irda/af_irda.c
6147 ++++ b/net/irda/af_irda.c
6148 +@@ -1039,8 +1039,11 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
6149 + }
6150 +
6151 + /* Check if we have opened a local TSAP */
6152 +- if (!self->tsap)
6153 +- irda_open_tsap(self, LSAP_ANY, addr->sir_name);
6154 ++ if (!self->tsap) {
6155 ++ err = irda_open_tsap(self, LSAP_ANY, addr->sir_name);
6156 ++ if (err)
6157 ++ goto out;
6158 ++ }
6159 +
6160 + /* Move to connecting socket, start sending Connect Requests */
6161 + sock->state = SS_CONNECTING;
6162 +diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
6163 +index 6952760881c8..f8765cc84e47 100644
6164 +--- a/net/mac80211/mesh.c
6165 ++++ b/net/mac80211/mesh.c
6166 +@@ -161,6 +161,10 @@ void mesh_sta_cleanup(struct sta_info *sta)
6167 + del_timer_sync(&sta->plink_timer);
6168 + }
6169 +
6170 ++ /* make sure no readers can access nexthop sta from here on */
6171 ++ mesh_path_flush_by_nexthop(sta);
6172 ++ synchronize_net();
6173 ++
6174 + if (changed)
6175 + ieee80211_mbss_info_change_notify(sdata, changed);
6176 + }
6177 +diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
6178 +index 8b03028cca69..51c141b09dba 100644
6179 +--- a/net/netfilter/x_tables.c
6180 ++++ b/net/netfilter/x_tables.c
6181 +@@ -435,6 +435,47 @@ int xt_check_match(struct xt_mtchk_param *par,
6182 + }
6183 + EXPORT_SYMBOL_GPL(xt_check_match);
6184 +
6185 ++/** xt_check_entry_match - check that matches end before start of target
6186 ++ *
6187 ++ * @match: beginning of xt_entry_match
6188 ++ * @target: beginning of this rules target (alleged end of matches)
6189 ++ * @alignment: alignment requirement of match structures
6190 ++ *
6191 ++ * Validates that all matches add up to the beginning of the target,
6192 ++ * and that each match covers at least the base structure size.
6193 ++ *
6194 ++ * Return: 0 on success, negative errno on failure.
6195 ++ */
6196 ++static int xt_check_entry_match(const char *match, const char *target,
6197 ++ const size_t alignment)
6198 ++{
6199 ++ const struct xt_entry_match *pos;
6200 ++ int length = target - match;
6201 ++
6202 ++ if (length == 0) /* no matches */
6203 ++ return 0;
6204 ++
6205 ++ pos = (struct xt_entry_match *)match;
6206 ++ do {
6207 ++ if ((unsigned long)pos % alignment)
6208 ++ return -EINVAL;
6209 ++
6210 ++ if (length < (int)sizeof(struct xt_entry_match))
6211 ++ return -EINVAL;
6212 ++
6213 ++ if (pos->u.match_size < sizeof(struct xt_entry_match))
6214 ++ return -EINVAL;
6215 ++
6216 ++ if (pos->u.match_size > length)
6217 ++ return -EINVAL;
6218 ++
6219 ++ length -= pos->u.match_size;
6220 ++ pos = ((void *)((char *)(pos) + (pos)->u.match_size));
6221 ++ } while (length > 0);
6222 ++
6223 ++ return 0;
6224 ++}
6225 ++
6226 + #ifdef CONFIG_COMPAT
6227 + int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
6228 + {
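
xt_check_entry_match() above is a bounded walk over variable-sized records: each step checks alignment, that the record header fits in the remaining space, that the claimed size covers at least the header, and that the record does not run past the region. A self-contained user-space sketch of that walk (simplified record struct; C11 for _Alignof/_Alignas):

    /* Bounded walk over variable-sized records, as in
     * xt_check_entry_match(). */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct record { uint16_t size; };   /* total size incl. header */

    static int check_records(const char *start, const char *end)
    {
        long length = end - start;
        const struct record *pos = (const struct record *)start;

        if (length == 0)
            return 0;                   /* no records at all is fine */

        do {
            if ((uintptr_t)pos % _Alignof(struct record))
                return -1;              /* misaligned record */
            if (length < (long)sizeof(*pos))
                return -1;              /* header would overrun */
            if (pos->size < sizeof(*pos))
                return -1;              /* size cannot cover header */
            if (pos->size > length)
                return -1;              /* record overruns the region */

            length -= pos->size;
            pos = (const struct record *)((const char *)pos + pos->size);
        } while (length > 0);

        return 0;                       /* records end exactly at end */
    }

    int main(void)
    {
        _Alignas(struct record) char buf[8];
        struct record r = { 4 };        /* two records of 4 bytes each */

        memcpy(buf, &r, sizeof(r));
        memcpy(buf + 4, &r, sizeof(r));
        printf("valid: %d\n", check_records(buf, buf + 8) == 0);
        return 0;
    }
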
6229 +@@ -504,13 +545,14 @@ int xt_compat_match_offset(const struct xt_match *match)
6230 + }
6231 + EXPORT_SYMBOL_GPL(xt_compat_match_offset);
6232 +
6233 +-int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
6234 +- unsigned int *size)
6235 ++void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
6236 ++ unsigned int *size)
6237 + {
6238 + const struct xt_match *match = m->u.kernel.match;
6239 + struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
6240 + int pad, off = xt_compat_match_offset(match);
6241 + u_int16_t msize = cm->u.user.match_size;
6242 ++ char name[sizeof(m->u.user.name)];
6243 +
6244 + m = *dstptr;
6245 + memcpy(m, cm, sizeof(*cm));
6246 +@@ -524,10 +566,12 @@ int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
6247 +
6248 + msize += off;
6249 + m->u.user.match_size = msize;
6250 ++ strlcpy(name, match->name, sizeof(name));
6251 ++ module_put(match->me);
6252 ++ strncpy(m->u.user.name, name, sizeof(m->u.user.name));
6253 +
6254 + *size += off;
6255 + *dstptr += msize;
6256 +- return 0;
6257 + }
6258 + EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
6259 +
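
In xt_compat_match_from_user() above, match->name lives in the module that module_put() may release, so the fix snapshots the name into a stack buffer before dropping the reference and copies from the snapshot afterwards. The general shape, with a hypothetical release() standing in for module_put():

    /* Snapshot-before-release, the pattern behind the
     * strlcpy/module_put/strncpy sequence above. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct provider { char name[32]; };

    static void release(struct provider *p)
    {
        free(p);            /* after this, *p must not be read */
    }

    int main(void)
    {
        struct provider *p = malloc(sizeof(*p));
        snprintf(p->name, sizeof(p->name), "icmp");

        char name[sizeof(p->name)];
        snprintf(name, sizeof(name), "%s", p->name);  /* 1. copy */
        release(p);                                   /* 2. drop ref */
        printf("match name: %s\n", name);             /* 3. use copy */
        return 0;
    }
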
6260 +@@ -558,8 +602,125 @@ int xt_compat_match_to_user(const struct xt_entry_match *m,
6261 + return 0;
6262 + }
6263 + EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
6264 ++
6265 ++/* non-compat version may have padding after verdict */
6266 ++struct compat_xt_standard_target {
6267 ++ struct compat_xt_entry_target t;
6268 ++ compat_uint_t verdict;
6269 ++};
6270 ++
6271 ++int xt_compat_check_entry_offsets(const void *base, const char *elems,
6272 ++ unsigned int target_offset,
6273 ++ unsigned int next_offset)
6274 ++{
6275 ++ long size_of_base_struct = elems - (const char *)base;
6276 ++ const struct compat_xt_entry_target *t;
6277 ++ const char *e = base;
6278 ++
6279 ++ if (target_offset < size_of_base_struct)
6280 ++ return -EINVAL;
6281 ++
6282 ++ if (target_offset + sizeof(*t) > next_offset)
6283 ++ return -EINVAL;
6284 ++
6285 ++ t = (void *)(e + target_offset);
6286 ++ if (t->u.target_size < sizeof(*t))
6287 ++ return -EINVAL;
6288 ++
6289 ++ if (target_offset + t->u.target_size > next_offset)
6290 ++ return -EINVAL;
6291 ++
6292 ++ if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
6293 ++ COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset)
6294 ++ return -EINVAL;
6295 ++
6296 ++	/* compat_xt_entry_match has less strict alignment requirements,
6297 ++	 * otherwise the two are identical. In case of padding differences
6298 ++	 * we would need to add a compat version of xt_check_entry_match.
6299 ++ */
6300 ++ BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));
6301 ++
6302 ++ return xt_check_entry_match(elems, base + target_offset,
6303 ++ __alignof__(struct compat_xt_entry_match));
6304 ++}
6305 ++EXPORT_SYMBOL(xt_compat_check_entry_offsets);
6306 + #endif /* CONFIG_COMPAT */
6307 +
6308 ++/**
6309 ++ * xt_check_entry_offsets - validate arp/ip/ip6t_entry
6310 ++ *
6311 ++ * @base: pointer to arp/ip/ip6t_entry
6312 ++ * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems
6313 ++ * @target_offset: the arp/ip/ip6t_entry->target_offset
6314 ++ * @next_offset: the arp/ip/ip6t_entry->next_offset
6315 ++ *
6316 ++ * Validates that target_offset and next_offset are sane and that all
6317 ++ * match sizes (if any) align with the target offset.
6318 ++ *
6319 ++ * This function does not validate the targets or matches themselves; it
6320 ++ * only tests that all the offsets and sizes are correct, that all
6321 ++ * match structures are aligned, and that the last structure ends where
6322 ++ * the target structure begins.
6323 ++ *
6324 ++ * Also see xt_compat_check_entry_offsets for the CONFIG_COMPAT version.
6325 ++ *
6326 ++ * The arp/ip/ip6t_entry structure @base must have passed the following tests:
6327 ++ * - it must point to a valid memory location
6328 ++ * - base to base + next_offset must be accessible, i.e. not exceed allocated
6329 ++ * length.
6330 ++ *
6331 ++ * A well-formed entry looks like this:
6332 ++ *
6333 ++ * ip(6)t_entry match [mtdata] match [mtdata] target [tgdata] ip(6)t_entry
6334 ++ * e->elems[]-----' | |
6335 ++ * matchsize | |
6336 ++ * matchsize | |
6337 ++ * | |
6338 ++ * target_offset---------------------------------' |
6339 ++ * next_offset---------------------------------------------------'
6340 ++ *
6341 ++ * elems[]: flexible array member at end of ip(6)/arpt_entry struct.
6342 ++ * This is where matches (if any) and the target reside.
6343 ++ * target_offset: beginning of target.
6344 ++ * next_offset: start of the next rule; also: size of this rule.
6345 ++ * Since targets have a minimum size, target_offset + minlen <= next_offset.
6346 ++ *
6347 ++ * Every match stores its size, sum of sizes must not exceed target_offset.
6348 ++ *
6349 ++ * Return: 0 on success, negative errno on failure.
6350 ++ */
6351 ++int xt_check_entry_offsets(const void *base,
6352 ++ const char *elems,
6353 ++ unsigned int target_offset,
6354 ++ unsigned int next_offset)
6355 ++{
6356 ++ long size_of_base_struct = elems - (const char *)base;
6357 ++ const struct xt_entry_target *t;
6358 ++ const char *e = base;
6359 ++
6360 ++ /* target start is within the ip/ip6/arpt_entry struct */
6361 ++ if (target_offset < size_of_base_struct)
6362 ++ return -EINVAL;
6363 ++
6364 ++ if (target_offset + sizeof(*t) > next_offset)
6365 ++ return -EINVAL;
6366 ++
6367 ++ t = (void *)(e + target_offset);
6368 ++ if (t->u.target_size < sizeof(*t))
6369 ++ return -EINVAL;
6370 ++
6371 ++ if (target_offset + t->u.target_size > next_offset)
6372 ++ return -EINVAL;
6373 ++
6374 ++ if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
6375 ++ XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset)
6376 ++ return -EINVAL;
6377 ++
6378 ++ return xt_check_entry_match(elems, base + target_offset,
6379 ++ __alignof__(struct xt_entry_match));
6380 ++}
6381 ++EXPORT_SYMBOL(xt_check_entry_offsets);
6382 ++
6383 + int xt_check_target(struct xt_tgchk_param *par,
6384 + unsigned int size, u_int8_t proto, bool inv_proto)
6385 + {
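
The per-rule offset checks that xt_check_entry_offsets() applies, reduced to a self-contained program; the entry and target structs are simplified stand-ins for the arp/ip/ip6t definitions (C11 for _Alignas):

    /* Core offset sanity checks of xt_check_entry_offsets(), with
     * simplified stand-in structures. */
    #include <stdint.h>
    #include <stdio.h>

    struct entry  { uint16_t target_offset, next_offset; char elems[]; };
    struct target { uint16_t size; };

    static int check_offsets(const char *base, unsigned int target_offset,
                             unsigned int next_offset)
    {
        const struct target *t;

        if (target_offset < sizeof(struct entry))
            return -1;      /* target starts inside the fixed header */
        if (target_offset + sizeof(*t) > next_offset)
            return -1;      /* target header does not fit in the rule */

        t = (const void *)(base + target_offset);
        if (t->size < sizeof(*t))
            return -1;      /* claimed size smaller than the header */
        if (target_offset + t->size > next_offset)
            return -1;      /* target runs past the end of the rule */
        return 0;
    }

    int main(void)
    {
        _Alignas(4) char buf[16] = { 0 };
        struct entry *e = (void *)buf;

        e->target_offset = 4;           /* right after the fixed header */
        e->next_offset = 8;
        ((struct target *)(buf + 4))->size = 4;

        printf("ok: %d\n",
               check_offsets(buf, e->target_offset, e->next_offset) == 0);
        return 0;
    }
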
6386 +@@ -610,6 +771,80 @@ int xt_check_target(struct xt_tgchk_param *par,
6387 + }
6388 + EXPORT_SYMBOL_GPL(xt_check_target);
6389 +
6390 ++/**
6391 ++ * xt_copy_counters_from_user - copy counters and metadata from userspace
6392 ++ *
6393 ++ * @user: src pointer to userspace memory
6394 ++ * @len: alleged size of userspace memory
6395 ++ * @info: where to store the xt_counters_info metadata
6396 ++ * @compat: true if the setsockopt call is made by a 32bit task on a 64bit kernel
6397 ++ *
6398 ++ * Copies the counter metadata from @user and stores it in @info.
6399 ++ *
6400 ++ * Allocates memory with vmalloc() to hold the counters, then copies the
6401 ++ * counter data from @user into it and returns a pointer to that memory.
6402 ++ *
6403 ++ * If @compat is true, @info gets converted automatically to the 64bit
6404 ++ * representation.
6405 ++ *
6408 ++ * Return: pointer that the caller must test with IS_ERR().
6409 ++ * If IS_ERR() is false, the caller must vfree() the pointer.
6410 ++ */
6411 ++void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
6412 ++ struct xt_counters_info *info, bool compat)
6413 ++{
6414 ++ void *mem;
6415 ++ u64 size;
6416 ++
6417 ++#ifdef CONFIG_COMPAT
6418 ++ if (compat) {
6419 ++ /* structures only differ in size due to alignment */
6420 ++ struct compat_xt_counters_info compat_tmp;
6421 ++
6422 ++ if (len <= sizeof(compat_tmp))
6423 ++ return ERR_PTR(-EINVAL);
6424 ++
6425 ++ len -= sizeof(compat_tmp);
6426 ++ if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
6427 ++ return ERR_PTR(-EFAULT);
6428 ++
6429 ++ strlcpy(info->name, compat_tmp.name, sizeof(info->name));
6430 ++ info->num_counters = compat_tmp.num_counters;
6431 ++ user += sizeof(compat_tmp);
6432 ++ } else
6433 ++#endif
6434 ++ {
6435 ++ if (len <= sizeof(*info))
6436 ++ return ERR_PTR(-EINVAL);
6437 ++
6438 ++ len -= sizeof(*info);
6439 ++ if (copy_from_user(info, user, sizeof(*info)) != 0)
6440 ++ return ERR_PTR(-EFAULT);
6441 ++
6442 ++ info->name[sizeof(info->name) - 1] = '\0';
6443 ++ user += sizeof(*info);
6444 ++ }
6445 ++
6446 ++ size = sizeof(struct xt_counters);
6447 ++ size *= info->num_counters;
6448 ++
6449 ++ if (size != (u64)len)
6450 ++ return ERR_PTR(-EINVAL);
6451 ++
6452 ++ mem = vmalloc(len);
6453 ++ if (!mem)
6454 ++ return ERR_PTR(-ENOMEM);
6455 ++
6456 ++ if (copy_from_user(mem, user, len) == 0)
6457 ++ return mem;
6458 ++
6459 ++ vfree(mem);
6460 ++ return ERR_PTR(-EFAULT);
6461 ++}
6462 ++EXPORT_SYMBOL_GPL(xt_copy_counters_from_user);
6463 ++
6464 + #ifdef CONFIG_COMPAT
6465 + int xt_compat_target_offset(const struct xt_target *target)
6466 + {
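
Callers of xt_copy_counters_from_user() get either a buffer they must vfree() or an errno encoded in the pointer, which is the kernel's ERR_PTR()/IS_ERR() convention. A user-space approximation of that convention and the resulting call pattern (the helpers below mimic, not reuse, the kernel macros):

    /* User-space approximation of ERR_PTR()/IS_ERR(): a negative errno
     * is encoded in an invalid pointer value, so result and error come
     * back in one return value. */
    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_ERRNO 4095

    static void *ERR_PTR(long err)      { return (void *)err; }
    static long  PTR_ERR(const void *p) { return (long)p; }
    static int   IS_ERR(const void *p)
    {
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }

    static void *copy_counters(size_t claimed_len, size_t real_len)
    {
        if (claimed_len != real_len)
            return ERR_PTR(-EINVAL);    /* sizes must match exactly */
        void *mem = malloc(real_len);
        if (!mem)
            return ERR_PTR(-ENOMEM);
        return mem;                     /* caller owns this buffer */
    }

    int main(void)
    {
        void *c = copy_counters(64, 64);

        if (IS_ERR(c)) {
            fprintf(stderr, "error: %ld\n", PTR_ERR(c));
            return EXIT_FAILURE;
        }
        free(c);                        /* vfree() in the kernel */
        return 0;
    }
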
6467 +@@ -625,6 +860,7 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
6468 + struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
6469 + int pad, off = xt_compat_target_offset(target);
6470 + u_int16_t tsize = ct->u.user.target_size;
6471 ++ char name[sizeof(t->u.user.name)];
6472 +
6473 + t = *dstptr;
6474 + memcpy(t, ct, sizeof(*ct));
6475 +@@ -638,6 +874,9 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
6476 +
6477 + tsize += off;
6478 + t->u.user.target_size = tsize;
6479 ++ strlcpy(name, target->name, sizeof(name));
6480 ++ module_put(target->me);
6481 ++ strncpy(t->u.user.name, name, sizeof(t->u.user.name));
6482 +
6483 + *size += off;
6484 + *dstptr += tsize;
6485 +diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
6486 +index 7c94aedd0912..5b1fbe45ff0b 100644
6487 +--- a/net/netlabel/netlabel_kapi.c
6488 ++++ b/net/netlabel/netlabel_kapi.c
6489 +@@ -700,7 +700,11 @@ socket_setattr_return:
6490 + */
6491 + void netlbl_sock_delattr(struct sock *sk)
6492 + {
6493 +- cipso_v4_sock_delattr(sk);
6494 ++ switch (sk->sk_family) {
6495 ++ case AF_INET:
6496 ++ cipso_v4_sock_delattr(sk);
6497 ++ break;
6498 ++ }
6499 + }
6500 +
6501 + /**
6502 +@@ -879,7 +883,11 @@ req_setattr_return:
6503 + */
6504 + void netlbl_req_delattr(struct request_sock *req)
6505 + {
6506 +- cipso_v4_req_delattr(req);
6507 ++ switch (req->rsk_ops->family) {
6508 ++ case AF_INET:
6509 ++ cipso_v4_req_delattr(req);
6510 ++ break;
6511 ++ }
6512 + }
6513 +
6514 + /**
6515 +diff --git a/net/rfkill/rfkill-regulator.c b/net/rfkill/rfkill-regulator.c
6516 +index d11ac79246e4..cf5b145902e5 100644
6517 +--- a/net/rfkill/rfkill-regulator.c
6518 ++++ b/net/rfkill/rfkill-regulator.c
6519 +@@ -30,6 +30,7 @@ struct rfkill_regulator_data {
6520 + static int rfkill_regulator_set_block(void *data, bool blocked)
6521 + {
6522 + struct rfkill_regulator_data *rfkill_data = data;
6523 ++ int ret = 0;
6524 +
6525 + pr_debug("%s: blocked: %d\n", __func__, blocked);
6526 +
6527 +@@ -40,15 +41,16 @@ static int rfkill_regulator_set_block(void *data, bool blocked)
6528 + }
6529 + } else {
6530 + if (!rfkill_data->reg_enabled) {
6531 +- regulator_enable(rfkill_data->vcc);
6532 +- rfkill_data->reg_enabled = true;
6533 ++ ret = regulator_enable(rfkill_data->vcc);
6534 ++ if (!ret)
6535 ++ rfkill_data->reg_enabled = true;
6536 + }
6537 + }
6538 +
6539 + pr_debug("%s: regulator_is_enabled after set_block: %d\n", __func__,
6540 + regulator_is_enabled(rfkill_data->vcc));
6541 +
6542 +- return 0;
6543 ++ return ret;
6544 + }
6545 +
6546 + static struct rfkill_ops rfkill_regulator_ops = {
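
The rfkill-regulator fix marks the regulator as enabled only when regulator_enable() actually succeeded, and propagates the error otherwise, so the cached flag can no longer disagree with the hardware. The pattern in isolation (enable_hw() is a stand-in for regulator_enable()):

    /* Update cached state only on success, as in the
     * rfkill_regulator_set_block() fix. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool hw_on;
    static bool cached_enabled;

    static int enable_hw(void)
    {
        hw_on = true;
        return 0;                   /* a real driver may return -EIO */
    }

    static int set_unblocked(void)
    {
        int ret = 0;

        if (!cached_enabled) {
            ret = enable_hw();
            if (!ret)
                cached_enabled = true;  /* only when it really worked */
        }
        return ret;                     /* propagate failures */
    }

    int main(void)
    {
        printf("ret=%d cached=%d hw=%d\n",
               set_unblocked(), cached_enabled, hw_on);
        return 0;
    }
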
6547 +diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
6548 +index 8aab894aeabe..730914cdb7a1 100644
6549 +--- a/net/sctp/sm_sideeffect.c
6550 ++++ b/net/sctp/sm_sideeffect.c
6551 +@@ -251,12 +251,13 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
6552 + int error;
6553 + struct sctp_transport *transport = (struct sctp_transport *) peer;
6554 + struct sctp_association *asoc = transport->asoc;
6555 +- struct net *net = sock_net(asoc->base.sk);
6556 ++ struct sock *sk = asoc->base.sk;
6557 ++ struct net *net = sock_net(sk);
6558 +
6559 + /* Check whether a task is in the sock. */
6560 +
6561 +- sctp_bh_lock_sock(asoc->base.sk);
6562 +- if (sock_owned_by_user(asoc->base.sk)) {
6563 ++ sctp_bh_lock_sock(sk);
6564 ++ if (sock_owned_by_user(sk)) {
6565 + SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __func__);
6566 +
6567 + /* Try again later. */
6568 +@@ -279,10 +280,10 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
6569 + transport, GFP_ATOMIC);
6570 +
6571 + if (error)
6572 +- asoc->base.sk->sk_err = -error;
6573 ++ sk->sk_err = -error;
6574 +
6575 + out_unlock:
6576 +- sctp_bh_unlock_sock(asoc->base.sk);
6577 ++ sctp_bh_unlock_sock(sk);
6578 + sctp_transport_put(transport);
6579 + }
6580 +
6581 +@@ -292,11 +293,12 @@ out_unlock:
6582 + static void sctp_generate_timeout_event(struct sctp_association *asoc,
6583 + sctp_event_timeout_t timeout_type)
6584 + {
6585 +- struct net *net = sock_net(asoc->base.sk);
6586 ++ struct sock *sk = asoc->base.sk;
6587 ++ struct net *net = sock_net(sk);
6588 + int error = 0;
6589 +
6590 +- sctp_bh_lock_sock(asoc->base.sk);
6591 +- if (sock_owned_by_user(asoc->base.sk)) {
6592 ++ sctp_bh_lock_sock(sk);
6593 ++ if (sock_owned_by_user(sk)) {
6594 + SCTP_DEBUG_PRINTK("%s:Sock is busy: timer %d\n",
6595 + __func__,
6596 + timeout_type);
6597 +@@ -320,10 +322,10 @@ static void sctp_generate_timeout_event(struct sctp_association *asoc,
6598 + (void *)timeout_type, GFP_ATOMIC);
6599 +
6600 + if (error)
6601 +- asoc->base.sk->sk_err = -error;
6602 ++ sk->sk_err = -error;
6603 +
6604 + out_unlock:
6605 +- sctp_bh_unlock_sock(asoc->base.sk);
6606 ++ sctp_bh_unlock_sock(sk);
6607 + sctp_association_put(asoc);
6608 + }
6609 +
6610 +@@ -373,10 +375,11 @@ void sctp_generate_heartbeat_event(unsigned long data)
6611 + int error = 0;
6612 + struct sctp_transport *transport = (struct sctp_transport *) data;
6613 + struct sctp_association *asoc = transport->asoc;
6614 +- struct net *net = sock_net(asoc->base.sk);
6615 ++ struct sock *sk = asoc->base.sk;
6616 ++ struct net *net = sock_net(sk);
6617 +
6618 +- sctp_bh_lock_sock(asoc->base.sk);
6619 +- if (sock_owned_by_user(asoc->base.sk)) {
6620 ++ sctp_bh_lock_sock(sk);
6621 ++ if (sock_owned_by_user(sk)) {
6622 + SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __func__);
6623 +
6624 + /* Try again later. */
6625 +@@ -397,10 +400,10 @@ void sctp_generate_heartbeat_event(unsigned long data)
6626 + transport, GFP_ATOMIC);
6627 +
6628 + if (error)
6629 +- asoc->base.sk->sk_err = -error;
6630 ++ sk->sk_err = -error;
6631 +
6632 + out_unlock:
6633 +- sctp_bh_unlock_sock(asoc->base.sk);
6634 ++ sctp_bh_unlock_sock(sk);
6635 + sctp_transport_put(transport);
6636 + }
6637 +
6638 +@@ -411,10 +414,11 @@ void sctp_generate_proto_unreach_event(unsigned long data)
6639 + {
6640 + struct sctp_transport *transport = (struct sctp_transport *) data;
6641 + struct sctp_association *asoc = transport->asoc;
6642 +- struct net *net = sock_net(asoc->base.sk);
6643 ++ struct sock *sk = asoc->base.sk;
6644 ++ struct net *net = sock_net(sk);
6645 +
6646 +- sctp_bh_lock_sock(asoc->base.sk);
6647 +- if (sock_owned_by_user(asoc->base.sk)) {
6648 ++ sctp_bh_lock_sock(sk);
6649 ++ if (sock_owned_by_user(sk)) {
6650 + SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __func__);
6651 +
6652 + /* Try again later. */
6653 +@@ -435,7 +439,7 @@ void sctp_generate_proto_unreach_event(unsigned long data)
6654 + asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);
6655 +
6656 + out_unlock:
6657 +- sctp_bh_unlock_sock(asoc->base.sk);
6658 ++ sctp_bh_unlock_sock(sk);
6659 + sctp_association_put(asoc);
6660 + }
6661 +
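
Each sctp hunk reads asoc->base.sk once into a local sk, so the sctp_bh_lock_sock()/sctp_bh_unlock_sock() pair is guaranteed to act on the same socket even if the association is migrated to another socket while the timer handler runs; that pairing rule is the apparent point of the change. Illustrated with a plain mutex (made-up names; link with -lpthread):

    /* A lock/unlock pair must use the same object even if the field it
     * was loaded from is re-pointed in between. */
    #include <pthread.h>
    #include <stdio.h>

    struct sock_like { pthread_mutex_t lock; };
    struct assoc     { struct sock_like *sk; };

    static void timer_event(struct assoc *a)
    {
        struct sock_like *sk = a->sk;   /* read the pointer once */

        pthread_mutex_lock(&sk->lock);
        /* a->sk may be re-pointed by a concurrent migration here;
         * unlocking through a->sk could then hit the wrong mutex */
        pthread_mutex_unlock(&sk->lock);
    }

    int main(void)
    {
        struct sock_like s = { PTHREAD_MUTEX_INITIALIZER };
        struct assoc a = { &s };

        timer_event(&a);
        puts("lock and unlock paired on one socket");
        return 0;
    }
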
6662 +diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
6663 +index 29b4ba93ab3c..62663a08ffbd 100644
6664 +--- a/net/sunrpc/auth_gss/svcauth_gss.c
6665 ++++ b/net/sunrpc/auth_gss/svcauth_gss.c
6666 +@@ -859,8 +859,8 @@ unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct g
6667 + goto out;
6668 + if (svc_getnl(&buf->head[0]) != seq)
6669 + goto out;
6670 +- /* trim off the mic at the end before returning */
6671 +- xdr_buf_trim(buf, mic.len + 4);
6672 ++ /* trim off the mic and padding at the end before returning */
6673 ++ xdr_buf_trim(buf, round_up_to_quad(mic.len) + 4);
6674 + stat = 0;
6675 + out:
6676 + kfree(mic.data);
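
XDR pads opaque data to a 4-byte boundary, so trimming only mic.len + 4 bytes could leave up to three padding bytes behind; rounding the MIC length up to the next quad first removes the padding as well. The arithmetic, runnable:

    /* Quad (4-byte) rounding as used by the svcauth_gss fix: the trim
     * must cover the XDR padding after the MIC plus the 4-byte length
     * word. */
    #include <stdio.h>

    static unsigned int round_up_to_quad(unsigned int len)
    {
        return (len + 3) & ~3u;     /* next multiple of 4 */
    }

    int main(void)
    {
        for (unsigned int len = 9; len <= 12; len++)
            printf("mic.len=%2u -> trim %u bytes\n",
                   len, round_up_to_quad(len) + 4);
        return 0;
    }
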
6677 +diff --git a/scripts/asn1_compiler.c b/scripts/asn1_compiler.c
6678 +index db0e5cd34c70..91c4117637ae 100644
6679 +--- a/scripts/asn1_compiler.c
6680 ++++ b/scripts/asn1_compiler.c
6681 +@@ -1353,6 +1353,8 @@ static void render_out_of_line_list(FILE *out)
6682 + render_opcode(out, "ASN1_OP_END_SET_OF%s,\n", act);
6683 + render_opcode(out, "_jump_target(%u),\n", entry);
6684 + break;
6685 ++ default:
6686 ++ break;
6687 + }
6688 + if (e->action)
6689 + render_opcode(out, "_action(ACT_%s),\n",
6690 +diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
6691 +index ee625e3a56ba..4f7d13da04a5 100644
6692 +--- a/scripts/recordmcount.c
6693 ++++ b/scripts/recordmcount.c
6694 +@@ -33,10 +33,17 @@
6695 + #include <string.h>
6696 + #include <unistd.h>
6697 +
6698 ++/*
6699 ++ * glibc synced up and added the metag number but didn't add the relocations.
6700 ++ * Work around this in a crude manner for now.
6701 ++ */
6702 + #ifndef EM_METAG
6703 +-/* Remove this when these make it to the standard system elf.h. */
6704 + #define EM_METAG 174
6705 ++#endif
6706 ++#ifndef R_METAG_ADDR32
6707 + #define R_METAG_ADDR32 2
6708 ++#endif
6709 ++#ifndef R_METAG_NONE
6710 + #define R_METAG_NONE 3
6711 + #endif
6712 +
6713 +diff --git a/security/keys/key.c b/security/keys/key.c
6714 +index 8fb7c7bd4657..6595b2dd89fe 100644
6715 +--- a/security/keys/key.c
6716 ++++ b/security/keys/key.c
6717 +@@ -580,7 +580,7 @@ int key_reject_and_link(struct key *key,
6718 +
6719 + mutex_unlock(&key_construction_mutex);
6720 +
6721 +- if (keyring)
6722 ++ if (keyring && link_ret == 0)
6723 + __key_link_end(keyring, key->type, prealloc);
6724 +
6725 + /* wake up anyone waiting for a key to be constructed */
6726 +diff --git a/sound/core/control.c b/sound/core/control.c
6727 +index 3fcead61f0ef..251bc575f5c3 100644
6728 +--- a/sound/core/control.c
6729 ++++ b/sound/core/control.c
6730 +@@ -150,6 +150,8 @@ void snd_ctl_notify(struct snd_card *card, unsigned int mask,
6731 +
6732 + if (snd_BUG_ON(!card || !id))
6733 + return;
6734 ++ if (card->shutdown)
6735 ++ return;
6736 + read_lock(&card->ctl_files_rwlock);
6737 + #if defined(CONFIG_SND_MIXER_OSS) || defined(CONFIG_SND_MIXER_OSS_MODULE)
6738 + card->mixer_oss_change_count++;
6739 +diff --git a/sound/core/timer.c b/sound/core/timer.c
6740 +index 38742e826900..3476895ee1fb 100644
6741 +--- a/sound/core/timer.c
6742 ++++ b/sound/core/timer.c
6743 +@@ -1208,6 +1208,7 @@ static void snd_timer_user_ccallback(struct snd_timer_instance *timeri,
6744 + tu->tstamp = *tstamp;
6745 + if ((tu->filter & (1 << event)) == 0 || !tu->tread)
6746 + return;
6747 ++ memset(&r1, 0, sizeof(r1));
6748 + r1.event = event;
6749 + r1.tstamp = *tstamp;
6750 + r1.val = resolution;
6751 +@@ -1242,6 +1243,7 @@ static void snd_timer_user_tinterrupt(struct snd_timer_instance *timeri,
6752 + }
6753 + if ((tu->filter & (1 << SNDRV_TIMER_EVENT_RESOLUTION)) &&
6754 + tu->last_resolution != resolution) {
6755 ++ memset(&r1, 0, sizeof(r1));
6756 + r1.event = SNDRV_TIMER_EVENT_RESOLUTION;
6757 + r1.tstamp = tstamp;
6758 + r1.val = resolution;
6759 +@@ -1707,6 +1709,7 @@ static int snd_timer_user_params(struct file *file,
6760 + if (tu->timeri->flags & SNDRV_TIMER_IFLG_EARLY_EVENT) {
6761 + if (tu->tread) {
6762 + struct snd_timer_tread tread;
6763 ++ memset(&tread, 0, sizeof(tread));
6764 + tread.event = SNDRV_TIMER_EVENT_EARLY;
6765 + tread.tstamp.tv_sec = 0;
6766 + tread.tstamp.tv_nsec = 0;
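
The memset() calls added to timer.c matter because assigning a struct's named fields leaves its padding bytes untouched, and copying the raw struct to user space (as copy_to_user() does) then leaks whatever kernel stack data was there. A user-space demonstration of the effect (padding layout assumed typical):

    /* Why zeroing before filling closes an infoleak: padding bytes keep
     * their stale contents unless the whole struct is memset first. */
    #include <stdio.h>
    #include <string.h>

    struct event {
        char type;                  /* usually 3 padding bytes follow */
        int  value;
    };

    static void fill(struct event *e, int zero_first)
    {
        if (zero_first)
            memset(e, 0, sizeof(*e));
        e->type = 1;
        e->value = 42;
    }

    int main(void)
    {
        struct event e;

        memset(&e, 0xAA, sizeof(e));    /* simulate stale stack data */
        fill(&e, 0);
        printf("padding byte without memset: %#x\n",
               ((unsigned char *)&e)[1]);   /* still 0xaa: the leak */

        memset(&e, 0xAA, sizeof(e));
        fill(&e, 1);
        printf("padding byte with memset:    %#x\n",
               ((unsigned char *)&e)[1]);   /* now 0 */
        return 0;
    }
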
6767 +diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
6768 +index 982a2c2faf24..7f400a1d42e4 100644
6769 +--- a/sound/drivers/dummy.c
6770 ++++ b/sound/drivers/dummy.c
6771 +@@ -422,6 +422,7 @@ static int dummy_hrtimer_stop(struct snd_pcm_substream *substream)
6772 +
6773 + static inline void dummy_hrtimer_sync(struct dummy_hrtimer_pcm *dpcm)
6774 + {
6775 ++ hrtimer_cancel(&dpcm->timer);
6776 + tasklet_kill(&dpcm->tasklet);
6777 + }
6778 +
6779 +diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c
6780 +index ae59dbaa53d9..42d4b13f1fa7 100644
6781 +--- a/sound/pci/au88x0/au88x0_core.c
6782 ++++ b/sound/pci/au88x0/au88x0_core.c
6783 +@@ -1442,9 +1442,8 @@ static int vortex_wtdma_bufshift(vortex_t * vortex, int wtdma)
6784 + int page, p, pp, delta, i;
6785 +
6786 + page =
6787 +- (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2)) &
6788 +- WT_SUBBUF_MASK)
6789 +- >> WT_SUBBUF_SHIFT;
6790 ++ (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2))
6791 ++ >> WT_SUBBUF_SHIFT) & WT_SUBBUF_MASK;
6792 + if (dma->nr_periods >= 4)
6793 + delta = (page - dma->period_real) & 3;
6794 + else {
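
The au88x0 hunk reorders a bit-field extraction: the register value has to be shifted down first and masked second, because WT_SUBBUF_MASK is evidently defined relative to bit 0, and masking the unshifted value selects the wrong bits. In miniature (the shift and mask values below are made up):

    /* Bit-field extraction: shift first, then mask, assuming a mask
     * defined relative to bit 0 as the fix implies. */
    #include <stdio.h>

    #define SUBBUF_SHIFT 12
    #define SUBBUF_MASK  0x3u           /* 2-bit field */

    int main(void)
    {
        unsigned int reg = 0x2u << SUBBUF_SHIFT;    /* field holds 2 */

        unsigned int wrong = (reg & SUBBUF_MASK) >> SUBBUF_SHIFT;
        unsigned int right = (reg >> SUBBUF_SHIFT) & SUBBUF_MASK;

        printf("wrong=%u right=%u\n", wrong, right);  /* wrong=0 right=2 */
        return 0;
    }
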
6795 +diff --git a/sound/pci/oxygen/oxygen_mixer.c b/sound/pci/oxygen/oxygen_mixer.c
6796 +index c0dbb52d45be..1e4bcb900fc6 100644
6797 +--- a/sound/pci/oxygen/oxygen_mixer.c
6798 ++++ b/sound/pci/oxygen/oxygen_mixer.c
6799 +@@ -88,7 +88,7 @@ static int dac_mute_put(struct snd_kcontrol *ctl,
6800 + int changed;
6801 +
6802 + mutex_lock(&chip->mutex);
6803 +- changed = !value->value.integer.value[0] != chip->dac_mute;
6804 ++ changed = (!value->value.integer.value[0]) != chip->dac_mute;
6805 + if (changed) {
6806 + chip->dac_mute = !value->value.integer.value[0];
6807 + chip->model.update_dac_mute(chip);
6808 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
6809 +index 4f865e122c21..f71c4ad425c6 100644
6810 +--- a/virt/kvm/kvm_main.c
6811 ++++ b/virt/kvm/kvm_main.c
6812 +@@ -2447,7 +2447,7 @@ static long kvm_vm_ioctl(struct file *filp,
6813 + if (copy_from_user(&routing, argp, sizeof(routing)))
6814 + goto out;
6815 + r = -EINVAL;
6816 +- if (routing.nr >= KVM_MAX_IRQ_ROUTES)
6817 ++ if (routing.nr > KVM_MAX_IRQ_ROUTES)
6818 + goto out;
6819 + if (routing.flags)
6820 + goto out;