From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:3.12 commit in: /
Date: Tue, 29 Nov 2016 17:45:33
Message-Id: 1480441466.ba7372d114117fe69256e0716b71fbcf3df440c5.alicef@gentoo
1 commit: ba7372d114117fe69256e0716b71fbcf3df440c5
2 Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
3 AuthorDate: Tue Nov 29 17:44:26 2016 +0000
4 Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
5 CommitDate: Tue Nov 29 17:44:26 2016 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ba7372d1
7
8 Linux patch 3.12.68
9
10 0000_README | 4 +
11 1067_linux-3.12.68.patch | 4739 ++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 4743 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index 1f30ddb..b783e9c 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -310,6 +310,10 @@ Patch: 1066_linux-3.12.67.patch
19 From: http://www.kernel.org
20 Desc: Linux 3.12.67
21
22 +Patch: 1067_linux-3.12.68.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 3.12.68
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1067_linux-3.12.68.patch b/1067_linux-3.12.68.patch
31 new file mode 100644
32 index 0000000..b04202a
33 --- /dev/null
34 +++ b/1067_linux-3.12.68.patch
35 @@ -0,0 +1,4739 @@
36 +diff --git a/Makefile b/Makefile
37 +index 32dbd8513eee..6d86f39be8ce 100644
38 +--- a/Makefile
39 ++++ b/Makefile
40 +@@ -1,6 +1,6 @@
41 + VERSION = 3
42 + PATCHLEVEL = 12
43 +-SUBLEVEL = 67
44 ++SUBLEVEL = 68
45 + EXTRAVERSION =
46 + NAME = One Giant Leap for Frogkind
47 +
48 +@@ -378,11 +378,12 @@ KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
49 + -Werror-implicit-function-declaration \
50 + -Wno-format-security \
51 + -fno-delete-null-pointer-checks \
52 +- -std=gnu89
53 ++ -std=gnu89 $(call cc-option,-fno-PIE)
54 ++
55 +
56 + KBUILD_AFLAGS_KERNEL :=
57 + KBUILD_CFLAGS_KERNEL :=
58 +-KBUILD_AFLAGS := -D__ASSEMBLY__
59 ++KBUILD_AFLAGS := -D__ASSEMBLY__ $(call cc-option,-fno-PIE)
60 + KBUILD_AFLAGS_MODULE := -DMODULE
61 + KBUILD_CFLAGS_MODULE := -DMODULE
62 + KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
63 +diff --git a/arch/arm/include/asm/floppy.h b/arch/arm/include/asm/floppy.h
64 +index c9f03eccc9d8..b5a0466db549 100644
65 +--- a/arch/arm/include/asm/floppy.h
66 ++++ b/arch/arm/include/asm/floppy.h
67 +@@ -17,7 +17,7 @@
68 +
69 + #define fd_outb(val,port) \
70 + do { \
71 +- if ((port) == FD_DOR) \
72 ++ if ((port) == (u32)FD_DOR) \
73 + fd_setdor((val)); \
74 + else \
75 + outb((val),(port)); \
76 +diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
77 +index 883a162083af..05863e3ee2e7 100644
78 +--- a/arch/mips/include/asm/kvm_host.h
79 ++++ b/arch/mips/include/asm/kvm_host.h
80 +@@ -375,7 +375,10 @@ struct kvm_vcpu_arch {
81 + /* Host KSEG0 address of the EI/DI offset */
82 + void *kseg0_commpage;
83 +
84 +- u32 io_gpr; /* GPR used as IO source/target */
85 ++ /* Resume PC after MMIO completion */
86 ++ unsigned long io_pc;
87 ++ /* GPR used as IO source/target */
88 ++ u32 io_gpr;
89 +
90 + /* Used to calibrate the virtual count register for the guest */
91 + int32_t host_cp0_count;
92 +@@ -386,8 +389,6 @@ struct kvm_vcpu_arch {
93 + /* Bitmask of pending exceptions to be cleared */
94 + unsigned long pending_exceptions_clr;
95 +
96 +- unsigned long pending_load_cause;
97 +-
98 + /* Save/Restore the entryhi register when we are preempted/scheduled back in */
99 + unsigned long preempt_entryhi;
100 +
101 +diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
102 +index 8ab9958767bb..716285497e0e 100644
103 +--- a/arch/mips/kvm/kvm_mips_emul.c
104 ++++ b/arch/mips/kvm/kvm_mips_emul.c
105 +@@ -254,15 +254,15 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
106 + struct mips_coproc *cop0 = vcpu->arch.cop0;
107 + enum emulation_result er = EMULATE_DONE;
108 +
109 +- if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
110 ++ if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
111 ++ kvm_clear_c0_guest_status(cop0, ST0_ERL);
112 ++ vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
113 ++ } else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
114 + kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
115 + kvm_read_c0_guest_epc(cop0));
116 + kvm_clear_c0_guest_status(cop0, ST0_EXL);
117 + vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
118 +
119 +- } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
120 +- kvm_clear_c0_guest_status(cop0, ST0_ERL);
121 +- vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
122 + } else {
123 + printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
124 + vcpu->arch.pc);
125 +@@ -325,7 +325,7 @@ static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
126 + bool user;
127 +
128 + /* No need to flush for entries which are already invalid */
129 +- if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V))
130 ++ if (!((tlb->tlb_lo0 | tlb->tlb_lo1) & MIPS3_PG_V))
131 + return;
132 + /* User address space doesn't need flushing for KSeg2/3 changes */
133 + user = tlb->tlb_hi < KVM_GUEST_KSEG0;
134 +@@ -372,10 +372,8 @@ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
135 + }
136 +
137 + tlb = &vcpu->arch.guest_tlb[index];
138 +-#if 1
139 +
140 + kvm_mips_invalidate_guest_tlb(vcpu, tlb);
141 +-#endif
142 +
143 + tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
144 + tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
145 +@@ -414,9 +412,7 @@ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
146 +
147 + tlb = &vcpu->arch.guest_tlb[index];
148 +
149 +-#if 1
150 + kvm_mips_invalidate_guest_tlb(vcpu, tlb);
151 +-#endif
152 +
153 + tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
154 + tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
155 +@@ -822,6 +818,7 @@ kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
156 + struct kvm_run *run, struct kvm_vcpu *vcpu)
157 + {
158 + enum emulation_result er = EMULATE_DO_MMIO;
159 ++ unsigned long curr_pc;
160 + int32_t op, base, rt, offset;
161 + uint32_t bytes;
162 +
163 +@@ -830,7 +827,18 @@ kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
164 + offset = inst & 0xffff;
165 + op = (inst >> 26) & 0x3f;
166 +
167 +- vcpu->arch.pending_load_cause = cause;
168 ++ /*
169 ++ * Find the resume PC now while we have safe and easy access to the
170 ++ * prior branch instruction, and save it for
171 ++ * kvm_mips_complete_mmio_load() to restore later.
172 ++ */
173 ++ curr_pc = vcpu->arch.pc;
174 ++ er = update_pc(vcpu, cause);
175 ++ if (er == EMULATE_FAIL)
176 ++ return er;
177 ++ vcpu->arch.io_pc = vcpu->arch.pc;
178 ++ vcpu->arch.pc = curr_pc;
179 ++
180 + vcpu->arch.io_gpr = rt;
181 +
182 + switch (op) {
183 +@@ -1659,7 +1667,6 @@ kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
184 + {
185 + unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
186 + enum emulation_result er = EMULATE_DONE;
187 +- unsigned long curr_pc;
188 +
189 + if (run->mmio.len > sizeof(*gpr)) {
190 + printk("Bad MMIO length: %d", run->mmio.len);
191 +@@ -1667,14 +1674,8 @@ kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
192 + goto done;
193 + }
194 +
195 +- /*
196 +- * Update PC and hold onto current PC in case there is
197 +- * an error and we want to rollback the PC
198 +- */
199 +- curr_pc = vcpu->arch.pc;
200 +- er = update_pc(vcpu, vcpu->arch.pending_load_cause);
201 +- if (er == EMULATE_FAIL)
202 +- return er;
203 ++ /* Restore saved resume PC */
204 ++ vcpu->arch.pc = vcpu->arch.io_pc;
205 +
206 + switch (run->mmio.len) {
207 + case 4:
208 +@@ -1696,12 +1697,6 @@ kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
209 + break;
210 + }
211 +
212 +- if (vcpu->arch.pending_load_cause & CAUSEF_BD)
213 +- kvm_debug
214 +- ("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
215 +- vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
216 +- vcpu->mmio_needed);
217 +-
218 + done:
219 + return er;
220 + }
221 +diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
222 +index e205ef598e97..c247cf5a31cb 100644
223 +--- a/arch/mips/mm/init.c
224 ++++ b/arch/mips/mm/init.c
225 +@@ -74,6 +74,7 @@
226 + */
227 + unsigned long empty_zero_page, zero_page_mask;
228 + EXPORT_SYMBOL_GPL(empty_zero_page);
229 ++EXPORT_SYMBOL(zero_page_mask);
230 +
231 + /*
232 + * Not static inline because used by IP27 special magic initialization code
233 +diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
234 +index e767ab733e32..69caa82c50d3 100644
235 +--- a/arch/parisc/kernel/syscall.S
236 ++++ b/arch/parisc/kernel/syscall.S
237 +@@ -106,8 +106,6 @@ linux_gateway_entry:
238 + mtsp %r0,%sr4 /* get kernel space into sr4 */
239 + mtsp %r0,%sr5 /* get kernel space into sr5 */
240 + mtsp %r0,%sr6 /* get kernel space into sr6 */
241 +- mfsp %sr7,%r1 /* save user sr7 */
242 +- mtsp %r1,%sr3 /* and store it in sr3 */
243 +
244 + #ifdef CONFIG_64BIT
245 + /* for now we can *always* set the W bit on entry to the syscall
246 +@@ -133,6 +131,14 @@ linux_gateway_entry:
247 + depdi 0, 31, 32, %r21
248 + 1:
249 + #endif
250 ++
251 ++ /* We use a rsm/ssm pair to prevent sr3 from being clobbered
252 ++ * by external interrupts.
253 ++ */
254 ++ mfsp %sr7,%r1 /* save user sr7 */
255 ++ rsm PSW_SM_I, %r0 /* disable interrupts */
256 ++ mtsp %r1,%sr3 /* and store it in sr3 */
257 ++
258 + mfctl %cr30,%r1
259 + xor %r1,%r30,%r30 /* ye olde xor trick */
260 + xor %r1,%r30,%r1
261 +@@ -147,6 +153,7 @@ linux_gateway_entry:
262 + */
263 +
264 + mtsp %r0,%sr7 /* get kernel space into sr7 */
265 ++ ssm PSW_SM_I, %r0 /* enable interrupts */
266 + STREGM %r1,FRAME_SIZE(%r30) /* save r1 (usp) here for now */
267 + mfctl %cr30,%r1 /* get task ptr in %r1 */
268 + LDREG TI_TASK(%r1),%r1
269 +diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
270 +index 5eeffeefae06..d73124df5d32 100644
271 +--- a/arch/s390/hypfs/hypfs_diag.c
272 ++++ b/arch/s390/hypfs/hypfs_diag.c
273 +@@ -517,11 +517,11 @@ static int diag224(void *ptr)
274 + static int diag224_get_name_table(void)
275 + {
276 + /* memory must be below 2GB */
277 +- diag224_cpu_names = kmalloc(PAGE_SIZE, GFP_KERNEL | GFP_DMA);
278 ++ diag224_cpu_names = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
279 + if (!diag224_cpu_names)
280 + return -ENOMEM;
281 + if (diag224(diag224_cpu_names)) {
282 +- kfree(diag224_cpu_names);
283 ++ free_page((unsigned long) diag224_cpu_names);
284 + return -EOPNOTSUPP;
285 + }
286 + EBCASC(diag224_cpu_names + 16, (*diag224_cpu_names + 1) * 16);
287 +@@ -530,7 +530,7 @@ static int diag224_get_name_table(void)
288 +
289 + static void diag224_delete_name_table(void)
290 + {
291 +- kfree(diag224_cpu_names);
292 ++ free_page((unsigned long) diag224_cpu_names);
293 + }
294 +
295 + static int diag224_idx2name(int index, char *name)
296 +diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
297 +index ad446b0c55b6..1b30d5488f82 100644
298 +--- a/arch/s390/mm/init.c
299 ++++ b/arch/s390/mm/init.c
300 +@@ -43,6 +43,7 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
301 +
302 + unsigned long empty_zero_page, zero_page_mask;
303 + EXPORT_SYMBOL(empty_zero_page);
304 ++EXPORT_SYMBOL(zero_page_mask);
305 +
306 + static void __init setup_zero_pages(void)
307 + {
308 +diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
309 +index f668797ae234..4994815fccc7 100644
310 +--- a/arch/sparc/include/asm/mmu_64.h
311 ++++ b/arch/sparc/include/asm/mmu_64.h
312 +@@ -92,7 +92,8 @@ struct tsb_config {
313 + typedef struct {
314 + spinlock_t lock;
315 + unsigned long sparc64_ctx_val;
316 +- unsigned long huge_pte_count;
317 ++ unsigned long hugetlb_pte_count;
318 ++ unsigned long thp_pte_count;
319 + struct tsb_config tsb_block[MM_NUM_TSBS];
320 + struct hv_tsb_descr tsb_descr[MM_NUM_TSBS];
321 + } mm_context_t;
322 +diff --git a/arch/sparc/kernel/dtlb_prot.S b/arch/sparc/kernel/dtlb_prot.S
323 +index d668ca149e64..4087a62f96b0 100644
324 +--- a/arch/sparc/kernel/dtlb_prot.S
325 ++++ b/arch/sparc/kernel/dtlb_prot.S
326 +@@ -25,13 +25,13 @@
327 +
328 + /* PROT ** ICACHE line 2: More real fault processing */
329 + ldxa [%g4] ASI_DMMU, %g5 ! Put tagaccess in %g5
330 ++ srlx %g5, PAGE_SHIFT, %g5
331 ++ sllx %g5, PAGE_SHIFT, %g5 ! Clear context ID bits
332 + bgu,pn %xcc, winfix_trampoline ! Yes, perform winfixup
333 + mov FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4
334 + ba,pt %xcc, sparc64_realfault_common ! Nope, normal fault
335 + nop
336 + nop
337 +- nop
338 +- nop
339 +
340 + /* PROT ** ICACHE line 3: Unused... */
341 + nop
342 +diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c
343 +index 48565c11e82a..6d0dacb5812d 100644
344 +--- a/arch/sparc/kernel/jump_label.c
345 ++++ b/arch/sparc/kernel/jump_label.c
346 +@@ -13,19 +13,30 @@
347 + void arch_jump_label_transform(struct jump_entry *entry,
348 + enum jump_label_type type)
349 + {
350 +- u32 val;
351 + u32 *insn = (u32 *) (unsigned long) entry->code;
352 ++ u32 val;
353 +
354 + if (type == JUMP_LABEL_ENABLE) {
355 + s32 off = (s32)entry->target - (s32)entry->code;
356 ++ bool use_v9_branch = false;
357 ++
358 ++ BUG_ON(off & 3);
359 +
360 + #ifdef CONFIG_SPARC64
361 +- /* ba,pt %xcc, . + (off << 2) */
362 +- val = 0x10680000 | ((u32) off >> 2);
363 +-#else
364 +- /* ba . + (off << 2) */
365 +- val = 0x10800000 | ((u32) off >> 2);
366 ++ if (off <= 0xfffff && off >= -0x100000)
367 ++ use_v9_branch = true;
368 + #endif
369 ++ if (use_v9_branch) {
370 ++ /* WDISP19 - target is . + immed << 2 */
371 ++ /* ba,pt %xcc, . + off */
372 ++ val = 0x10680000 | (((u32) off >> 2) & 0x7ffff);
373 ++ } else {
374 ++ /* WDISP22 - target is . + immed << 2 */
375 ++ BUG_ON(off > 0x7fffff);
376 ++ BUG_ON(off < -0x800000);
377 ++ /* ba . + off */
378 ++ val = 0x10800000 | (((u32) off >> 2) & 0x3fffff);
379 ++ }
380 + } else {
381 + val = 0x01000000;
382 + }
383 +diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S
384 +index ef0d8e9e1210..f22bec0db645 100644
385 +--- a/arch/sparc/kernel/ktlb.S
386 ++++ b/arch/sparc/kernel/ktlb.S
387 +@@ -20,6 +20,10 @@ kvmap_itlb:
388 + mov TLB_TAG_ACCESS, %g4
389 + ldxa [%g4] ASI_IMMU, %g4
390 +
391 ++ /* The kernel executes in context zero, therefore we do not
392 ++ * need to clear the context ID bits out of %g4 here.
393 ++ */
394 ++
395 + /* sun4v_itlb_miss branches here with the missing virtual
396 + * address already loaded into %g4
397 + */
398 +@@ -128,6 +132,10 @@ kvmap_dtlb:
399 + mov TLB_TAG_ACCESS, %g4
400 + ldxa [%g4] ASI_DMMU, %g4
401 +
402 ++ /* The kernel executes in context zero, therefore we do not
403 ++ * need to clear the context ID bits out of %g4 here.
404 ++ */
405 ++
406 + /* sun4v_dtlb_miss branches here with the missing virtual
407 + * address already loaded into %g4
408 + */
409 +@@ -251,6 +259,10 @@ kvmap_dtlb_longpath:
410 + nop
411 + .previous
412 +
413 ++ /* The kernel executes in context zero, therefore we do not
414 ++ * need to clear the context ID bits out of %g5 here.
415 ++ */
416 ++
417 + be,pt %xcc, sparc64_realfault_common
418 + mov FAULT_CODE_DTLB, %g4
419 + ba,pt %xcc, winfix_trampoline
420 +diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
421 +index be98685c14c6..d568c8207af7 100644
422 +--- a/arch/sparc/kernel/tsb.S
423 ++++ b/arch/sparc/kernel/tsb.S
424 +@@ -29,13 +29,17 @@
425 + */
426 + tsb_miss_dtlb:
427 + mov TLB_TAG_ACCESS, %g4
428 ++ ldxa [%g4] ASI_DMMU, %g4
429 ++ srlx %g4, PAGE_SHIFT, %g4
430 + ba,pt %xcc, tsb_miss_page_table_walk
431 +- ldxa [%g4] ASI_DMMU, %g4
432 ++ sllx %g4, PAGE_SHIFT, %g4
433 +
434 + tsb_miss_itlb:
435 + mov TLB_TAG_ACCESS, %g4
436 ++ ldxa [%g4] ASI_IMMU, %g4
437 ++ srlx %g4, PAGE_SHIFT, %g4
438 + ba,pt %xcc, tsb_miss_page_table_walk
439 +- ldxa [%g4] ASI_IMMU, %g4
440 ++ sllx %g4, PAGE_SHIFT, %g4
441 +
442 + /* At this point we have:
443 + * %g1 -- PAGE_SIZE TSB entry address
444 +@@ -284,6 +288,10 @@ tsb_do_dtlb_fault:
445 + nop
446 + .previous
447 +
448 ++ /* Clear context ID bits. */
449 ++ srlx %g5, PAGE_SHIFT, %g5
450 ++ sllx %g5, PAGE_SHIFT, %g5
451 ++
452 + be,pt %xcc, sparc64_realfault_common
453 + mov FAULT_CODE_DTLB, %g4
454 + ba,pt %xcc, winfix_trampoline
455 +diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
456 +index c7009d7762b1..a21917c8f44f 100644
457 +--- a/arch/sparc/mm/fault_64.c
458 ++++ b/arch/sparc/mm/fault_64.c
459 +@@ -478,14 +478,14 @@ good_area:
460 + up_read(&mm->mmap_sem);
461 +
462 + mm_rss = get_mm_rss(mm);
463 +-#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
464 +- mm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE));
465 ++#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
466 ++ mm_rss -= (mm->context.thp_pte_count * (HPAGE_SIZE / PAGE_SIZE));
467 + #endif
468 + if (unlikely(mm_rss >
469 + mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
470 + tsb_grow(mm, MM_TSB_BASE, mm_rss);
471 + #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
472 +- mm_rss = mm->context.huge_pte_count;
473 ++ mm_rss = mm->context.hugetlb_pte_count + mm->context.thp_pte_count;
474 + if (unlikely(mm_rss >
475 + mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) {
476 + if (mm->context.tsb_block[MM_TSB_HUGE].tsb)
477 +diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
478 +index d941cd024f22..387ae1e9b462 100644
479 +--- a/arch/sparc/mm/hugetlbpage.c
480 ++++ b/arch/sparc/mm/hugetlbpage.c
481 +@@ -184,7 +184,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
482 + int i;
483 +
484 + if (!pte_present(*ptep) && pte_present(entry))
485 +- mm->context.huge_pte_count++;
486 ++ mm->context.hugetlb_pte_count++;
487 +
488 + addr &= HPAGE_MASK;
489 + for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
490 +@@ -203,7 +203,7 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
491 +
492 + entry = *ptep;
493 + if (pte_present(entry))
494 +- mm->context.huge_pte_count--;
495 ++ mm->context.hugetlb_pte_count--;
496 +
497 + addr &= HPAGE_MASK;
498 +
499 +diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
500 +index 9633e0706d6e..4650a3840305 100644
501 +--- a/arch/sparc/mm/init_64.c
502 ++++ b/arch/sparc/mm/init_64.c
503 +@@ -353,7 +353,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
504 + spin_lock_irqsave(&mm->context.lock, flags);
505 +
506 + #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
507 +- if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
508 ++ if ((mm->context.hugetlb_pte_count || mm->context.thp_pte_count) &&
509 ++ is_hugetlb_pte(pte))
510 + __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
511 + address, pte_val(pte));
512 + else
513 +diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
514 +index c24d0aa2b615..56b820924b07 100644
515 +--- a/arch/sparc/mm/tlb.c
516 ++++ b/arch/sparc/mm/tlb.c
517 +@@ -166,9 +166,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
518 +
519 + if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
520 + if (pmd_val(pmd) & _PAGE_PMD_HUGE)
521 +- mm->context.huge_pte_count++;
522 ++ mm->context.thp_pte_count++;
523 + else
524 +- mm->context.huge_pte_count--;
525 ++ mm->context.thp_pte_count--;
526 +
527 + /* Do not try to allocate the TSB hash table if we
528 + * don't have one already. We have various locks held
529 +diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
530 +index 10a69f47745a..48a09e48d444 100644
531 +--- a/arch/sparc/mm/tsb.c
532 ++++ b/arch/sparc/mm/tsb.c
533 +@@ -26,6 +26,20 @@ static inline int tag_compare(unsigned long tag, unsigned long vaddr)
534 + return (tag == (vaddr >> 22));
535 + }
536 +
537 ++static void flush_tsb_kernel_range_scan(unsigned long start, unsigned long end)
538 ++{
539 ++ unsigned long idx;
540 ++
541 ++ for (idx = 0; idx < KERNEL_TSB_NENTRIES; idx++) {
542 ++ struct tsb *ent = &swapper_tsb[idx];
543 ++ unsigned long match = idx << 13;
544 ++
545 ++ match |= (ent->tag << 22);
546 ++ if (match >= start && match < end)
547 ++ ent->tag = (1UL << TSB_TAG_INVALID_BIT);
548 ++ }
549 ++}
550 ++
551 + /* TSB flushes need only occur on the processor initiating the address
552 + * space modification, not on each cpu the address space has run on.
553 + * Only the TLB flush needs that treatment.
554 +@@ -35,6 +49,9 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
555 + {
556 + unsigned long v;
557 +
558 ++ if ((end - start) >> PAGE_SHIFT >= 2 * KERNEL_TSB_NENTRIES)
559 ++ return flush_tsb_kernel_range_scan(start, end);
560 ++
561 + for (v = start; v < end; v += PAGE_SIZE) {
562 + unsigned long hash = tsb_hash(v, PAGE_SHIFT,
563 + KERNEL_TSB_NENTRIES);
564 +@@ -467,7 +484,7 @@ retry_tsb_alloc:
565 + int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
566 + {
567 + #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
568 +- unsigned long huge_pte_count;
569 ++ unsigned long total_huge_pte_count;
570 + #endif
571 + unsigned int i;
572 +
573 +@@ -476,12 +493,14 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
574 + mm->context.sparc64_ctx_val = 0UL;
575 +
576 + #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
577 +- /* We reset it to zero because the fork() page copying
578 ++ /* We reset them to zero because the fork() page copying
579 + * will re-increment the counters as the parent PTEs are
580 + * copied into the child address space.
581 + */
582 +- huge_pte_count = mm->context.huge_pte_count;
583 +- mm->context.huge_pte_count = 0;
584 ++ total_huge_pte_count = mm->context.hugetlb_pte_count +
585 ++ mm->context.thp_pte_count;
586 ++ mm->context.hugetlb_pte_count = 0;
587 ++ mm->context.thp_pte_count = 0;
588 + #endif
589 +
590 + /* copy_mm() copies over the parent's mm_struct before calling
591 +@@ -497,8 +516,8 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
592 + tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));
593 +
594 + #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
595 +- if (unlikely(huge_pte_count))
596 +- tsb_grow(mm, MM_TSB_HUGE, huge_pte_count);
597 ++ if (unlikely(total_huge_pte_count))
598 ++ tsb_grow(mm, MM_TSB_HUGE, total_huge_pte_count);
599 + #endif
600 +
601 + if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
602 +diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
603 +index b4f4733abc6e..5d2fd6cd3189 100644
604 +--- a/arch/sparc/mm/ultra.S
605 ++++ b/arch/sparc/mm/ultra.S
606 +@@ -30,7 +30,7 @@
607 + .text
608 + .align 32
609 + .globl __flush_tlb_mm
610 +-__flush_tlb_mm: /* 18 insns */
611 ++__flush_tlb_mm: /* 19 insns */
612 + /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
613 + ldxa [%o1] ASI_DMMU, %g2
614 + cmp %g2, %o0
615 +@@ -81,7 +81,7 @@ __flush_tlb_page: /* 22 insns */
616 +
617 + .align 32
618 + .globl __flush_tlb_pending
619 +-__flush_tlb_pending: /* 26 insns */
620 ++__flush_tlb_pending: /* 27 insns */
621 + /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
622 + rdpr %pstate, %g7
623 + sllx %o1, 3, %o1
624 +@@ -113,12 +113,14 @@ __flush_tlb_pending: /* 26 insns */
625 +
626 + .align 32
627 + .globl __flush_tlb_kernel_range
628 +-__flush_tlb_kernel_range: /* 16 insns */
629 ++__flush_tlb_kernel_range: /* 31 insns */
630 + /* %o0=start, %o1=end */
631 + cmp %o0, %o1
632 + be,pn %xcc, 2f
633 ++ sub %o1, %o0, %o3
634 ++ srlx %o3, 18, %o4
635 ++ brnz,pn %o4, __spitfire_flush_tlb_kernel_range_slow
636 + sethi %hi(PAGE_SIZE), %o4
637 +- sub %o1, %o0, %o3
638 + sub %o3, %o4, %o3
639 + or %o0, 0x20, %o0 ! Nucleus
640 + 1: stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP
641 +@@ -131,6 +133,41 @@ __flush_tlb_kernel_range: /* 16 insns */
642 + retl
643 + nop
644 + nop
645 ++ nop
646 ++ nop
647 ++ nop
648 ++ nop
649 ++ nop
650 ++ nop
651 ++ nop
652 ++ nop
653 ++ nop
654 ++ nop
655 ++ nop
656 ++ nop
657 ++ nop
658 ++
659 ++__spitfire_flush_tlb_kernel_range_slow:
660 ++ mov 63 * 8, %o4
661 ++1: ldxa [%o4] ASI_ITLB_DATA_ACCESS, %o3
662 ++ andcc %o3, 0x40, %g0 /* _PAGE_L_4U */
663 ++ bne,pn %xcc, 2f
664 ++ mov TLB_TAG_ACCESS, %o3
665 ++ stxa %g0, [%o3] ASI_IMMU
666 ++ stxa %g0, [%o4] ASI_ITLB_DATA_ACCESS
667 ++ membar #Sync
668 ++2: ldxa [%o4] ASI_DTLB_DATA_ACCESS, %o3
669 ++ andcc %o3, 0x40, %g0
670 ++ bne,pn %xcc, 2f
671 ++ mov TLB_TAG_ACCESS, %o3
672 ++ stxa %g0, [%o3] ASI_DMMU
673 ++ stxa %g0, [%o4] ASI_DTLB_DATA_ACCESS
674 ++ membar #Sync
675 ++2: sub %o4, 8, %o4
676 ++ brgez,pt %o4, 1b
677 ++ nop
678 ++ retl
679 ++ nop
680 +
681 + __spitfire_flush_tlb_mm_slow:
682 + rdpr %pstate, %g1
683 +@@ -285,6 +322,40 @@ __cheetah_flush_tlb_pending: /* 27 insns */
684 + retl
685 + wrpr %g7, 0x0, %pstate
686 +
687 ++__cheetah_flush_tlb_kernel_range: /* 31 insns */
688 ++ /* %o0=start, %o1=end */
689 ++ cmp %o0, %o1
690 ++ be,pn %xcc, 2f
691 ++ sub %o1, %o0, %o3
692 ++ srlx %o3, 18, %o4
693 ++ brnz,pn %o4, 3f
694 ++ sethi %hi(PAGE_SIZE), %o4
695 ++ sub %o3, %o4, %o3
696 ++ or %o0, 0x20, %o0 ! Nucleus
697 ++1: stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP
698 ++ stxa %g0, [%o0 + %o3] ASI_IMMU_DEMAP
699 ++ membar #Sync
700 ++ brnz,pt %o3, 1b
701 ++ sub %o3, %o4, %o3
702 ++2: sethi %hi(KERNBASE), %o3
703 ++ flush %o3
704 ++ retl
705 ++ nop
706 ++3: mov 0x80, %o4
707 ++ stxa %g0, [%o4] ASI_DMMU_DEMAP
708 ++ membar #Sync
709 ++ stxa %g0, [%o4] ASI_IMMU_DEMAP
710 ++ membar #Sync
711 ++ retl
712 ++ nop
713 ++ nop
714 ++ nop
715 ++ nop
716 ++ nop
717 ++ nop
718 ++ nop
719 ++ nop
720 ++
721 + #ifdef DCACHE_ALIASING_POSSIBLE
722 + __cheetah_flush_dcache_page: /* 11 insns */
723 + sethi %hi(PAGE_OFFSET), %g1
724 +@@ -309,19 +380,28 @@ __hypervisor_tlb_tl0_error:
725 + ret
726 + restore
727 +
728 +-__hypervisor_flush_tlb_mm: /* 10 insns */
729 ++__hypervisor_flush_tlb_mm: /* 19 insns */
730 + mov %o0, %o2 /* ARG2: mmu context */
731 + mov 0, %o0 /* ARG0: CPU lists unimplemented */
732 + mov 0, %o1 /* ARG1: CPU lists unimplemented */
733 + mov HV_MMU_ALL, %o3 /* ARG3: flags */
734 + mov HV_FAST_MMU_DEMAP_CTX, %o5
735 + ta HV_FAST_TRAP
736 +- brnz,pn %o0, __hypervisor_tlb_tl0_error
737 ++ brnz,pn %o0, 1f
738 + mov HV_FAST_MMU_DEMAP_CTX, %o1
739 + retl
740 + nop
741 ++1: sethi %hi(__hypervisor_tlb_tl0_error), %o5
742 ++ jmpl %o5 + %lo(__hypervisor_tlb_tl0_error), %g0
743 ++ nop
744 ++ nop
745 ++ nop
746 ++ nop
747 ++ nop
748 ++ nop
749 ++ nop
750 +
751 +-__hypervisor_flush_tlb_page: /* 11 insns */
752 ++__hypervisor_flush_tlb_page: /* 22 insns */
753 + /* %o0 = context, %o1 = vaddr */
754 + mov %o0, %g2
755 + mov %o1, %o0 /* ARG0: vaddr + IMMU-bit */
756 +@@ -330,12 +410,23 @@ __hypervisor_flush_tlb_page: /* 11 insns */
757 + srlx %o0, PAGE_SHIFT, %o0
758 + sllx %o0, PAGE_SHIFT, %o0
759 + ta HV_MMU_UNMAP_ADDR_TRAP
760 +- brnz,pn %o0, __hypervisor_tlb_tl0_error
761 ++ brnz,pn %o0, 1f
762 + mov HV_MMU_UNMAP_ADDR_TRAP, %o1
763 + retl
764 + nop
765 ++1: sethi %hi(__hypervisor_tlb_tl0_error), %o2
766 ++ jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0
767 ++ nop
768 ++ nop
769 ++ nop
770 ++ nop
771 ++ nop
772 ++ nop
773 ++ nop
774 ++ nop
775 ++ nop
776 +
777 +-__hypervisor_flush_tlb_pending: /* 16 insns */
778 ++__hypervisor_flush_tlb_pending: /* 27 insns */
779 + /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
780 + sllx %o1, 3, %g1
781 + mov %o2, %g2
782 +@@ -347,31 +438,57 @@ __hypervisor_flush_tlb_pending: /* 16 insns */
783 + srlx %o0, PAGE_SHIFT, %o0
784 + sllx %o0, PAGE_SHIFT, %o0
785 + ta HV_MMU_UNMAP_ADDR_TRAP
786 +- brnz,pn %o0, __hypervisor_tlb_tl0_error
787 ++ brnz,pn %o0, 1f
788 + mov HV_MMU_UNMAP_ADDR_TRAP, %o1
789 + brnz,pt %g1, 1b
790 + nop
791 + retl
792 + nop
793 ++1: sethi %hi(__hypervisor_tlb_tl0_error), %o2
794 ++ jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0
795 ++ nop
796 ++ nop
797 ++ nop
798 ++ nop
799 ++ nop
800 ++ nop
801 ++ nop
802 ++ nop
803 ++ nop
804 +
805 +-__hypervisor_flush_tlb_kernel_range: /* 16 insns */
806 ++__hypervisor_flush_tlb_kernel_range: /* 31 insns */
807 + /* %o0=start, %o1=end */
808 + cmp %o0, %o1
809 + be,pn %xcc, 2f
810 +- sethi %hi(PAGE_SIZE), %g3
811 +- mov %o0, %g1
812 +- sub %o1, %g1, %g2
813 ++ sub %o1, %o0, %g2
814 ++ srlx %g2, 18, %g3
815 ++ brnz,pn %g3, 4f
816 ++ mov %o0, %g1
817 ++ sethi %hi(PAGE_SIZE), %g3
818 + sub %g2, %g3, %g2
819 + 1: add %g1, %g2, %o0 /* ARG0: virtual address */
820 + mov 0, %o1 /* ARG1: mmu context */
821 + mov HV_MMU_ALL, %o2 /* ARG2: flags */
822 + ta HV_MMU_UNMAP_ADDR_TRAP
823 +- brnz,pn %o0, __hypervisor_tlb_tl0_error
824 ++ brnz,pn %o0, 3f
825 + mov HV_MMU_UNMAP_ADDR_TRAP, %o1
826 + brnz,pt %g2, 1b
827 + sub %g2, %g3, %g2
828 + 2: retl
829 + nop
830 ++3: sethi %hi(__hypervisor_tlb_tl0_error), %o2
831 ++ jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0
832 ++ nop
833 ++4: mov 0, %o0 /* ARG0: CPU lists unimplemented */
834 ++ mov 0, %o1 /* ARG1: CPU lists unimplemented */
835 ++ mov 0, %o2 /* ARG2: mmu context == nucleus */
836 ++ mov HV_MMU_ALL, %o3 /* ARG3: flags */
837 ++ mov HV_FAST_MMU_DEMAP_CTX, %o5
838 ++ ta HV_FAST_TRAP
839 ++ brnz,pn %o0, 3b
840 ++ mov HV_FAST_MMU_DEMAP_CTX, %o1
841 ++ retl
842 ++ nop
843 +
844 + #ifdef DCACHE_ALIASING_POSSIBLE
845 + /* XXX Niagara and friends have an 8K cache, so no aliasing is
846 +@@ -394,43 +511,6 @@ tlb_patch_one:
847 + retl
848 + nop
849 +
850 +- .globl cheetah_patch_cachetlbops
851 +-cheetah_patch_cachetlbops:
852 +- save %sp, -128, %sp
853 +-
854 +- sethi %hi(__flush_tlb_mm), %o0
855 +- or %o0, %lo(__flush_tlb_mm), %o0
856 +- sethi %hi(__cheetah_flush_tlb_mm), %o1
857 +- or %o1, %lo(__cheetah_flush_tlb_mm), %o1
858 +- call tlb_patch_one
859 +- mov 19, %o2
860 +-
861 +- sethi %hi(__flush_tlb_page), %o0
862 +- or %o0, %lo(__flush_tlb_page), %o0
863 +- sethi %hi(__cheetah_flush_tlb_page), %o1
864 +- or %o1, %lo(__cheetah_flush_tlb_page), %o1
865 +- call tlb_patch_one
866 +- mov 22, %o2
867 +-
868 +- sethi %hi(__flush_tlb_pending), %o0
869 +- or %o0, %lo(__flush_tlb_pending), %o0
870 +- sethi %hi(__cheetah_flush_tlb_pending), %o1
871 +- or %o1, %lo(__cheetah_flush_tlb_pending), %o1
872 +- call tlb_patch_one
873 +- mov 27, %o2
874 +-
875 +-#ifdef DCACHE_ALIASING_POSSIBLE
876 +- sethi %hi(__flush_dcache_page), %o0
877 +- or %o0, %lo(__flush_dcache_page), %o0
878 +- sethi %hi(__cheetah_flush_dcache_page), %o1
879 +- or %o1, %lo(__cheetah_flush_dcache_page), %o1
880 +- call tlb_patch_one
881 +- mov 11, %o2
882 +-#endif /* DCACHE_ALIASING_POSSIBLE */
883 +-
884 +- ret
885 +- restore
886 +-
887 + #ifdef CONFIG_SMP
888 + /* These are all called by the slaves of a cross call, at
889 + * trap level 1, with interrupts fully disabled.
890 +@@ -447,7 +527,7 @@ cheetah_patch_cachetlbops:
891 + */
892 + .align 32
893 + .globl xcall_flush_tlb_mm
894 +-xcall_flush_tlb_mm: /* 21 insns */
895 ++xcall_flush_tlb_mm: /* 24 insns */
896 + mov PRIMARY_CONTEXT, %g2
897 + ldxa [%g2] ASI_DMMU, %g3
898 + srlx %g3, CTX_PGSZ1_NUC_SHIFT, %g4
899 +@@ -469,9 +549,12 @@ xcall_flush_tlb_mm: /* 21 insns */
900 + nop
901 + nop
902 + nop
903 ++ nop
904 ++ nop
905 ++ nop
906 +
907 + .globl xcall_flush_tlb_page
908 +-xcall_flush_tlb_page: /* 17 insns */
909 ++xcall_flush_tlb_page: /* 20 insns */
910 + /* %g5=context, %g1=vaddr */
911 + mov PRIMARY_CONTEXT, %g4
912 + ldxa [%g4] ASI_DMMU, %g2
913 +@@ -490,15 +573,20 @@ xcall_flush_tlb_page: /* 17 insns */
914 + retry
915 + nop
916 + nop
917 ++ nop
918 ++ nop
919 ++ nop
920 +
921 + .globl xcall_flush_tlb_kernel_range
922 +-xcall_flush_tlb_kernel_range: /* 25 insns */
923 ++xcall_flush_tlb_kernel_range: /* 44 insns */
924 + sethi %hi(PAGE_SIZE - 1), %g2
925 + or %g2, %lo(PAGE_SIZE - 1), %g2
926 + andn %g1, %g2, %g1
927 + andn %g7, %g2, %g7
928 + sub %g7, %g1, %g3
929 +- add %g2, 1, %g2
930 ++ srlx %g3, 18, %g2
931 ++ brnz,pn %g2, 2f
932 ++ add %g2, 1, %g2
933 + sub %g3, %g2, %g3
934 + or %g1, 0x20, %g1 ! Nucleus
935 + 1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP
936 +@@ -507,8 +595,25 @@ xcall_flush_tlb_kernel_range: /* 25 insns */
937 + brnz,pt %g3, 1b
938 + sub %g3, %g2, %g3
939 + retry
940 +- nop
941 +- nop
942 ++2: mov 63 * 8, %g1
943 ++1: ldxa [%g1] ASI_ITLB_DATA_ACCESS, %g2
944 ++ andcc %g2, 0x40, %g0 /* _PAGE_L_4U */
945 ++ bne,pn %xcc, 2f
946 ++ mov TLB_TAG_ACCESS, %g2
947 ++ stxa %g0, [%g2] ASI_IMMU
948 ++ stxa %g0, [%g1] ASI_ITLB_DATA_ACCESS
949 ++ membar #Sync
950 ++2: ldxa [%g1] ASI_DTLB_DATA_ACCESS, %g2
951 ++ andcc %g2, 0x40, %g0
952 ++ bne,pn %xcc, 2f
953 ++ mov TLB_TAG_ACCESS, %g2
954 ++ stxa %g0, [%g2] ASI_DMMU
955 ++ stxa %g0, [%g1] ASI_DTLB_DATA_ACCESS
956 ++ membar #Sync
957 ++2: sub %g1, 8, %g1
958 ++ brgez,pt %g1, 1b
959 ++ nop
960 ++ retry
961 + nop
962 + nop
963 + nop
964 +@@ -637,6 +742,52 @@ xcall_fetch_glob_pmu_n4:
965 +
966 + retry
967 +
968 ++__cheetah_xcall_flush_tlb_kernel_range: /* 44 insns */
969 ++ sethi %hi(PAGE_SIZE - 1), %g2
970 ++ or %g2, %lo(PAGE_SIZE - 1), %g2
971 ++ andn %g1, %g2, %g1
972 ++ andn %g7, %g2, %g7
973 ++ sub %g7, %g1, %g3
974 ++ srlx %g3, 18, %g2
975 ++ brnz,pn %g2, 2f
976 ++ add %g2, 1, %g2
977 ++ sub %g3, %g2, %g3
978 ++ or %g1, 0x20, %g1 ! Nucleus
979 ++1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP
980 ++ stxa %g0, [%g1 + %g3] ASI_IMMU_DEMAP
981 ++ membar #Sync
982 ++ brnz,pt %g3, 1b
983 ++ sub %g3, %g2, %g3
984 ++ retry
985 ++2: mov 0x80, %g2
986 ++ stxa %g0, [%g2] ASI_DMMU_DEMAP
987 ++ membar #Sync
988 ++ stxa %g0, [%g2] ASI_IMMU_DEMAP
989 ++ membar #Sync
990 ++ retry
991 ++ nop
992 ++ nop
993 ++ nop
994 ++ nop
995 ++ nop
996 ++ nop
997 ++ nop
998 ++ nop
999 ++ nop
1000 ++ nop
1001 ++ nop
1002 ++ nop
1003 ++ nop
1004 ++ nop
1005 ++ nop
1006 ++ nop
1007 ++ nop
1008 ++ nop
1009 ++ nop
1010 ++ nop
1011 ++ nop
1012 ++ nop
1013 ++
1014 + #ifdef DCACHE_ALIASING_POSSIBLE
1015 + .align 32
1016 + .globl xcall_flush_dcache_page_cheetah
1017 +@@ -700,7 +851,7 @@ __hypervisor_tlb_xcall_error:
1018 + ba,a,pt %xcc, rtrap
1019 +
1020 + .globl __hypervisor_xcall_flush_tlb_mm
1021 +-__hypervisor_xcall_flush_tlb_mm: /* 21 insns */
1022 ++__hypervisor_xcall_flush_tlb_mm: /* 24 insns */
1023 + /* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
1024 + mov %o0, %g2
1025 + mov %o1, %g3
1026 +@@ -714,7 +865,7 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */
1027 + mov HV_FAST_MMU_DEMAP_CTX, %o5
1028 + ta HV_FAST_TRAP
1029 + mov HV_FAST_MMU_DEMAP_CTX, %g6
1030 +- brnz,pn %o0, __hypervisor_tlb_xcall_error
1031 ++ brnz,pn %o0, 1f
1032 + mov %o0, %g5
1033 + mov %g2, %o0
1034 + mov %g3, %o1
1035 +@@ -723,9 +874,12 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */
1036 + mov %g7, %o5
1037 + membar #Sync
1038 + retry
1039 ++1: sethi %hi(__hypervisor_tlb_xcall_error), %g4
1040 ++ jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0
1041 ++ nop
1042 +
1043 + .globl __hypervisor_xcall_flush_tlb_page
1044 +-__hypervisor_xcall_flush_tlb_page: /* 17 insns */
1045 ++__hypervisor_xcall_flush_tlb_page: /* 20 insns */
1046 + /* %g5=ctx, %g1=vaddr */
1047 + mov %o0, %g2
1048 + mov %o1, %g3
1049 +@@ -737,42 +891,64 @@ __hypervisor_xcall_flush_tlb_page: /* 17 insns */
1050 + sllx %o0, PAGE_SHIFT, %o0
1051 + ta HV_MMU_UNMAP_ADDR_TRAP
1052 + mov HV_MMU_UNMAP_ADDR_TRAP, %g6
1053 +- brnz,a,pn %o0, __hypervisor_tlb_xcall_error
1054 ++ brnz,a,pn %o0, 1f
1055 + mov %o0, %g5
1056 + mov %g2, %o0
1057 + mov %g3, %o1
1058 + mov %g4, %o2
1059 + membar #Sync
1060 + retry
1061 ++1: sethi %hi(__hypervisor_tlb_xcall_error), %g4
1062 ++ jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0
1063 ++ nop
1064 +
1065 + .globl __hypervisor_xcall_flush_tlb_kernel_range
1066 +-__hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */
1067 ++__hypervisor_xcall_flush_tlb_kernel_range: /* 44 insns */
1068 + /* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
1069 + sethi %hi(PAGE_SIZE - 1), %g2
1070 + or %g2, %lo(PAGE_SIZE - 1), %g2
1071 + andn %g1, %g2, %g1
1072 + andn %g7, %g2, %g7
1073 + sub %g7, %g1, %g3
1074 ++ srlx %g3, 18, %g7
1075 + add %g2, 1, %g2
1076 + sub %g3, %g2, %g3
1077 + mov %o0, %g2
1078 + mov %o1, %g4
1079 +- mov %o2, %g7
1080 ++ brnz,pn %g7, 2f
1081 ++ mov %o2, %g7
1082 + 1: add %g1, %g3, %o0 /* ARG0: virtual address */
1083 + mov 0, %o1 /* ARG1: mmu context */
1084 + mov HV_MMU_ALL, %o2 /* ARG2: flags */
1085 + ta HV_MMU_UNMAP_ADDR_TRAP
1086 + mov HV_MMU_UNMAP_ADDR_TRAP, %g6
1087 +- brnz,pn %o0, __hypervisor_tlb_xcall_error
1088 ++ brnz,pn %o0, 1f
1089 + mov %o0, %g5
1090 + sethi %hi(PAGE_SIZE), %o2
1091 + brnz,pt %g3, 1b
1092 + sub %g3, %o2, %g3
1093 +- mov %g2, %o0
1094 ++5: mov %g2, %o0
1095 + mov %g4, %o1
1096 + mov %g7, %o2
1097 + membar #Sync
1098 + retry
1099 ++1: sethi %hi(__hypervisor_tlb_xcall_error), %g4
1100 ++ jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0
1101 ++ nop
1102 ++2: mov %o3, %g1
1103 ++ mov %o5, %g3
1104 ++ mov 0, %o0 /* ARG0: CPU lists unimplemented */
1105 ++ mov 0, %o1 /* ARG1: CPU lists unimplemented */
1106 ++ mov 0, %o2 /* ARG2: mmu context == nucleus */
1107 ++ mov HV_MMU_ALL, %o3 /* ARG3: flags */
1108 ++ mov HV_FAST_MMU_DEMAP_CTX, %o5
1109 ++ ta HV_FAST_TRAP
1110 ++ mov %g1, %o3
1111 ++ brz,pt %o0, 5b
1112 ++ mov %g3, %o5
1113 ++ mov HV_FAST_MMU_DEMAP_CTX, %g6
1114 ++ ba,pt %xcc, 1b
1115 ++ clr %g5
1116 +
1117 + /* These just get rescheduled to PIL vectors. */
1118 + .globl xcall_call_function
1119 +@@ -809,6 +985,58 @@ xcall_kgdb_capture:
1120 +
1121 + #endif /* CONFIG_SMP */
1122 +
1123 ++ .globl cheetah_patch_cachetlbops
1124 ++cheetah_patch_cachetlbops:
1125 ++ save %sp, -128, %sp
1126 ++
1127 ++ sethi %hi(__flush_tlb_mm), %o0
1128 ++ or %o0, %lo(__flush_tlb_mm), %o0
1129 ++ sethi %hi(__cheetah_flush_tlb_mm), %o1
1130 ++ or %o1, %lo(__cheetah_flush_tlb_mm), %o1
1131 ++ call tlb_patch_one
1132 ++ mov 19, %o2
1133 ++
1134 ++ sethi %hi(__flush_tlb_page), %o0
1135 ++ or %o0, %lo(__flush_tlb_page), %o0
1136 ++ sethi %hi(__cheetah_flush_tlb_page), %o1
1137 ++ or %o1, %lo(__cheetah_flush_tlb_page), %o1
1138 ++ call tlb_patch_one
1139 ++ mov 22, %o2
1140 ++
1141 ++ sethi %hi(__flush_tlb_pending), %o0
1142 ++ or %o0, %lo(__flush_tlb_pending), %o0
1143 ++ sethi %hi(__cheetah_flush_tlb_pending), %o1
1144 ++ or %o1, %lo(__cheetah_flush_tlb_pending), %o1
1145 ++ call tlb_patch_one
1146 ++ mov 27, %o2
1147 ++
1148 ++ sethi %hi(__flush_tlb_kernel_range), %o0
1149 ++ or %o0, %lo(__flush_tlb_kernel_range), %o0
1150 ++ sethi %hi(__cheetah_flush_tlb_kernel_range), %o1
1151 ++ or %o1, %lo(__cheetah_flush_tlb_kernel_range), %o1
1152 ++ call tlb_patch_one
1153 ++ mov 31, %o2
1154 ++
1155 ++#ifdef DCACHE_ALIASING_POSSIBLE
1156 ++ sethi %hi(__flush_dcache_page), %o0
1157 ++ or %o0, %lo(__flush_dcache_page), %o0
1158 ++ sethi %hi(__cheetah_flush_dcache_page), %o1
1159 ++ or %o1, %lo(__cheetah_flush_dcache_page), %o1
1160 ++ call tlb_patch_one
1161 ++ mov 11, %o2
1162 ++#endif /* DCACHE_ALIASING_POSSIBLE */
1163 ++
1164 ++#ifdef CONFIG_SMP
1165 ++ sethi %hi(xcall_flush_tlb_kernel_range), %o0
1166 ++ or %o0, %lo(xcall_flush_tlb_kernel_range), %o0
1167 ++ sethi %hi(__cheetah_xcall_flush_tlb_kernel_range), %o1
1168 ++ or %o1, %lo(__cheetah_xcall_flush_tlb_kernel_range), %o1
1169 ++ call tlb_patch_one
1170 ++ mov 44, %o2
1171 ++#endif /* CONFIG_SMP */
1172 ++
1173 ++ ret
1174 ++ restore
1175 +
1176 + .globl hypervisor_patch_cachetlbops
1177 + hypervisor_patch_cachetlbops:
1178 +@@ -819,28 +1047,28 @@ hypervisor_patch_cachetlbops:
1179 + sethi %hi(__hypervisor_flush_tlb_mm), %o1
1180 + or %o1, %lo(__hypervisor_flush_tlb_mm), %o1
1181 + call tlb_patch_one
1182 +- mov 10, %o2
1183 ++ mov 19, %o2
1184 +
1185 + sethi %hi(__flush_tlb_page), %o0
1186 + or %o0, %lo(__flush_tlb_page), %o0
1187 + sethi %hi(__hypervisor_flush_tlb_page), %o1
1188 + or %o1, %lo(__hypervisor_flush_tlb_page), %o1
1189 + call tlb_patch_one
1190 +- mov 11, %o2
1191 ++ mov 22, %o2
1192 +
1193 + sethi %hi(__flush_tlb_pending), %o0
1194 + or %o0, %lo(__flush_tlb_pending), %o0
1195 + sethi %hi(__hypervisor_flush_tlb_pending), %o1
1196 + or %o1, %lo(__hypervisor_flush_tlb_pending), %o1
1197 + call tlb_patch_one
1198 +- mov 16, %o2
1199 ++ mov 27, %o2
1200 +
1201 + sethi %hi(__flush_tlb_kernel_range), %o0
1202 + or %o0, %lo(__flush_tlb_kernel_range), %o0
1203 + sethi %hi(__hypervisor_flush_tlb_kernel_range), %o1
1204 + or %o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
1205 + call tlb_patch_one
1206 +- mov 16, %o2
1207 ++ mov 31, %o2
1208 +
1209 + #ifdef DCACHE_ALIASING_POSSIBLE
1210 + sethi %hi(__flush_dcache_page), %o0
1211 +@@ -857,21 +1085,21 @@ hypervisor_patch_cachetlbops:
1212 + sethi %hi(__hypervisor_xcall_flush_tlb_mm), %o1
1213 + or %o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
1214 + call tlb_patch_one
1215 +- mov 21, %o2
1216 ++ mov 24, %o2
1217 +
1218 + sethi %hi(xcall_flush_tlb_page), %o0
1219 + or %o0, %lo(xcall_flush_tlb_page), %o0
1220 + sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1
1221 + or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
1222 + call tlb_patch_one
1223 +- mov 17, %o2
1224 ++ mov 20, %o2
1225 +
1226 + sethi %hi(xcall_flush_tlb_kernel_range), %o0
1227 + or %o0, %lo(xcall_flush_tlb_kernel_range), %o0
1228 + sethi %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
1229 + or %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
1230 + call tlb_patch_one
1231 +- mov 25, %o2
1232 ++ mov 44, %o2
1233 + #endif /* CONFIG_SMP */
1234 +
1235 + ret
1236 +diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
1237 +index 68c05398bba9..7aadd3cea843 100644
1238 +--- a/arch/x86/include/asm/hugetlb.h
1239 ++++ b/arch/x86/include/asm/hugetlb.h
1240 +@@ -4,6 +4,7 @@
1241 + #include <asm/page.h>
1242 + #include <asm-generic/hugetlb.h>
1243 +
1244 ++#define hugepages_supported() cpu_has_pse
1245 +
1246 + static inline int is_hugepage_only_range(struct mm_struct *mm,
1247 + unsigned long addr,
1248 +diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
1249 +index 5838fa911aa0..d4d6eb8c08a8 100644
1250 +--- a/arch/x86/include/asm/uaccess.h
1251 ++++ b/arch/x86/include/asm/uaccess.h
1252 +@@ -384,7 +384,7 @@ do { \
1253 + asm volatile("1: mov"itype" %1,%"rtype"0\n" \
1254 + "2:\n" \
1255 + _ASM_EXTABLE_EX(1b, 2b) \
1256 +- : ltype(x) : "m" (__m(addr)))
1257 ++ : ltype(x) : "m" (__m(addr)), "0" (0))
1258 +
1259 + #define __put_user_nocheck(x, ptr, size) \
1260 + ({ \
1261 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1262 +index 06b37a671b12..8562aff68884 100644
1263 +--- a/arch/x86/kvm/x86.c
1264 ++++ b/arch/x86/kvm/x86.c
1265 +@@ -178,7 +178,18 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
1266 + struct kvm_shared_msrs *locals
1267 + = container_of(urn, struct kvm_shared_msrs, urn);
1268 + struct kvm_shared_msr_values *values;
1269 ++ unsigned long flags;
1270 +
1271 ++ /*
1272 ++ * Disabling irqs at this point since the following code could be
1273 ++ * interrupted and executed through kvm_arch_hardware_disable()
1274 ++ */
1275 ++ local_irq_save(flags);
1276 ++ if (locals->registered) {
1277 ++ locals->registered = false;
1278 ++ user_return_notifier_unregister(urn);
1279 ++ }
1280 ++ local_irq_restore(flags);
1281 + for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
1282 + values = &locals->values[slot];
1283 + if (values->host != values->curr) {
1284 +@@ -186,8 +197,6 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
1285 + values->curr = values->host;
1286 + }
1287 + }
1288 +- locals->registered = false;
1289 +- user_return_notifier_unregister(urn);
1290 + }
1291 +
1292 + static void shared_msr_update(unsigned slot, u32 msr)
1293 +@@ -3225,6 +3234,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
1294 + };
1295 + case KVM_SET_VAPIC_ADDR: {
1296 + struct kvm_vapic_addr va;
1297 ++ int idx;
1298 +
1299 + r = -EINVAL;
1300 + if (!irqchip_in_kernel(vcpu->kvm))
1301 +@@ -3232,7 +3242,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
1302 + r = -EFAULT;
1303 + if (copy_from_user(&va, argp, sizeof va))
1304 + goto out;
1305 ++ idx = srcu_read_lock(&vcpu->kvm->srcu);
1306 + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
1307 ++ srcu_read_unlock(&vcpu->kvm->srcu, idx);
1308 + break;
1309 + }
1310 + case KVM_X86_SETUP_MCE: {
1311 +@@ -6662,11 +6674,13 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
1312 +
1313 + void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
1314 + {
1315 ++ void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
1316 ++
1317 + kvmclock_reset(vcpu);
1318 +
1319 +- free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
1320 + fx_free(vcpu);
1321 + kvm_x86_ops->vcpu_free(vcpu);
1322 ++ free_cpumask_var(wbinvd_dirty_mask);
1323 + }
1324 +
1325 + struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1326 +diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
1327 +index fdc3ba28ca38..53b061c9ad7e 100644
1328 +--- a/arch/x86/xen/mmu.c
1329 ++++ b/arch/x86/xen/mmu.c
1330 +@@ -1187,7 +1187,7 @@ static void __init xen_cleanhighmap(unsigned long vaddr,
1331 +
1332 + /* NOTE: The loop is more greedy than the cleanup_highmap variant.
1333 + * We include the PMD passed in on _both_ boundaries. */
1334 +- for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE));
1335 ++ for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
1336 + pmd++, vaddr += PMD_SIZE) {
1337 + if (pmd_none(*pmd))
1338 + continue;
1339 +diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
1340 +index 8ec37bbdd699..74529dc575a2 100644
1341 +--- a/drivers/acpi/apei/ghes.c
1342 ++++ b/drivers/acpi/apei/ghes.c
1343 +@@ -677,7 +677,7 @@ static int ghes_proc(struct ghes *ghes)
1344 + ghes_do_proc(ghes, ghes->estatus);
1345 + out:
1346 + ghes_clear_estatus(ghes);
1347 +- return 0;
1348 ++ return rc;
1349 + }
1350 +
1351 + static void ghes_add_timer(struct ghes *ghes)
1352 +diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
1353 +index 55635edf563b..342cb53db293 100644
1354 +--- a/drivers/block/drbd/drbd_main.c
1355 ++++ b/drivers/block/drbd/drbd_main.c
1356 +@@ -1771,7 +1771,7 @@ int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
1357 + * do we need to block DRBD_SIG if sock == &meta.socket ??
1358 + * otherwise wake_asender() might interrupt some send_*Ack !
1359 + */
1360 +- rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
1361 ++ rv = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
1362 + if (rv == -EAGAIN) {
1363 + if (we_should_drop_the_connection(tconn, sock))
1364 + break;
1365 +diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
1366 +index f6b96ba57b32..15a3ec940723 100644
1367 +--- a/drivers/char/virtio_console.c
1368 ++++ b/drivers/char/virtio_console.c
1369 +@@ -1533,19 +1533,29 @@ static void remove_port_data(struct port *port)
1370 + spin_lock_irq(&port->inbuf_lock);
1371 + /* Remove unused data this port might have received. */
1372 + discard_port_data(port);
1373 ++ spin_unlock_irq(&port->inbuf_lock);
1374 +
1375 + /* Remove buffers we queued up for the Host to send us data in. */
1376 +- while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
1377 +- free_buf(buf, true);
1378 +- spin_unlock_irq(&port->inbuf_lock);
1379 ++ do {
1380 ++ spin_lock_irq(&port->inbuf_lock);
1381 ++ buf = virtqueue_detach_unused_buf(port->in_vq);
1382 ++ spin_unlock_irq(&port->inbuf_lock);
1383 ++ if (buf)
1384 ++ free_buf(buf, true);
1385 ++ } while (buf);
1386 +
1387 + spin_lock_irq(&port->outvq_lock);
1388 + reclaim_consumed_buffers(port);
1389 ++ spin_unlock_irq(&port->outvq_lock);
1390 +
1391 + /* Free pending buffers from the out-queue. */
1392 +- while ((buf = virtqueue_detach_unused_buf(port->out_vq)))
1393 +- free_buf(buf, true);
1394 +- spin_unlock_irq(&port->outvq_lock);
1395 ++ do {
1396 ++ spin_lock_irq(&port->outvq_lock);
1397 ++ buf = virtqueue_detach_unused_buf(port->out_vq);
1398 ++ spin_unlock_irq(&port->outvq_lock);
1399 ++ if (buf)
1400 ++ free_buf(buf, true);
1401 ++ } while (buf);
1402 + }
1403 +
1404 + /*
1405 +diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
1406 +index 4af0a7bad7f2..2a260443061d 100644
1407 +--- a/drivers/firewire/net.c
1408 ++++ b/drivers/firewire/net.c
1409 +@@ -73,13 +73,13 @@ struct rfc2734_header {
1410 +
1411 + #define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30)
1412 + #define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff))
1413 +-#define fwnet_get_hdr_dg_size(h) (((h)->w0 & 0x0fff0000) >> 16)
1414 ++#define fwnet_get_hdr_dg_size(h) ((((h)->w0 & 0x0fff0000) >> 16) + 1)
1415 + #define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff))
1416 + #define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16)
1417 +
1418 +-#define fwnet_set_hdr_lf(lf) ((lf) << 30)
1419 ++#define fwnet_set_hdr_lf(lf) ((lf) << 30)
1420 + #define fwnet_set_hdr_ether_type(et) (et)
1421 +-#define fwnet_set_hdr_dg_size(dgs) ((dgs) << 16)
1422 ++#define fwnet_set_hdr_dg_size(dgs) (((dgs) - 1) << 16)
1423 + #define fwnet_set_hdr_fg_off(fgo) (fgo)
1424 +
1425 + #define fwnet_set_hdr_dgl(dgl) ((dgl) << 16)
1426 +@@ -591,6 +591,9 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
1427 + int retval;
1428 + u16 ether_type;
1429 +
1430 ++ if (len <= RFC2374_UNFRAG_HDR_SIZE)
1431 ++ return 0;
1432 ++
1433 + hdr.w0 = be32_to_cpu(buf[0]);
1434 + lf = fwnet_get_hdr_lf(&hdr);
1435 + if (lf == RFC2374_HDR_UNFRAG) {
1436 +@@ -615,7 +618,12 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
1437 + return fwnet_finish_incoming_packet(net, skb, source_node_id,
1438 + is_broadcast, ether_type);
1439 + }
1440 ++
1441 + /* A datagram fragment has been received, now the fun begins. */
1442 ++
1443 ++ if (len <= RFC2374_FRAG_HDR_SIZE)
1444 ++ return 0;
1445 ++
1446 + hdr.w1 = ntohl(buf[1]);
1447 + buf += 2;
1448 + len -= RFC2374_FRAG_HDR_SIZE;
1449 +@@ -627,7 +635,10 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
1450 + fg_off = fwnet_get_hdr_fg_off(&hdr);
1451 + }
1452 + datagram_label = fwnet_get_hdr_dgl(&hdr);
1453 +- dg_size = fwnet_get_hdr_dg_size(&hdr); /* ??? + 1 */
1454 ++ dg_size = fwnet_get_hdr_dg_size(&hdr);
1455 ++
1456 ++ if (fg_off + len > dg_size)
1457 ++ return 0;
1458 +
1459 + spin_lock_irqsave(&dev->lock, flags);
1460 +
1461 +@@ -735,6 +746,22 @@ static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
1462 + fw_send_response(card, r, rcode);
1463 + }
1464 +
1465 ++static int gasp_source_id(__be32 *p)
1466 ++{
1467 ++ return be32_to_cpu(p[0]) >> 16;
1468 ++}
1469 ++
1470 ++static u32 gasp_specifier_id(__be32 *p)
1471 ++{
1472 ++ return (be32_to_cpu(p[0]) & 0xffff) << 8 |
1473 ++ (be32_to_cpu(p[1]) & 0xff000000) >> 24;
1474 ++}
1475 ++
1476 ++static u32 gasp_version(__be32 *p)
1477 ++{
1478 ++ return be32_to_cpu(p[1]) & 0xffffff;
1479 ++}
1480 ++
1481 + static void fwnet_receive_broadcast(struct fw_iso_context *context,
1482 + u32 cycle, size_t header_length, void *header, void *data)
1483 + {
1484 +@@ -744,9 +771,6 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
1485 + __be32 *buf_ptr;
1486 + int retval;
1487 + u32 length;
1488 +- u16 source_node_id;
1489 +- u32 specifier_id;
1490 +- u32 ver;
1491 + unsigned long offset;
1492 + unsigned long flags;
1493 +
1494 +@@ -763,22 +787,17 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
1495 +
1496 + spin_unlock_irqrestore(&dev->lock, flags);
1497 +
1498 +- specifier_id = (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8
1499 +- | (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24;
1500 +- ver = be32_to_cpu(buf_ptr[1]) & 0xffffff;
1501 +- source_node_id = be32_to_cpu(buf_ptr[0]) >> 16;
1502 +-
1503 +- if (specifier_id == IANA_SPECIFIER_ID &&
1504 +- (ver == RFC2734_SW_VERSION
1505 ++ if (length > IEEE1394_GASP_HDR_SIZE &&
1506 ++ gasp_specifier_id(buf_ptr) == IANA_SPECIFIER_ID &&
1507 ++ (gasp_version(buf_ptr) == RFC2734_SW_VERSION
1508 + #if IS_ENABLED(CONFIG_IPV6)
1509 +- || ver == RFC3146_SW_VERSION
1510 ++ || gasp_version(buf_ptr) == RFC3146_SW_VERSION
1511 + #endif
1512 +- )) {
1513 +- buf_ptr += 2;
1514 +- length -= IEEE1394_GASP_HDR_SIZE;
1515 +- fwnet_incoming_packet(dev, buf_ptr, length, source_node_id,
1516 ++ ))
1517 ++ fwnet_incoming_packet(dev, buf_ptr + 2,
1518 ++ length - IEEE1394_GASP_HDR_SIZE,
1519 ++ gasp_source_id(buf_ptr),
1520 + context->card->generation, true);
1521 +- }
1522 +
1523 + packet.payload_length = dev->rcv_buffer_size;
1524 + packet.interrupt = 1;
1525 +diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
1526 +index 1bef6dc77478..6d521497e3b4 100644
1527 +--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
1528 ++++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
1529 +@@ -204,7 +204,7 @@ int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
1530 + return 0;
1531 +
1532 + err:
1533 +- list_for_each_entry_reverse(subdrv, &subdrv->list, list) {
1534 ++ list_for_each_entry_continue_reverse(subdrv, &exynos_drm_subdrv_list, list) {
1535 + if (subdrv->close)
1536 + subdrv->close(dev, subdrv->dev, file);
1537 + }
1538 +diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
1539 +index 7dcf2ffddccf..a10125442041 100644
1540 +--- a/drivers/gpu/drm/radeon/ni.c
1541 ++++ b/drivers/gpu/drm/radeon/ni.c
1542 +@@ -1322,9 +1322,7 @@ static void cayman_pcie_gart_fini(struct radeon_device *rdev)
1543 + void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
1544 + int ring, u32 cp_int_cntl)
1545 + {
1546 +- u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;
1547 +-
1548 +- WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
1549 ++ WREG32(SRBM_GFX_CNTL, RINGID(ring));
1550 + WREG32(CP_INT_CNTL, cp_int_cntl);
1551 + }
1552 +
1553 +diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
1554 +index c1281fc39040..3265792f1990 100644
1555 +--- a/drivers/gpu/drm/radeon/si_dpm.c
1556 ++++ b/drivers/gpu/drm/radeon/si_dpm.c
1557 +@@ -2934,6 +2934,49 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
1558 + int i;
1559 + struct si_dpm_quirk *p = si_dpm_quirk_list;
1560 +
1561 ++ /* limit all SI kickers */
1562 ++ if (rdev->family == CHIP_PITCAIRN) {
1563 ++ if ((rdev->pdev->revision == 0x81) ||
1564 ++ (rdev->pdev->device == 0x6810) ||
1565 ++ (rdev->pdev->device == 0x6811) ||
1566 ++ (rdev->pdev->device == 0x6816) ||
1567 ++ (rdev->pdev->device == 0x6817) ||
1568 ++ (rdev->pdev->device == 0x6806))
1569 ++ max_mclk = 120000;
1570 ++ } else if (rdev->family == CHIP_VERDE) {
1571 ++ if ((rdev->pdev->revision == 0x81) ||
1572 ++ (rdev->pdev->revision == 0x83) ||
1573 ++ (rdev->pdev->revision == 0x87) ||
1574 ++ (rdev->pdev->device == 0x6820) ||
1575 ++ (rdev->pdev->device == 0x6821) ||
1576 ++ (rdev->pdev->device == 0x6822) ||
1577 ++ (rdev->pdev->device == 0x6823) ||
1578 ++ (rdev->pdev->device == 0x682A) ||
1579 ++ (rdev->pdev->device == 0x682B)) {
1580 ++ max_sclk = 75000;
1581 ++ max_mclk = 80000;
1582 ++ }
1583 ++ } else if (rdev->family == CHIP_OLAND) {
1584 ++ if ((rdev->pdev->revision == 0xC7) ||
1585 ++ (rdev->pdev->revision == 0x80) ||
1586 ++ (rdev->pdev->revision == 0x81) ||
1587 ++ (rdev->pdev->revision == 0x83) ||
1588 ++ (rdev->pdev->device == 0x6604) ||
1589 ++ (rdev->pdev->device == 0x6605)) {
1590 ++ max_sclk = 75000;
1591 ++ max_mclk = 80000;
1592 ++ }
1593 ++ } else if (rdev->family == CHIP_HAINAN) {
1594 ++ if ((rdev->pdev->revision == 0x81) ||
1595 ++ (rdev->pdev->revision == 0x83) ||
1596 ++ (rdev->pdev->revision == 0xC3) ||
1597 ++ (rdev->pdev->device == 0x6664) ||
1598 ++ (rdev->pdev->device == 0x6665) ||
1599 ++ (rdev->pdev->device == 0x6667)) {
1600 ++ max_sclk = 75000;
1601 ++ max_mclk = 80000;
1602 ++ }
1603 ++ }
1604 + /* Apply dpm quirks */
1605 + while (p && p->chip_device != 0) {
1606 + if (rdev->pdev->vendor == p->chip_vendor &&
1607 +@@ -3008,16 +3051,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
1608 + ps->performance_levels[i].sclk = max_sclk;
1609 + }
1610 + }
1611 +- /* limit mclk on all R7 370 parts for stability */
1612 +- if (rdev->pdev->device == 0x6811 &&
1613 +- rdev->pdev->revision == 0x81)
1614 +- max_mclk = 120000;
1615 +- /* limit sclk/mclk on Jet parts for stability */
1616 +- if (rdev->pdev->device == 0x6665 &&
1617 +- rdev->pdev->revision == 0xc3) {
1618 +- max_sclk = 75000;
1619 +- max_mclk = 80000;
1620 +- }
1621 +
1622 + /* XXX validate the min clocks required for display */
1623 +
1624 +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
1625 +index d7d54e7449fa..d183ff679fe5 100644
1626 +--- a/drivers/hid/hid-core.c
1627 ++++ b/drivers/hid/hid-core.c
1628 +@@ -707,6 +707,7 @@ static void hid_scan_collection(struct hid_parser *parser, unsigned type)
1629 + (hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3 ||
1630 + hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2 ||
1631 + hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP ||
1632 ++ hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP ||
1633 + hid->product == USB_DEVICE_ID_MS_TYPE_COVER_3 ||
1634 + hid->product == USB_DEVICE_ID_MS_POWER_COVER) &&
1635 + hid->group == HID_GROUP_MULTITOUCH)
1636 +@@ -1818,6 +1819,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1637 + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3) },
1638 + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2) },
1639 + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP) },
1640 ++ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP) },
1641 + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3) },
1642 + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K) },
1643 + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
1644 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
1645 +index 132ed653b54e..16583e6621d4 100644
1646 +--- a/drivers/hid/hid-ids.h
1647 ++++ b/drivers/hid/hid-ids.h
1648 +@@ -165,6 +165,8 @@
1649 + #define USB_DEVICE_ID_ATEN_2PORTKVM 0x2204
1650 + #define USB_DEVICE_ID_ATEN_4PORTKVM 0x2205
1651 + #define USB_DEVICE_ID_ATEN_4PORTKVMC 0x2208
1652 ++#define USB_DEVICE_ID_ATEN_CS682 0x2213
1653 ++#define USB_DEVICE_ID_ATEN_CS692 0x8021
1654 +
1655 + #define USB_VENDOR_ID_ATMEL 0x03eb
1656 + #define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c
1657 +@@ -661,6 +663,7 @@
1658 + #define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3 0x07dc
1659 + #define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2 0x07e2
1660 + #define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP 0x07dd
1661 ++#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP 0x07e9
1662 + #define USB_DEVICE_ID_MS_TYPE_COVER_3 0x07de
1663 + #define USB_DEVICE_ID_MS_POWER_COVER 0x07da
1664 +
1665 +diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
1666 +index 5fbb46fe6ebf..bd7460541486 100644
1667 +--- a/drivers/hid/hid-input.c
1668 ++++ b/drivers/hid/hid-input.c
1669 +@@ -895,6 +895,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
1670 + case HID_UP_HPVENDOR2:
1671 + set_bit(EV_REP, input->evbit);
1672 + switch (usage->hid & HID_USAGE) {
1673 ++ case 0x001: map_key_clear(KEY_MICMUTE); break;
1674 + case 0x003: map_key_clear(KEY_BRIGHTNESSDOWN); break;
1675 + case 0x004: map_key_clear(KEY_BRIGHTNESSUP); break;
1676 + default: goto ignore;
1677 +diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
1678 +index 8dfc58ac9d52..607e57122458 100644
1679 +--- a/drivers/hid/hid-microsoft.c
1680 ++++ b/drivers/hid/hid-microsoft.c
1681 +@@ -268,6 +268,8 @@ static const struct hid_device_id ms_devices[] = {
1682 + .driver_data = MS_HIDINPUT },
1683 + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP),
1684 + .driver_data = MS_HIDINPUT },
1685 ++ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP),
1686 ++ .driver_data = MS_HIDINPUT },
1687 + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3),
1688 + .driver_data = MS_HIDINPUT },
1689 + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER),
1690 +diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
1691 +index d63f7e45b539..3fd5fa9385ae 100644
1692 +--- a/drivers/hid/usbhid/hid-quirks.c
1693 ++++ b/drivers/hid/usbhid/hid-quirks.c
1694 +@@ -60,6 +60,8 @@ static const struct hid_blacklist {
1695 + { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
1696 + { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET },
1697 + { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET },
1698 ++ { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS682, HID_QUIRK_NOGET },
1699 ++ { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS692, HID_QUIRK_NOGET },
1700 + { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FIGHTERSTICK, HID_QUIRK_NOGET },
1701 + { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK, HID_QUIRK_NOGET },
1702 + { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE, HID_QUIRK_NOGET },
1703 +@@ -89,6 +91,7 @@ static const struct hid_blacklist {
1704 + { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3, HID_QUIRK_NO_INIT_REPORTS },
1705 + { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2, HID_QUIRK_NO_INIT_REPORTS },
1706 + { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP, HID_QUIRK_NO_INIT_REPORTS },
1707 ++ { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP, HID_QUIRK_NO_INIT_REPORTS },
1708 + { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
1709 + { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS },
1710 + { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
1711 +diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
1712 +index 665b7dac6b7d..74d7025a05e6 100644
1713 +--- a/drivers/hv/hv_util.c
1714 ++++ b/drivers/hv/hv_util.c
1715 +@@ -276,10 +276,14 @@ static void heartbeat_onchannelcallback(void *context)
1716 + u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
1717 + struct icmsg_negotiate *negop = NULL;
1718 +
1719 +- vmbus_recvpacket(channel, hbeat_txf_buf,
1720 +- PAGE_SIZE, &recvlen, &requestid);
1721 ++ while (1) {
1722 ++
1723 ++ vmbus_recvpacket(channel, hbeat_txf_buf,
1724 ++ PAGE_SIZE, &recvlen, &requestid);
1725 ++
1726 ++ if (!recvlen)
1727 ++ break;
1728 +
1729 +- if (recvlen > 0) {
1730 + icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[
1731 + sizeof(struct vmbuspipe_hdr)];
1732 +
1733 +diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
1734 +index c410217fbe89..951a4f6a3b11 100644
1735 +--- a/drivers/infiniband/core/cm.c
1736 ++++ b/drivers/infiniband/core/cm.c
1737 +@@ -79,6 +79,8 @@ static struct ib_cm {
1738 + __be32 random_id_operand;
1739 + struct list_head timewait_list;
1740 + struct workqueue_struct *wq;
1741 ++ /* Sync on CM port state changes */
1742 ++ spinlock_t state_lock;
1743 + } cm;
1744 +
1745 + /* Counter indexes ordered by attribute ID */
1746 +@@ -160,6 +162,8 @@ struct cm_port {
1747 + struct ib_mad_agent *mad_agent;
1748 + struct kobject port_obj;
1749 + u8 port_num;
1750 ++ struct list_head cm_priv_prim_list;
1751 ++ struct list_head cm_priv_altr_list;
1752 + struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
1753 + };
1754 +
1755 +@@ -237,6 +241,12 @@ struct cm_id_private {
1756 + u8 service_timeout;
1757 + u8 target_ack_delay;
1758 +
1759 ++ struct list_head prim_list;
1760 ++ struct list_head altr_list;
1761 ++ /* Indicates that the send port mad is registered and av is set */
1762 ++ int prim_send_port_not_ready;
1763 ++ int altr_send_port_not_ready;
1764 ++
1765 + struct list_head work_list;
1766 + atomic_t work_count;
1767 + };
1768 +@@ -255,19 +265,46 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
1769 + struct ib_mad_agent *mad_agent;
1770 + struct ib_mad_send_buf *m;
1771 + struct ib_ah *ah;
1772 ++ struct cm_av *av;
1773 ++ unsigned long flags, flags2;
1774 ++ int ret = 0;
1775 +
1776 ++ /* don't let the port be released till the agent is down */
1777 ++ spin_lock_irqsave(&cm.state_lock, flags2);
1778 ++ spin_lock_irqsave(&cm.lock, flags);
1779 ++ if (!cm_id_priv->prim_send_port_not_ready)
1780 ++ av = &cm_id_priv->av;
1781 ++ else if (!cm_id_priv->altr_send_port_not_ready &&
1782 ++ (cm_id_priv->alt_av.port))
1783 ++ av = &cm_id_priv->alt_av;
1784 ++ else {
1785 ++ pr_info("%s: not valid CM id\n", __func__);
1786 ++ ret = -ENODEV;
1787 ++ spin_unlock_irqrestore(&cm.lock, flags);
1788 ++ goto out;
1789 ++ }
1790 ++ spin_unlock_irqrestore(&cm.lock, flags);
1791 ++ /* Make sure the port hasn't released the MAD yet */
1792 + mad_agent = cm_id_priv->av.port->mad_agent;
1793 +- ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
1794 +- if (IS_ERR(ah))
1795 +- return PTR_ERR(ah);
1796 ++ if (!mad_agent) {
1797 ++ pr_info("%s: not a valid MAD agent\n", __func__);
1798 ++ ret = -ENODEV;
1799 ++ goto out;
1800 ++ }
1801 ++ ah = ib_create_ah(mad_agent->qp->pd, &av->ah_attr);
1802 ++ if (IS_ERR(ah)) {
1803 ++ ret = PTR_ERR(ah);
1804 ++ goto out;
1805 ++ }
1806 +
1807 + m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
1808 +- cm_id_priv->av.pkey_index,
1809 ++ av->pkey_index,
1810 + 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
1811 + GFP_ATOMIC);
1812 + if (IS_ERR(m)) {
1813 + ib_destroy_ah(ah);
1814 +- return PTR_ERR(m);
1815 ++ ret = PTR_ERR(m);
1816 ++ goto out;
1817 + }
1818 +
1819 + /* Timeout set by caller if response is expected. */
1820 +@@ -277,7 +314,10 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
1821 + atomic_inc(&cm_id_priv->refcount);
1822 + m->context[0] = cm_id_priv;
1823 + *msg = m;
1824 +- return 0;
1825 ++
1826 ++out:
1827 ++ spin_unlock_irqrestore(&cm.state_lock, flags2);
1828 ++ return ret;
1829 + }
1830 +
1831 + static int cm_alloc_response_msg(struct cm_port *port,
1832 +@@ -346,7 +386,8 @@ static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
1833 + grh, &av->ah_attr);
1834 + }
1835 +
1836 +-static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
1837 ++static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av,
1838 ++ struct cm_id_private *cm_id_priv)
1839 + {
1840 + struct cm_device *cm_dev;
1841 + struct cm_port *port = NULL;
1842 +@@ -376,7 +417,18 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
1843 + ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path,
1844 + &av->ah_attr);
1845 + av->timeout = path->packet_life_time + 1;
1846 +- return 0;
1847 ++
1848 ++ spin_lock_irqsave(&cm.lock, flags);
1849 ++ if (&cm_id_priv->av == av)
1850 ++ list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
1851 ++ else if (&cm_id_priv->alt_av == av)
1852 ++ list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
1853 ++ else
1854 ++ ret = -EINVAL;
1855 ++
1856 ++ spin_unlock_irqrestore(&cm.lock, flags);
1857 ++
1858 ++ return ret;
1859 + }
1860 +
1861 + static int cm_alloc_id(struct cm_id_private *cm_id_priv)
1862 +@@ -716,6 +768,8 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
1863 + spin_lock_init(&cm_id_priv->lock);
1864 + init_completion(&cm_id_priv->comp);
1865 + INIT_LIST_HEAD(&cm_id_priv->work_list);
1866 ++ INIT_LIST_HEAD(&cm_id_priv->prim_list);
1867 ++ INIT_LIST_HEAD(&cm_id_priv->altr_list);
1868 + atomic_set(&cm_id_priv->work_count, -1);
1869 + atomic_set(&cm_id_priv->refcount, 1);
1870 + return &cm_id_priv->id;
1871 +@@ -914,6 +968,15 @@ retest:
1872 + break;
1873 + }
1874 +
1875 ++ spin_lock_irq(&cm.lock);
1876 ++ if (!list_empty(&cm_id_priv->altr_list) &&
1877 ++ (!cm_id_priv->altr_send_port_not_ready))
1878 ++ list_del(&cm_id_priv->altr_list);
1879 ++ if (!list_empty(&cm_id_priv->prim_list) &&
1880 ++ (!cm_id_priv->prim_send_port_not_ready))
1881 ++ list_del(&cm_id_priv->prim_list);
1882 ++ spin_unlock_irq(&cm.lock);
1883 ++
1884 + cm_free_id(cm_id->local_id);
1885 + cm_deref_id(cm_id_priv);
1886 + wait_for_completion(&cm_id_priv->comp);
1887 +@@ -1137,12 +1200,13 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
1888 + goto out;
1889 + }
1890 +
1891 +- ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
1892 ++ ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av,
1893 ++ cm_id_priv);
1894 + if (ret)
1895 + goto error1;
1896 + if (param->alternate_path) {
1897 + ret = cm_init_av_by_path(param->alternate_path,
1898 +- &cm_id_priv->alt_av);
1899 ++ &cm_id_priv->alt_av, cm_id_priv);
1900 + if (ret)
1901 + goto error1;
1902 + }
1903 +@@ -1562,7 +1626,8 @@ static int cm_req_handler(struct cm_work *work)
1904 +
1905 + cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
1906 + cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
1907 +- ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
1908 ++ ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
1909 ++ cm_id_priv);
1910 + if (ret) {
1911 + ib_get_cached_gid(work->port->cm_dev->ib_device,
1912 + work->port->port_num, 0, &work->path[0].sgid);
1913 +@@ -1572,7 +1637,8 @@ static int cm_req_handler(struct cm_work *work)
1914 + goto rejected;
1915 + }
1916 + if (req_msg->alt_local_lid) {
1917 +- ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
1918 ++ ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av,
1919 ++ cm_id_priv);
1920 + if (ret) {
1921 + ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
1922 + &work->path[0].sgid,
1923 +@@ -2627,7 +2693,8 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id,
1924 + goto out;
1925 + }
1926 +
1927 +- ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
1928 ++ ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av,
1929 ++ cm_id_priv);
1930 + if (ret)
1931 + goto out;
1932 + cm_id_priv->alt_av.timeout =
1933 +@@ -2739,7 +2806,8 @@ static int cm_lap_handler(struct cm_work *work)
1934 + cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
1935 + work->mad_recv_wc->recv_buf.grh,
1936 + &cm_id_priv->av);
1937 +- cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
1938 ++ cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av,
1939 ++ cm_id_priv);
1940 + ret = atomic_inc_and_test(&cm_id_priv->work_count);
1941 + if (!ret)
1942 + list_add_tail(&work->list, &cm_id_priv->work_list);
1943 +@@ -2931,7 +2999,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
1944 + return -EINVAL;
1945 +
1946 + cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1947 +- ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
1948 ++ ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv);
1949 + if (ret)
1950 + goto out;
1951 +
1952 +@@ -3352,7 +3420,9 @@ out:
1953 + static int cm_migrate(struct ib_cm_id *cm_id)
1954 + {
1955 + struct cm_id_private *cm_id_priv;
1956 ++ struct cm_av tmp_av;
1957 + unsigned long flags;
1958 ++ int tmp_send_port_not_ready;
1959 + int ret = 0;
1960 +
1961 + cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1962 +@@ -3361,7 +3431,14 @@ static int cm_migrate(struct ib_cm_id *cm_id)
1963 + (cm_id->lap_state == IB_CM_LAP_UNINIT ||
1964 + cm_id->lap_state == IB_CM_LAP_IDLE)) {
1965 + cm_id->lap_state = IB_CM_LAP_IDLE;
1966 ++ /* Swap address vector */
1967 ++ tmp_av = cm_id_priv->av;
1968 + cm_id_priv->av = cm_id_priv->alt_av;
1969 ++ cm_id_priv->alt_av = tmp_av;
1970 ++ /* Swap port send ready state */
1971 ++ tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
1972 ++ cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready;
1973 ++ cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
1974 + } else
1975 + ret = -EINVAL;
1976 + spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1977 +@@ -3767,6 +3844,9 @@ static void cm_add_one(struct ib_device *ib_device)
1978 + port->cm_dev = cm_dev;
1979 + port->port_num = i;
1980 +
1981 ++ INIT_LIST_HEAD(&port->cm_priv_prim_list);
1982 ++ INIT_LIST_HEAD(&port->cm_priv_altr_list);
1983 ++
1984 + ret = cm_create_port_fs(port);
1985 + if (ret)
1986 + goto error1;
1987 +@@ -3813,6 +3893,8 @@ static void cm_remove_one(struct ib_device *ib_device)
1988 + {
1989 + struct cm_device *cm_dev;
1990 + struct cm_port *port;
1991 ++ struct cm_id_private *cm_id_priv;
1992 ++ struct ib_mad_agent *cur_mad_agent;
1993 + struct ib_port_modify port_modify = {
1994 + .clr_port_cap_mask = IB_PORT_CM_SUP
1995 + };
1996 +@@ -3830,10 +3912,22 @@ static void cm_remove_one(struct ib_device *ib_device)
1997 + for (i = 1; i <= ib_device->phys_port_cnt; i++) {
1998 + port = cm_dev->port[i-1];
1999 + ib_modify_port(ib_device, port->port_num, 0, &port_modify);
2000 +- ib_unregister_mad_agent(port->mad_agent);
2001 ++ /* Mark all the cm_ids as invalid */
2002 ++ spin_lock_irq(&cm.lock);
2003 ++ list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
2004 ++ cm_id_priv->altr_send_port_not_ready = 1;
2005 ++ list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
2006 ++ cm_id_priv->prim_send_port_not_ready = 1;
2007 ++ spin_unlock_irq(&cm.lock);
2008 + flush_workqueue(cm.wq);
2009 ++ spin_lock_irq(&cm.state_lock);
2010 ++ cur_mad_agent = port->mad_agent;
2011 ++ port->mad_agent = NULL;
2012 ++ spin_unlock_irq(&cm.state_lock);
2013 ++ ib_unregister_mad_agent(cur_mad_agent);
2014 + cm_remove_port_fs(port);
2015 + }
2016 ++
2017 + device_unregister(cm_dev->device);
2018 + kfree(cm_dev);
2019 + }
2020 +@@ -3846,6 +3940,7 @@ static int __init ib_cm_init(void)
2021 + INIT_LIST_HEAD(&cm.device_list);
2022 + rwlock_init(&cm.device_lock);
2023 + spin_lock_init(&cm.lock);
2024 ++ spin_lock_init(&cm.state_lock);
2025 + cm.listen_service_table = RB_ROOT;
2026 + cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
2027 + cm.remote_id_table = RB_ROOT;
2028 +diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
2029 +index ee5222168b68..2afdd52f29d1 100644
2030 +--- a/drivers/infiniband/core/uverbs_main.c
2031 ++++ b/drivers/infiniband/core/uverbs_main.c
2032 +@@ -237,12 +237,9 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
2033 + container_of(uobj, struct ib_uqp_object, uevent.uobject);
2034 +
2035 + idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
2036 +- if (qp != qp->real_qp) {
2037 +- ib_close_qp(qp);
2038 +- } else {
2039 ++ if (qp == qp->real_qp)
2040 + ib_uverbs_detach_umcast(qp, uqp);
2041 +- ib_destroy_qp(qp);
2042 +- }
2043 ++ ib_destroy_qp(qp);
2044 + ib_uverbs_release_uevent(file, &uqp->uevent);
2045 + kfree(uqp);
2046 + }
2047 +diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
2048 +index d5e60f44ba5a..5b8a62c6bc8d 100644
2049 +--- a/drivers/infiniband/hw/mlx4/cq.c
2050 ++++ b/drivers/infiniband/hw/mlx4/cq.c
2051 +@@ -239,11 +239,14 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
2052 + if (context)
2053 + if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
2054 + err = -EFAULT;
2055 +- goto err_dbmap;
2056 ++ goto err_cq_free;
2057 + }
2058 +
2059 + return &cq->ibcq;
2060 +
2061 ++err_cq_free:
2062 ++ mlx4_cq_free(dev->dev, &cq->mcq);
2063 ++
2064 + err_dbmap:
2065 + if (context)
2066 + mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);
2067 +diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
2068 +index 706833ab7e7e..e5a6d839f1d1 100644
2069 +--- a/drivers/infiniband/hw/mlx5/cq.c
2070 ++++ b/drivers/infiniband/hw/mlx5/cq.c
2071 +@@ -684,8 +684,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
2072 + if (err)
2073 + goto err_create;
2074 + } else {
2075 +- /* for now choose 64 bytes till we have a proper interface */
2076 +- cqe_size = 64;
2077 ++ cqe_size = cache_line_size() == 128 ? 128 : 64;
2078 + err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
2079 + &index, &inlen);
2080 + if (err)
2081 +diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
2082 +index b1a6cb3a2809..1300a377aca8 100644
2083 +--- a/drivers/infiniband/hw/mlx5/main.c
2084 ++++ b/drivers/infiniband/hw/mlx5/main.c
2085 +@@ -959,12 +959,13 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
2086 + {
2087 + struct mlx5_ib_dev *ibdev = container_of(dev, struct mlx5_ib_dev, mdev);
2088 + struct ib_event ibev;
2089 ++ bool fatal = false;
2090 + u8 port = 0;
2091 +
2092 + switch (event) {
2093 + case MLX5_DEV_EVENT_SYS_ERROR:
2094 +- ibdev->ib_active = false;
2095 + ibev.event = IB_EVENT_DEVICE_FATAL;
2096 ++ fatal = true;
2097 + break;
2098 +
2099 + case MLX5_DEV_EVENT_PORT_UP:
2100 +@@ -1012,6 +1013,9 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
2101 +
2102 + if (ibdev->ib_active)
2103 + ib_dispatch_event(&ibev);
2104 ++
2105 ++ if (fatal)
2106 ++ ibdev->ib_active = false;
2107 + }
2108 +
2109 + static void get_ext_port_caps(struct mlx5_ib_dev *dev)
2110 +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
2111 +index d9ab5c5e8e82..ccb36fb565de 100644
2112 +--- a/drivers/input/serio/i8042-x86ia64io.h
2113 ++++ b/drivers/input/serio/i8042-x86ia64io.h
2114 +@@ -776,6 +776,13 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
2115 + DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
2116 + },
2117 + },
2118 ++ {
2119 ++ /* Schenker XMG C504 - Elantech touchpad */
2120 ++ .matches = {
2121 ++ DMI_MATCH(DMI_SYS_VENDOR, "XMG"),
2122 ++ DMI_MATCH(DMI_PRODUCT_NAME, "C504"),
2123 ++ },
2124 ++ },
2125 + { }
2126 + };
2127 +
2128 +diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
2129 +index 73353a97aafb..71f9cd108590 100644
2130 +--- a/drivers/iommu/amd_iommu.c
2131 ++++ b/drivers/iommu/amd_iommu.c
2132 +@@ -2032,6 +2032,9 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom)
2133 + kfree(dom->aperture[i]);
2134 + }
2135 +
2136 ++ if (dom->domain.id)
2137 ++ domain_id_free(dom->domain.id);
2138 ++
2139 + kfree(dom);
2140 + }
2141 +
2142 +diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
2143 +index bf2a908d74cf..452ef7bc630c 100644
2144 +--- a/drivers/media/usb/dvb-usb/dib0700_core.c
2145 ++++ b/drivers/media/usb/dvb-usb/dib0700_core.c
2146 +@@ -674,7 +674,7 @@ static void dib0700_rc_urb_completion(struct urb *purb)
2147 + {
2148 + struct dvb_usb_device *d = purb->context;
2149 + struct dib0700_rc_response *poll_reply;
2150 +- u32 uninitialized_var(keycode);
2151 ++ u32 keycode;
2152 + u8 toggle;
2153 +
2154 + deb_info("%s()\n", __func__);
2155 +@@ -713,7 +713,8 @@ static void dib0700_rc_urb_completion(struct urb *purb)
2156 + if ((poll_reply->system == 0x00) && (poll_reply->data == 0x00)
2157 + && (poll_reply->not_data == 0xff)) {
2158 + poll_reply->data_state = 2;
2159 +- break;
2160 ++ rc_repeat(d->rc_dev);
2161 ++ goto resubmit;
2162 + }
2163 +
2164 + if ((poll_reply->system ^ poll_reply->not_system) != 0xff) {
2165 +diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
2166 +index f421586f29fb..a1f0f73245c5 100644
2167 +--- a/drivers/mfd/mfd-core.c
2168 ++++ b/drivers/mfd/mfd-core.c
2169 +@@ -265,6 +265,8 @@ int mfd_clone_cell(const char *cell, const char **clones, size_t n_clones)
2170 + clones[i]);
2171 + }
2172 +
2173 ++ put_device(dev);
2174 ++
2175 + return 0;
2176 + }
2177 + EXPORT_SYMBOL(mfd_clone_cell);
2178 +diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c
2179 +index 4b7ea3fb143c..1f8f856946cd 100644
2180 +--- a/drivers/misc/mei/nfc.c
2181 ++++ b/drivers/misc/mei/nfc.c
2182 +@@ -292,7 +292,7 @@ static int mei_nfc_if_version(struct mei_nfc_dev *ndev)
2183 + return -ENOMEM;
2184 +
2185 + bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length);
2186 +- if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) {
2187 ++ if (bytes_recv < if_version_length) {
2188 + dev_err(&dev->pdev->dev, "Could not read IF version\n");
2189 + ret = -EIO;
2190 + goto err;
2191 +diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
2192 +index e1fa3ef735e0..f8aac3044670 100644
2193 +--- a/drivers/mmc/host/mxs-mmc.c
2194 ++++ b/drivers/mmc/host/mxs-mmc.c
2195 +@@ -675,13 +675,13 @@ static int mxs_mmc_probe(struct platform_device *pdev)
2196 +
2197 + platform_set_drvdata(pdev, mmc);
2198 +
2199 ++ spin_lock_init(&host->lock);
2200 ++
2201 + ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0,
2202 + DRIVER_NAME, host);
2203 + if (ret)
2204 + goto out_free_dma;
2205 +
2206 +- spin_lock_init(&host->lock);
2207 +-
2208 + ret = mmc_add_host(mmc);
2209 + if (ret)
2210 + goto out_free_dma;
2211 +diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
2212 +index 85cd77c9cd12..87bf356d274a 100644
2213 +--- a/drivers/mtd/ubi/fastmap.c
2214 ++++ b/drivers/mtd/ubi/fastmap.c
2215 +@@ -438,10 +438,11 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
2216 + unsigned long long ec = be64_to_cpu(ech->ec);
2217 + unmap_peb(ai, pnum);
2218 + dbg_bld("Adding PEB to free: %i", pnum);
2219 ++
2220 + if (err == UBI_IO_FF_BITFLIPS)
2221 +- add_aeb(ai, free, pnum, ec, 1);
2222 +- else
2223 +- add_aeb(ai, free, pnum, ec, 0);
2224 ++ scrub = 1;
2225 ++
2226 ++ add_aeb(ai, free, pnum, ec, scrub);
2227 + continue;
2228 + } else if (err == 0 || err == UBI_IO_BITFLIPS) {
2229 + dbg_bld("Found non empty PEB:%i in pool", pnum);
2230 +diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
2231 +index 73be7f3982e6..af9e7d775348 100644
2232 +--- a/drivers/net/ethernet/smsc/smc91x.c
2233 ++++ b/drivers/net/ethernet/smsc/smc91x.c
2234 +@@ -533,7 +533,7 @@ static inline void smc_rcv(struct net_device *dev)
2235 + #define smc_special_lock(lock, flags) spin_lock_irqsave(lock, flags)
2236 + #define smc_special_unlock(lock, flags) spin_unlock_irqrestore(lock, flags)
2237 + #else
2238 +-#define smc_special_trylock(lock, flags) (flags == flags)
2239 ++#define smc_special_trylock(lock, flags) ((void)flags, true)
2240 + #define smc_special_lock(lock, flags) do { flags = 0; } while (0)
2241 + #define smc_special_unlock(lock, flags) do { flags = 0; } while (0)
2242 + #endif
2243 +diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
2244 +index 98ce4feb9a79..576c3236fa40 100644
2245 +--- a/drivers/net/macvtap.c
2246 ++++ b/drivers/net/macvtap.c
2247 +@@ -67,7 +67,7 @@ static struct cdev macvtap_cdev;
2248 + static const struct proto_ops macvtap_socket_ops;
2249 +
2250 + #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
2251 +- NETIF_F_TSO6 | NETIF_F_UFO)
2252 ++ NETIF_F_TSO6)
2253 + #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
2254 + #define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST)
2255 +
2256 +@@ -566,6 +566,8 @@ static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
2257 + gso_type = SKB_GSO_TCPV6;
2258 + break;
2259 + case VIRTIO_NET_HDR_GSO_UDP:
2260 ++ pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n",
2261 ++ current->comm);
2262 + gso_type = SKB_GSO_UDP;
2263 + if (skb->protocol == htons(ETH_P_IPV6))
2264 + ipv6_proxy_select_ident(skb);
2265 +@@ -613,8 +615,6 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
2266 + vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
2267 + else if (sinfo->gso_type & SKB_GSO_TCPV6)
2268 + vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
2269 +- else if (sinfo->gso_type & SKB_GSO_UDP)
2270 +- vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
2271 + else
2272 + BUG();
2273 + if (sinfo->gso_type & SKB_GSO_TCP_ECN)
2274 +@@ -962,9 +962,6 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
2275 + if (arg & TUN_F_TSO6)
2276 + feature_mask |= NETIF_F_TSO6;
2277 + }
2278 +-
2279 +- if (arg & TUN_F_UFO)
2280 +- feature_mask |= NETIF_F_UFO;
2281 + }
2282 +
2283 + /* tun/tap driver inverts the usage for TSO offloads, where
2284 +@@ -975,7 +972,7 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
2285 + * When user space turns off TSO, we turn off GSO/LRO so that
2286 + * user-space will not receive TSO frames.
2287 + */
2288 +- if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO))
2289 ++ if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6))
2290 + features |= RX_OFFLOADS;
2291 + else
2292 + features &= ~RX_OFFLOADS;
2293 +@@ -1076,7 +1073,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
2294 + case TUNSETOFFLOAD:
2295 + /* let the user check for future flags */
2296 + if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
2297 +- TUN_F_TSO_ECN | TUN_F_UFO))
2298 ++ TUN_F_TSO_ECN))
2299 + return -EINVAL;
2300 +
2301 + rtnl_lock();
2302 +diff --git a/drivers/net/tun.c b/drivers/net/tun.c
2303 +index 813750d09680..46f9cb21ec56 100644
2304 +--- a/drivers/net/tun.c
2305 ++++ b/drivers/net/tun.c
2306 +@@ -173,7 +173,7 @@ struct tun_struct {
2307 + struct net_device *dev;
2308 + netdev_features_t set_features;
2309 + #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
2310 +- NETIF_F_TSO6|NETIF_F_UFO)
2311 ++ NETIF_F_TSO6)
2312 +
2313 + int vnet_hdr_sz;
2314 + int sndbuf;
2315 +@@ -1113,10 +1113,20 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
2316 + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
2317 + break;
2318 + case VIRTIO_NET_HDR_GSO_UDP:
2319 ++ {
2320 ++ static bool warned;
2321 ++
2322 ++ if (!warned) {
2323 ++ warned = true;
2324 ++ netdev_warn(tun->dev,
2325 ++ "%s: using disabled UFO feature; please fix this program\n",
2326 ++ current->comm);
2327 ++ }
2328 + skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
2329 + if (skb->protocol == htons(ETH_P_IPV6))
2330 + ipv6_proxy_select_ident(skb);
2331 + break;
2332 ++ }
2333 + default:
2334 + tun->dev->stats.rx_frame_errors++;
2335 + kfree_skb(skb);
2336 +@@ -1220,8 +1230,6 @@ static ssize_t tun_put_user(struct tun_struct *tun,
2337 + gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
2338 + else if (sinfo->gso_type & SKB_GSO_TCPV6)
2339 + gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
2340 +- else if (sinfo->gso_type & SKB_GSO_UDP)
2341 +- gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
2342 + else {
2343 + pr_err("unexpected GSO type: "
2344 + "0x%x, gso_size %d, hdr_len %d\n",
2345 +@@ -1750,11 +1758,6 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
2346 + features |= NETIF_F_TSO6;
2347 + arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
2348 + }
2349 +-
2350 +- if (arg & TUN_F_UFO) {
2351 +- features |= NETIF_F_UFO;
2352 +- arg &= ~TUN_F_UFO;
2353 +- }
2354 + }
2355 +
2356 + /* This gives the user a way to test for new features in future by
2357 +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
2358 +index 5d080516d0c5..421642af8d06 100644
2359 +--- a/drivers/net/virtio_net.c
2360 ++++ b/drivers/net/virtio_net.c
2361 +@@ -438,8 +438,17 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
2362 + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
2363 + break;
2364 + case VIRTIO_NET_HDR_GSO_UDP:
2365 ++ {
2366 ++ static bool warned;
2367 ++
2368 ++ if (!warned) {
2369 ++ warned = true;
2370 ++ netdev_warn(dev,
2371 ++ "host using disabled UFO feature; please fix it\n");
2372 ++ }
2373 + skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
2374 + break;
2375 ++ }
2376 + case VIRTIO_NET_HDR_GSO_TCPV6:
2377 + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
2378 + break;
2379 +@@ -754,8 +763,6 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
2380 + hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
2381 + else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
2382 + hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
2383 +- else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
2384 +- hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
2385 + else
2386 + BUG();
2387 + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
2388 +@@ -1572,7 +1579,7 @@ static int virtnet_probe(struct virtio_device *vdev)
2389 + dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
2390 +
2391 + if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
2392 +- dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
2393 ++ dev->hw_features |= NETIF_F_TSO
2394 + | NETIF_F_TSO_ECN | NETIF_F_TSO6;
2395 + }
2396 + /* Individual feature bits: what can host handle? */
2397 +@@ -1582,11 +1589,9 @@ static int virtnet_probe(struct virtio_device *vdev)
2398 + dev->hw_features |= NETIF_F_TSO6;
2399 + if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
2400 + dev->hw_features |= NETIF_F_TSO_ECN;
2401 +- if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
2402 +- dev->hw_features |= NETIF_F_UFO;
2403 +
2404 + if (gso)
2405 +- dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
2406 ++ dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
2407 + /* (!csum && gso) case will be fixed by register_netdev() */
2408 + }
2409 + if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
2410 +@@ -1621,8 +1626,7 @@ static int virtnet_probe(struct virtio_device *vdev)
2411 + /* If we can receive ANY GSO packets, we must allocate large ones. */
2412 + if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
2413 + virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
2414 +- virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
2415 +- virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
2416 ++ virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
2417 + vi->big_packets = true;
2418 +
2419 + if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
2420 +@@ -1808,9 +1812,9 @@ static struct virtio_device_id id_table[] = {
2421 + static unsigned int features[] = {
2422 + VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
2423 + VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
2424 +- VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
2425 ++ VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_TSO6,
2426 + VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
2427 +- VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
2428 ++ VIRTIO_NET_F_GUEST_ECN,
2429 + VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
2430 + VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
2431 + VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
2432 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
2433 +index 019dbc1fae11..cb245bd510a2 100644
2434 +--- a/drivers/pci/quirks.c
2435 ++++ b/drivers/pci/quirks.c
2436 +@@ -339,19 +339,52 @@ static void quirk_s3_64M(struct pci_dev *dev)
2437 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M);
2438 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M);
2439 +
2440 ++static void quirk_io(struct pci_dev *dev, int pos, unsigned size,
2441 ++ const char *name)
2442 ++{
2443 ++ u32 region;
2444 ++ struct pci_bus_region bus_region;
2445 ++ struct resource *res = dev->resource + pos;
2446 ++
2447 ++ pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), &region);
2448 ++
2449 ++ if (!region)
2450 ++ return;
2451 ++
2452 ++ res->name = pci_name(dev);
2453 ++ res->flags = region & ~PCI_BASE_ADDRESS_IO_MASK;
2454 ++ res->flags |=
2455 ++ (IORESOURCE_IO | IORESOURCE_PCI_FIXED | IORESOURCE_SIZEALIGN);
2456 ++ region &= ~(size - 1);
2457 ++
2458 ++ /* Convert from PCI bus to resource space */
2459 ++ bus_region.start = region;
2460 ++ bus_region.end = region + size - 1;
2461 ++ pcibios_bus_to_resource(dev, res, &bus_region);
2462 ++
2463 ++ dev_info(&dev->dev, FW_BUG "%s quirk: reg 0x%x: %pR\n",
2464 ++ name, PCI_BASE_ADDRESS_0 + (pos << 2), res);
2465 ++}
2466 ++
2467 + /*
2468 + * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS
2469 + * ver. 1.33 20070103) don't set the correct ISA PCI region header info.
2470 + * BAR0 should be 8 bytes; instead, it may be set to something like 8k
2471 + * (which conflicts w/ BAR1's memory range).
2472 ++ *
2473 ++ * CS553x's ISA PCI BARs may also be read-only (ref:
2474 ++ * https://bugzilla.kernel.org/show_bug.cgi?id=85991 - Comment #4 forward).
2475 + */
2476 + static void quirk_cs5536_vsa(struct pci_dev *dev)
2477 + {
2478 ++ static char *name = "CS5536 ISA bridge";
2479 ++
2480 + if (pci_resource_len(dev, 0) != 8) {
2481 +- struct resource *res = &dev->resource[0];
2482 +- res->end = res->start + 8 - 1;
2483 +- dev_info(&dev->dev, "CS5536 ISA bridge bug detected "
2484 +- "(incorrect header); workaround applied.\n");
2485 ++ quirk_io(dev, 0, 8, name); /* SMB */
2486 ++ quirk_io(dev, 1, 256, name); /* GPIO */
2487 ++ quirk_io(dev, 2, 64, name); /* MFGPT */
2488 ++ dev_info(&dev->dev, "%s bug detected (incorrect header); workaround applied\n",
2489 ++ name);
2490 + }
2491 + }
2492 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa);
2493 +diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
2494 +index 2ca95042a0b9..c244e7dc6d66 100644
2495 +--- a/drivers/pwm/core.c
2496 ++++ b/drivers/pwm/core.c
2497 +@@ -293,6 +293,8 @@ int pwmchip_remove(struct pwm_chip *chip)
2498 + unsigned int i;
2499 + int ret = 0;
2500 +
2501 ++ pwmchip_sysfs_unexport_children(chip);
2502 ++
2503 + mutex_lock(&pwm_lock);
2504 +
2505 + for (i = 0; i < chip->npwm; i++) {
2506 +diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
2507 +index 8c20332d4825..809b5ab9074c 100644
2508 +--- a/drivers/pwm/sysfs.c
2509 ++++ b/drivers/pwm/sysfs.c
2510 +@@ -348,6 +348,24 @@ void pwmchip_sysfs_unexport(struct pwm_chip *chip)
2511 + }
2512 + }
2513 +
2514 ++void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
2515 ++{
2516 ++ struct device *parent;
2517 ++ unsigned int i;
2518 ++
2519 ++ parent = class_find_device(&pwm_class, NULL, chip,
2520 ++ pwmchip_sysfs_match);
2521 ++ if (!parent)
2522 ++ return;
2523 ++
2524 ++ for (i = 0; i < chip->npwm; i++) {
2525 ++ struct pwm_device *pwm = &chip->pwms[i];
2526 ++
2527 ++ if (test_bit(PWMF_EXPORTED, &pwm->flags))
2528 ++ pwm_unexport_child(parent, pwm);
2529 ++ }
2530 ++}
2531 ++
2532 + static int __init pwm_sysfs_init(void)
2533 + {
2534 + return class_register(&pwm_class);
2535 +diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
2536 +index 66dda86e62e1..8d9477cc3227 100644
2537 +--- a/drivers/scsi/arcmsr/arcmsr_hba.c
2538 ++++ b/drivers/scsi/arcmsr/arcmsr_hba.c
2539 +@@ -2069,18 +2069,9 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
2540 + struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
2541 + struct CommandControlBlock *ccb;
2542 + int target = cmd->device->id;
2543 +- int lun = cmd->device->lun;
2544 +- uint8_t scsicmd = cmd->cmnd[0];
2545 + cmd->scsi_done = done;
2546 + cmd->host_scribble = NULL;
2547 + cmd->result = 0;
2548 +- if ((scsicmd == SYNCHRONIZE_CACHE) ||(scsicmd == SEND_DIAGNOSTIC)){
2549 +- if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
2550 +- cmd->result = (DID_NO_CONNECT << 16);
2551 +- }
2552 +- cmd->scsi_done(cmd);
2553 +- return 0;
2554 +- }
2555 + if (target == 16) {
2556 + /* virtual device for iop message transfer */
2557 + arcmsr_handle_virtual_command(acb, cmd);
2558 +diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
2559 +index deb1ed816c49..50e8d5912776 100644
2560 +--- a/drivers/scsi/megaraid/megaraid_sas.h
2561 ++++ b/drivers/scsi/megaraid/megaraid_sas.h
2562 +@@ -1637,7 +1637,7 @@ struct megasas_instance_template {
2563 + };
2564 +
2565 + #define MEGASAS_IS_LOGICAL(scp) \
2566 +- (scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1
2567 ++ ((scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1)
2568 +
2569 + #define MEGASAS_DEV_INDEX(inst, scp) \
2570 + ((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \
2571 +diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
2572 +index 8c3270c809c8..11eafc3f4ca0 100644
2573 +--- a/drivers/scsi/megaraid/megaraid_sas_base.c
2574 ++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
2575 +@@ -1537,16 +1537,13 @@ megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd
2576 + goto out_done;
2577 + }
2578 +
2579 +- switch (scmd->cmnd[0]) {
2580 +- case SYNCHRONIZE_CACHE:
2581 +- /*
2582 +- * FW takes care of flush cache on its own
2583 +- * No need to send it down
2584 +- */
2585 ++ /*
2586 ++ * FW takes care of flushing the cache on its own for Virtual Disks.
2587 ++ * No need to send it down for a VD; for JBOD, send SYNCHRONIZE_CACHE to FW.
2588 ++ */
2589 ++ if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd)) {
2590 + scmd->result = DID_OK << 16;
2591 + goto out_done;
2592 +- default:
2593 +- break;
2594 + }
2595 +
2596 + if (instance->instancet->build_and_issue_cmd(instance, scmd)) {
2597 +diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
2598 +index 01c0ffa31276..39f2d7d138cf 100644
2599 +--- a/drivers/scsi/scsi_debug.c
2600 ++++ b/drivers/scsi/scsi_debug.c
2601 +@@ -3502,6 +3502,7 @@ static void __exit scsi_debug_exit(void)
2602 + bus_unregister(&pseudo_lld_bus);
2603 + root_device_unregister(pseudo_primary);
2604 +
2605 ++ vfree(map_storep);
2606 + if (dif_storep)
2607 + vfree(dif_storep);
2608 +
2609 +diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
2610 +index 69fd236345cb..a29a383d160d 100644
2611 +--- a/drivers/staging/android/binder.c
2612 ++++ b/drivers/staging/android/binder.c
2613 +@@ -994,7 +994,7 @@ static int binder_dec_node(struct binder_node *node, int strong, int internal)
2614 +
2615 +
2616 + static struct binder_ref *binder_get_ref(struct binder_proc *proc,
2617 +- uint32_t desc)
2618 ++ u32 desc, bool need_strong_ref)
2619 + {
2620 + struct rb_node *n = proc->refs_by_desc.rb_node;
2621 + struct binder_ref *ref;
2622 +@@ -1002,12 +1002,16 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
2623 + while (n) {
2624 + ref = rb_entry(n, struct binder_ref, rb_node_desc);
2625 +
2626 +- if (desc < ref->desc)
2627 ++ if (desc < ref->desc) {
2628 + n = n->rb_left;
2629 +- else if (desc > ref->desc)
2630 ++ } else if (desc > ref->desc) {
2631 + n = n->rb_right;
2632 +- else
2633 ++ } else if (need_strong_ref && !ref->strong) {
2634 ++ binder_user_error("tried to use weak ref as strong ref\n");
2635 ++ return NULL;
2636 ++ } else {
2637 + return ref;
2638 ++ }
2639 + }
2640 + return NULL;
2641 + }
2642 +@@ -1270,7 +1274,10 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
2643 + } break;
2644 + case BINDER_TYPE_HANDLE:
2645 + case BINDER_TYPE_WEAK_HANDLE: {
2646 +- struct binder_ref *ref = binder_get_ref(proc, fp->handle);
2647 ++ struct binder_ref *ref;
2648 ++
2649 ++ ref = binder_get_ref(proc, fp->handle,
2650 ++ fp->type == BINDER_TYPE_HANDLE);
2651 + if (ref == NULL) {
2652 + pr_err("transaction release %d bad handle %d\n",
2653 + debug_id, fp->handle);
2654 +@@ -1362,7 +1369,7 @@ static void binder_transaction(struct binder_proc *proc,
2655 + } else {
2656 + if (tr->target.handle) {
2657 + struct binder_ref *ref;
2658 +- ref = binder_get_ref(proc, tr->target.handle);
2659 ++ ref = binder_get_ref(proc, tr->target.handle, true);
2660 + if (ref == NULL) {
2661 + binder_user_error("%d:%d got transaction to invalid handle\n",
2662 + proc->pid, thread->pid);
2663 +@@ -1534,7 +1541,9 @@ static void binder_transaction(struct binder_proc *proc,
2664 + fp->type = BINDER_TYPE_HANDLE;
2665 + else
2666 + fp->type = BINDER_TYPE_WEAK_HANDLE;
2667 ++ fp->binder = NULL;
2668 + fp->handle = ref->desc;
2669 ++ fp->cookie = NULL;
2670 + binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
2671 + &thread->todo);
2672 +
2673 +@@ -1546,7 +1555,10 @@ static void binder_transaction(struct binder_proc *proc,
2674 + } break;
2675 + case BINDER_TYPE_HANDLE:
2676 + case BINDER_TYPE_WEAK_HANDLE: {
2677 +- struct binder_ref *ref = binder_get_ref(proc, fp->handle);
2678 ++ struct binder_ref *ref;
2679 ++
2680 ++ ref = binder_get_ref(proc, fp->handle,
2681 ++ fp->type == BINDER_TYPE_HANDLE);
2682 + if (ref == NULL) {
2683 + binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2684 + proc->pid,
2685 +@@ -1574,7 +1586,9 @@ static void binder_transaction(struct binder_proc *proc,
2686 + return_error = BR_FAILED_REPLY;
2687 + goto err_binder_get_ref_for_node_failed;
2688 + }
2689 ++ fp->binder = NULL;
2690 + fp->handle = new_ref->desc;
2691 ++ fp->cookie = NULL;
2692 + binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
2693 + trace_binder_transaction_ref_to_ref(t, ref,
2694 + new_ref);
2695 +@@ -1621,6 +1635,7 @@ static void binder_transaction(struct binder_proc *proc,
2696 + binder_debug(BINDER_DEBUG_TRANSACTION,
2697 + " fd %d -> %d\n", fp->handle, target_fd);
2698 + /* TODO: fput? */
2699 ++ fp->binder = NULL;
2700 + fp->handle = target_fd;
2701 + } break;
2702 +
2703 +@@ -1739,7 +1754,9 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
2704 + ref->desc);
2705 + }
2706 + } else
2707 +- ref = binder_get_ref(proc, target);
2708 ++ ref = binder_get_ref(proc, target,
2709 ++ cmd == BC_ACQUIRE ||
2710 ++ cmd == BC_RELEASE);
2711 + if (ref == NULL) {
2712 + binder_user_error("%d:%d refcount change on invalid ref %d\n",
2713 + proc->pid, thread->pid, target);
2714 +@@ -1934,7 +1951,7 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
2715 + if (get_user(cookie, (void __user * __user *)ptr))
2716 + return -EFAULT;
2717 + ptr += sizeof(void *);
2718 +- ref = binder_get_ref(proc, target);
2719 ++ ref = binder_get_ref(proc, target, false);
2720 + if (ref == NULL) {
2721 + binder_user_error("%d:%d %s invalid ref %d\n",
2722 + proc->pid, thread->pid,
2723 +diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
2724 +index bc23d66a7a1e..1ff17352abde 100644
2725 +--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
2726 ++++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
2727 +@@ -646,6 +646,7 @@ static void ad5933_work(struct work_struct *work)
2728 + struct iio_dev *indio_dev = i2c_get_clientdata(st->client);
2729 + signed short buf[2];
2730 + unsigned char status;
2731 ++ int ret;
2732 +
2733 + mutex_lock(&indio_dev->mlock);
2734 + if (st->state == AD5933_CTRL_INIT_START_FREQ) {
2735 +@@ -653,19 +654,22 @@ static void ad5933_work(struct work_struct *work)
2736 + ad5933_cmd(st, AD5933_CTRL_START_SWEEP);
2737 + st->state = AD5933_CTRL_START_SWEEP;
2738 + schedule_delayed_work(&st->work, st->poll_time_jiffies);
2739 +- mutex_unlock(&indio_dev->mlock);
2740 +- return;
2741 ++ goto out;
2742 + }
2743 +
2744 +- ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status);
2745 ++ ret = ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status);
2746 ++ if (ret)
2747 ++ goto out;
2748 +
2749 + if (status & AD5933_STAT_DATA_VALID) {
2750 + int scan_count = bitmap_weight(indio_dev->active_scan_mask,
2751 + indio_dev->masklength);
2752 +- ad5933_i2c_read(st->client,
2753 ++ ret = ad5933_i2c_read(st->client,
2754 + test_bit(1, indio_dev->active_scan_mask) ?
2755 + AD5933_REG_REAL_DATA : AD5933_REG_IMAG_DATA,
2756 + scan_count * 2, (u8 *)buf);
2757 ++ if (ret)
2758 ++ goto out;
2759 +
2760 + if (scan_count == 2) {
2761 + buf[0] = be16_to_cpu(buf[0]);
2762 +@@ -677,8 +681,7 @@ static void ad5933_work(struct work_struct *work)
2763 + } else {
2764 + /* no data available - try again later */
2765 + schedule_delayed_work(&st->work, st->poll_time_jiffies);
2766 +- mutex_unlock(&indio_dev->mlock);
2767 +- return;
2768 ++ goto out;
2769 + }
2770 +
2771 + if (status & AD5933_STAT_SWEEP_DONE) {
2772 +@@ -690,7 +693,7 @@ static void ad5933_work(struct work_struct *work)
2773 + ad5933_cmd(st, AD5933_CTRL_INC_FREQ);
2774 + schedule_delayed_work(&st->work, st->poll_time_jiffies);
2775 + }
2776 +-
2777 ++out:
2778 + mutex_unlock(&indio_dev->mlock);
2779 + }
2780 +
2781 +diff --git a/drivers/staging/nvec/nvec_ps2.c b/drivers/staging/nvec/nvec_ps2.c
2782 +index 06dbb02085a9..90e7d841825b 100644
2783 +--- a/drivers/staging/nvec/nvec_ps2.c
2784 ++++ b/drivers/staging/nvec/nvec_ps2.c
2785 +@@ -104,13 +104,12 @@ static int nvec_mouse_probe(struct platform_device *pdev)
2786 + {
2787 + struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
2788 + struct serio *ser_dev;
2789 +- char mouse_reset[] = { NVEC_PS2, SEND_COMMAND, PSMOUSE_RST, 3 };
2790 +
2791 + ser_dev = kzalloc(sizeof(struct serio), GFP_KERNEL);
2792 + if (ser_dev == NULL)
2793 + return -ENOMEM;
2794 +
2795 +- ser_dev->id.type = SERIO_PS_PSTHRU;
2796 ++ ser_dev->id.type = SERIO_8042;
2797 + ser_dev->write = ps2_sendcommand;
2798 + ser_dev->start = ps2_startstreaming;
2799 + ser_dev->stop = ps2_stopstreaming;
2800 +@@ -125,9 +124,6 @@ static int nvec_mouse_probe(struct platform_device *pdev)
2801 +
2802 + serio_register_port(ser_dev);
2803 +
2804 +- /* mouse reset */
2805 +- nvec_write_async(nvec, mouse_reset, sizeof(mouse_reset));
2806 +-
2807 + return 0;
2808 + }
2809 +
2810 +diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
2811 +index 6458e11e8e9d..b6877aa58b0f 100644
2812 +--- a/drivers/tty/tty_ldisc.c
2813 ++++ b/drivers/tty/tty_ldisc.c
2814 +@@ -415,6 +415,10 @@ EXPORT_SYMBOL_GPL(tty_ldisc_flush);
2815 + * they are not on hot paths so a little discipline won't do
2816 + * any harm.
2817 + *
2818 ++ * The line discipline-related tty_struct fields are reset to
2819 ++ * prevent the ldisc driver from re-using stale information for
2820 ++ * the new ldisc instance.
2821 ++ *
2822 + * Locking: takes termios_rwsem
2823 + */
2824 +
2825 +@@ -423,6 +427,9 @@ static void tty_set_termios_ldisc(struct tty_struct *tty, int num)
2826 + down_write(&tty->termios_rwsem);
2827 + tty->termios.c_line = num;
2828 + up_write(&tty->termios_rwsem);
2829 ++
2830 ++ tty->disc_data = NULL;
2831 ++ tty->receive_room = 0;
2832 + }
2833 +
2834 + /**
2835 +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
2836 +index 19aba5091408..75c059c56a23 100644
2837 +--- a/drivers/tty/vt/vt.c
2838 ++++ b/drivers/tty/vt/vt.c
2839 +@@ -863,10 +863,15 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
2840 + if (new_cols == vc->vc_cols && new_rows == vc->vc_rows)
2841 + return 0;
2842 +
2843 ++ if (new_screen_size > (4 << 20))
2844 ++ return -EINVAL;
2845 + newscreen = kmalloc(new_screen_size, GFP_USER);
2846 + if (!newscreen)
2847 + return -ENOMEM;
2848 +
2849 ++ if (vc == sel_cons)
2850 ++ clear_selection();
2851 ++
2852 + old_rows = vc->vc_rows;
2853 + old_row_size = vc->vc_size_row;
2854 +
2855 +@@ -1164,7 +1169,7 @@ static void csi_J(struct vc_data *vc, int vpar)
2856 + break;
2857 + case 3: /* erase scroll-back buffer (and whole display) */
2858 + scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char,
2859 +- vc->vc_screenbuf_size >> 1);
2860 ++ vc->vc_screenbuf_size);
2861 + set_origin(vc);
2862 + if (CON_IS_VISIBLE(vc))
2863 + update_screen(vc);
2864 +diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
2865 +index 2aae0d61bb19..0a974d448a56 100644
2866 +--- a/drivers/usb/gadget/u_ether.c
2867 ++++ b/drivers/usb/gadget/u_ether.c
2868 +@@ -583,13 +583,6 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
2869 +
2870 + req->length = length;
2871 +
2872 +- /* throttle high/super speed IRQ rate back slightly */
2873 +- if (gadget_is_dualspeed(dev->gadget))
2874 +- req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
2875 +- dev->gadget->speed == USB_SPEED_SUPER)
2876 +- ? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0)
2877 +- : 0;
2878 +-
2879 + retval = usb_ep_queue(in, req, GFP_ATOMIC);
2880 + switch (retval) {
2881 + default:
2882 +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
2883 +index aedc7e479a23..1ee8c97ae6be 100644
2884 +--- a/drivers/usb/host/xhci-pci.c
2885 ++++ b/drivers/usb/host/xhci-pci.c
2886 +@@ -37,6 +37,7 @@
2887 +
2888 + #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31
2889 + #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
2890 ++#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI 0x9cb1
2891 + #define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5
2892 + #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
2893 + #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
2894 +@@ -129,7 +130,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
2895 + xhci->quirks |= XHCI_SPURIOUS_REBOOT;
2896 + }
2897 + if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
2898 +- pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
2899 ++ (pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI ||
2900 ++ pdev->device == PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI)) {
2901 + xhci->quirks |= XHCI_SPURIOUS_REBOOT;
2902 + xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
2903 + }
2904 +diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
2905 +index cce32e91fd9e..83bee312df8d 100644
2906 +--- a/drivers/usb/musb/musb_cppi41.c
2907 ++++ b/drivers/usb/musb/musb_cppi41.c
2908 +@@ -234,6 +234,7 @@ static void cppi41_dma_callback(void *private_data)
2909 + cppi41_trans_done(cppi41_channel);
2910 + } else {
2911 + struct cppi41_dma_controller *controller;
2912 ++ int is_hs = 0;
2913 + /*
2914 + * On AM335x it has been observed that the TX interrupt fires
2915 + * too early that means the TXFIFO is not yet empty but the DMA
2916 +@@ -246,7 +247,14 @@ static void cppi41_dma_callback(void *private_data)
2917 + */
2918 + controller = cppi41_channel->controller;
2919 +
2920 +- if (musb->g.speed == USB_SPEED_HIGH) {
2921 ++ if (is_host_active(musb)) {
2922 ++ if (musb->port1_status & USB_PORT_STAT_HIGH_SPEED)
2923 ++ is_hs = 1;
2924 ++ } else {
2925 ++ if (musb->g.speed == USB_SPEED_HIGH)
2926 ++ is_hs = 1;
2927 ++ }
2928 ++ if (is_hs) {
2929 + unsigned wait = 25;
2930 +
2931 + do {
2932 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
2933 +index f5e4fda7f902..188e50446514 100644
2934 +--- a/drivers/usb/serial/cp210x.c
2935 ++++ b/drivers/usb/serial/cp210x.c
2936 +@@ -919,7 +919,9 @@ static int cp210x_tiocmget(struct tty_struct *tty)
2937 + unsigned int control;
2938 + int result;
2939 +
2940 +- cp210x_get_config(port, CP210X_GET_MDMSTS, &control, 1);
2941 ++ result = cp210x_get_config(port, CP210X_GET_MDMSTS, &control, 1);
2942 ++ if (result)
2943 ++ return result;
2944 +
2945 + result = ((control & CONTROL_DTR) ? TIOCM_DTR : 0)
2946 + |((control & CONTROL_RTS) ? TIOCM_RTS : 0)
2947 +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
2948 +index e5545c5ced89..62ec56e379a0 100644
2949 +--- a/drivers/usb/serial/ftdi_sio.c
2950 ++++ b/drivers/usb/serial/ftdi_sio.c
2951 +@@ -1000,7 +1000,8 @@ static struct usb_device_id id_table_combined [] = {
2952 + /* ekey Devices */
2953 + { USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) },
2954 + /* Infineon Devices */
2955 +- { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
2956 ++ { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC1798_PID, 1) },
2957 ++ { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC2X7_PID, 1) },
2958 + /* GE Healthcare devices */
2959 + { USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) },
2960 + /* Active Research (Actisense) devices */
2961 +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
2962 +index 48db84f25cc9..db1a9b3a5f38 100644
2963 +--- a/drivers/usb/serial/ftdi_sio_ids.h
2964 ++++ b/drivers/usb/serial/ftdi_sio_ids.h
2965 +@@ -626,8 +626,9 @@
2966 + /*
2967 + * Infineon Technologies
2968 + */
2969 +-#define INFINEON_VID 0x058b
2970 +-#define INFINEON_TRIBOARD_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
2971 ++#define INFINEON_VID 0x058b
2972 ++#define INFINEON_TRIBOARD_TC1798_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
2973 ++#define INFINEON_TRIBOARD_TC2X7_PID 0x0043 /* DAS JTAG TriBoard TC2X7 V1.0 */
2974 +
2975 + /*
2976 + * Acton Research Corp.
2977 +diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
2978 +index 137908af7c4c..4427705575c5 100644
2979 +--- a/drivers/usb/serial/usb-serial.c
2980 ++++ b/drivers/usb/serial/usb-serial.c
2981 +@@ -1061,7 +1061,8 @@ static int usb_serial_probe(struct usb_interface *interface,
2982 +
2983 + serial->disconnected = 0;
2984 +
2985 +- usb_serial_console_init(serial->port[0]->minor);
2986 ++ if (num_ports > 0)
2987 ++ usb_serial_console_init(serial->port[0]->minor);
2988 + exit:
2989 + module_put(type->driver.owner);
2990 + return 0;
2991 +diff --git a/drivers/uwb/lc-rc.c b/drivers/uwb/lc-rc.c
2992 +index 3eca6ceb9844..4be2a5d1a9d2 100644
2993 +--- a/drivers/uwb/lc-rc.c
2994 ++++ b/drivers/uwb/lc-rc.c
2995 +@@ -56,8 +56,11 @@ static struct uwb_rc *uwb_rc_find_by_index(int index)
2996 + struct uwb_rc *rc = NULL;
2997 +
2998 + dev = class_find_device(&uwb_rc_class, NULL, &index, uwb_rc_index_match);
2999 +- if (dev)
3000 ++ if (dev) {
3001 + rc = dev_get_drvdata(dev);
3002 ++ put_device(dev);
3003 ++ }
3004 ++
3005 + return rc;
3006 + }
3007 +
3008 +@@ -368,7 +371,9 @@ struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *target_rc)
3009 + if (dev) {
3010 + rc = dev_get_drvdata(dev);
3011 + __uwb_rc_get(rc);
3012 ++ put_device(dev);
3013 + }
3014 ++
3015 + return rc;
3016 + }
3017 + EXPORT_SYMBOL_GPL(__uwb_rc_try_get);
3018 +@@ -421,8 +426,11 @@ struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *grandpa_dev)
3019 +
3020 + dev = class_find_device(&uwb_rc_class, NULL, grandpa_dev,
3021 + find_rc_grandpa);
3022 +- if (dev)
3023 ++ if (dev) {
3024 + rc = dev_get_drvdata(dev);
3025 ++ put_device(dev);
3026 ++ }
3027 ++
3028 + return rc;
3029 + }
3030 + EXPORT_SYMBOL_GPL(uwb_rc_get_by_grandpa);
3031 +@@ -454,8 +462,10 @@ struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *addr)
3032 + struct uwb_rc *rc = NULL;
3033 +
3034 + dev = class_find_device(&uwb_rc_class, NULL, addr, find_rc_dev);
3035 +- if (dev)
3036 ++ if (dev) {
3037 + rc = dev_get_drvdata(dev);
3038 ++ put_device(dev);
3039 ++ }
3040 +
3041 + return rc;
3042 + }
3043 +diff --git a/drivers/uwb/pal.c b/drivers/uwb/pal.c
3044 +index c1304b8d4985..678e93741ae1 100644
3045 +--- a/drivers/uwb/pal.c
3046 ++++ b/drivers/uwb/pal.c
3047 +@@ -97,6 +97,8 @@ static bool uwb_rc_class_device_exists(struct uwb_rc *target_rc)
3048 +
3049 + dev = class_find_device(&uwb_rc_class, NULL, target_rc, find_rc);
3050 +
3051 ++ put_device(dev);
3052 ++
3053 + return (dev != NULL);
3054 + }
3055 +
3056 +diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
3057 +index ba3fac8318bb..47a4177b16d2 100644
3058 +--- a/drivers/xen/xen-pciback/conf_space.c
3059 ++++ b/drivers/xen/xen-pciback/conf_space.c
3060 +@@ -16,8 +16,8 @@
3061 + #include "conf_space.h"
3062 + #include "conf_space_quirks.h"
3063 +
3064 +-bool permissive;
3065 +-module_param(permissive, bool, 0644);
3066 ++bool xen_pcibk_permissive;
3067 ++module_param_named(permissive, xen_pcibk_permissive, bool, 0644);
3068 +
3069 + /* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word,
3070 + * xen_pcibk_write_config_word, and xen_pcibk_write_config_byte are created. */
3071 +@@ -260,7 +260,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
3072 + * This means that some fields may still be read-only because
3073 + * they have entries in the config_field list that intercept
3074 + * the write and do nothing. */
3075 +- if (dev_data->permissive || permissive) {
3076 ++ if (dev_data->permissive || xen_pcibk_permissive) {
3077 + switch (size) {
3078 + case 1:
3079 + err = pci_write_config_byte(dev, offset,
3080 +diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
3081 +index 2e1d73d1d5d0..62461a8ba1d6 100644
3082 +--- a/drivers/xen/xen-pciback/conf_space.h
3083 ++++ b/drivers/xen/xen-pciback/conf_space.h
3084 +@@ -64,7 +64,7 @@ struct config_field_entry {
3085 + void *data;
3086 + };
3087 +
3088 +-extern bool permissive;
3089 ++extern bool xen_pcibk_permissive;
3090 +
3091 + #define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset)
3092 +
3093 +diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
3094 +index 2d7369391472..f8baf463dd35 100644
3095 +--- a/drivers/xen/xen-pciback/conf_space_header.c
3096 ++++ b/drivers/xen/xen-pciback/conf_space_header.c
3097 +@@ -105,7 +105,7 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
3098 +
3099 + cmd->val = value;
3100 +
3101 +- if (!permissive && (!dev_data || !dev_data->permissive))
3102 ++ if (!xen_pcibk_permissive && (!dev_data || !dev_data->permissive))
3103 + return 0;
3104 +
3105 + /* Only allow the guest to control certain bits. */
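
The rename stops the driver from exporting a global symbol as generic as "permissive"; module_param_named() decouples the option name users pass from the C identifier, so the module's interface is unchanged. A minimal sketch of the idiom:

    /* the C symbol is namespaced, but the option keeps its old name: */
    bool xen_pcibk_permissive;
    module_param_named(permissive, xen_pcibk_permissive, bool, 0644);

    /* loaded exactly as before: modprobe xen-pciback permissive=1 */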
3106 +diff --git a/fs/coredump.c b/fs/coredump.c
3107 +index 86753db01f2d..29950247a29a 100644
3108 +--- a/fs/coredump.c
3109 ++++ b/fs/coredump.c
3110 +@@ -1,6 +1,7 @@
3111 + #include <linux/slab.h>
3112 + #include <linux/file.h>
3113 + #include <linux/fdtable.h>
3114 ++#include <linux/freezer.h>
3115 + #include <linux/mm.h>
3116 + #include <linux/stat.h>
3117 + #include <linux/fcntl.h>
3118 +@@ -386,7 +387,9 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
3119 + if (core_waiters > 0) {
3120 + struct core_thread *ptr;
3121 +
3122 ++ freezer_do_not_count();
3123 + wait_for_completion(&core_state->startup);
3124 ++ freezer_count();
3125 + /*
3126 + * Wait for all the threads to become inactive, so that
3127 + * all the thread context (extended register state, like
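
Bracketing the wait with freezer_do_not_count()/freezer_count() lets a dumping task sleep in wait_for_completion() without holding up a system-wide freeze: the freezer treats the task as already frozen for the duration. The general pattern, as a sketch:

    #include <linux/freezer.h>

    freezer_do_not_count();            /* freezer may now skip this task */
    wait_for_completion(&core_state->startup);
    freezer_count();                   /* rejoin accounting; freeze here
                                          if a freeze was requested */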
3128 +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
3129 +index 66d2dc9ef561..7e80c4dd4735 100644
3130 +--- a/fs/ext4/ext4.h
3131 ++++ b/fs/ext4/ext4.h
3132 +@@ -233,6 +233,7 @@ struct ext4_io_submit {
3133 + #define EXT4_MAX_BLOCK_SIZE 65536
3134 + #define EXT4_MIN_BLOCK_LOG_SIZE 10
3135 + #define EXT4_MAX_BLOCK_LOG_SIZE 16
3136 ++#define EXT4_MAX_CLUSTER_LOG_SIZE 30
3137 + #ifdef __KERNEL__
3138 + # define EXT4_BLOCK_SIZE(s) ((s)->s_blocksize)
3139 + #else
3140 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
3141 +index 584d22c58329..483bc328643d 100644
3142 +--- a/fs/ext4/super.c
3143 ++++ b/fs/ext4/super.c
3144 +@@ -3612,7 +3612,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3145 + if (blocksize < EXT4_MIN_BLOCK_SIZE ||
3146 + blocksize > EXT4_MAX_BLOCK_SIZE) {
3147 + ext4_msg(sb, KERN_ERR,
3148 +- "Unsupported filesystem blocksize %d", blocksize);
3149 ++ "Unsupported filesystem blocksize %d (%d log_block_size)",
3150 ++ blocksize, le32_to_cpu(es->s_log_block_size));
3151 ++ goto failed_mount;
3152 ++ }
3153 ++ if (le32_to_cpu(es->s_log_block_size) >
3154 ++ (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
3155 ++ ext4_msg(sb, KERN_ERR,
3156 ++ "Invalid log block size: %u",
3157 ++ le32_to_cpu(es->s_log_block_size));
3158 + goto failed_mount;
3159 + }
3160 +
3161 +@@ -3727,6 +3735,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3162 + "block size (%d)", clustersize, blocksize);
3163 + goto failed_mount;
3164 + }
3165 ++ if (le32_to_cpu(es->s_log_cluster_size) >
3166 ++ (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
3167 ++ ext4_msg(sb, KERN_ERR,
3168 ++ "Invalid log cluster size: %u",
3169 ++ le32_to_cpu(es->s_log_cluster_size));
3170 ++ goto failed_mount;
3171 ++ }
3172 + sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
3173 + le32_to_cpu(es->s_log_block_size);
3174 + sbi->s_clusters_per_group =
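
Both new checks bound the on-disk exponents before they are used as shift counts. Since the block size is derived as 1024 << s_log_block_size and EXT4_MIN_BLOCK_LOG_SIZE is 10, the largest acceptable exponent is 16 - 10 = 6 (a 64 KiB block); an unchecked value from a crafted image would otherwise produce an out-of-range shift. Roughly:

    /* s_log_block_size = 6  ->  1024 << 6 = 65536 = EXT4_MAX_BLOCK_SIZE */
    blocksize = EXT4_MIN_BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);

The cluster check is the same idea with EXT4_MAX_CLUSTER_LOG_SIZE (30) as the ceiling.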
3175 +diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
3176 +index 6b4947f75af7..a751d1aa0e6a 100644
3177 +--- a/fs/ubifs/dir.c
3178 ++++ b/fs/ubifs/dir.c
3179 +@@ -348,7 +348,7 @@ static unsigned int vfs_dent_type(uint8_t type)
3180 + */
3181 + static int ubifs_readdir(struct file *file, struct dir_context *ctx)
3182 + {
3183 +- int err;
3184 ++ int err = 0;
3185 + struct qstr nm;
3186 + union ubifs_key key;
3187 + struct ubifs_dent_node *dent;
3188 +@@ -447,16 +447,23 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx)
3189 + }
3190 +
3191 + out:
3192 +- if (err != -ENOENT) {
3193 +- ubifs_err("cannot find next direntry, error %d", err);
3194 +- return err;
3195 +- }
3196 +-
3197 + kfree(file->private_data);
3198 + file->private_data = NULL;
3199 ++
3200 ++ if (err != -ENOENT)
3201 ++ ubifs_err("cannot find next direntry, error %d", err);
3202 ++ else
3203 ++ /*
3204 ++ * -ENOENT is a non-fatal error in this context; the TNC uses
3205 ++ * it to indicate that the cursor moved past the current directory
3206 ++ * and readdir() has to stop.
3207 ++ */
3208 ++ err = 0;
3209 ++
3210 ++
3211 + /* 2 is a special value indicating that there are no more direntries */
3212 + ctx->pos = 2;
3213 +- return 0;
3214 ++ return err;
3215 + }
3216 +
3217 + /* Free saved readdir() state when the directory is closed */
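
Beyond treating -ENOENT as the normal end-of-directory marker, the restructured exit path frees the saved cursor state on every return; previously an early "return err" leaked file->private_data. The resulting shape, condensed and with the error log omitted:

    out:
        kfree(file->private_data);   /* always release the cursor state */
        file->private_data = NULL;
        if (err == -ENOENT)          /* TNC's end-of-directory signal */
            err = 0;
        ctx->pos = 2;                /* no more direntries */
        return err;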
3218 +diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
3219 +index 895db7a88412..65d600f0d200 100644
3220 +--- a/fs/xfs/xfs_dquot.c
3221 ++++ b/fs/xfs/xfs_dquot.c
3222 +@@ -312,8 +312,7 @@ xfs_dquot_buf_verify_crc(
3223 + if (mp->m_quotainfo)
3224 + ndquots = mp->m_quotainfo->qi_dqperchunk;
3225 + else
3226 +- ndquots = xfs_qm_calc_dquots_per_chunk(mp,
3227 +- XFS_BB_TO_FSB(mp, bp->b_length));
3228 ++ ndquots = xfs_qm_calc_dquots_per_chunk(mp, bp->b_length);
3229 +
3230 + for (i = 0; i < ndquots; i++, d++) {
3231 + if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
3232 +diff --git a/include/linux/filter.h b/include/linux/filter.h
3233 +index ff4e40cd45b1..264c1a440240 100644
3234 +--- a/include/linux/filter.h
3235 ++++ b/include/linux/filter.h
3236 +@@ -41,7 +41,11 @@ static inline unsigned int sk_filter_size(unsigned int proglen)
3237 + offsetof(struct sk_filter, insns[proglen]));
3238 + }
3239 +
3240 +-extern int sk_filter(struct sock *sk, struct sk_buff *skb);
3241 ++int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
3242 ++static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
3243 ++{
3244 ++ return sk_filter_trim_cap(sk, skb, 1);
3245 ++}
3246 + extern unsigned int sk_run_filter(const struct sk_buff *skb,
3247 + const struct sock_filter *filter);
3248 + extern int sk_unattached_filter_create(struct sk_filter **pfp,
3249 +diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
3250 +index 1eaf61dde2c3..6671b365ba60 100644
3251 +--- a/include/linux/hugetlb.h
3252 ++++ b/include/linux/hugetlb.h
3253 +@@ -395,15 +395,14 @@ static inline int hugepage_migration_support(struct hstate *h)
3254 + #endif
3255 + }
3256 +
3257 +-static inline bool hugepages_supported(void)
3258 +-{
3259 +- /*
3260 +- * Some platform decide whether they support huge pages at boot
3261 +- * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
3262 +- * there is no such support
3263 +- */
3264 +- return HPAGE_SHIFT != 0;
3265 +-}
3266 ++#ifndef hugepages_supported
3267 ++/*
3268 ++ * Some platforms decide whether they support huge pages at boot
3269 ++ * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
3270 ++ * when there is no such support
3271 ++ */
3272 ++#define hugepages_supported() (HPAGE_SHIFT != 0)
3273 ++#endif
3274 +
3275 + #else /* CONFIG_HUGETLB_PAGE */
3276 + struct hstate {};
3277 +diff --git a/include/linux/mroute.h b/include/linux/mroute.h
3278 +index 79aaa9fc1a15..d5277fc3ce2e 100644
3279 +--- a/include/linux/mroute.h
3280 ++++ b/include/linux/mroute.h
3281 +@@ -103,5 +103,5 @@ struct mfc_cache {
3282 + struct rtmsg;
3283 + extern int ipmr_get_route(struct net *net, struct sk_buff *skb,
3284 + __be32 saddr, __be32 daddr,
3285 +- struct rtmsg *rtm, int nowait);
3286 ++ struct rtmsg *rtm, int nowait, u32 portid);
3287 + #endif
3288 +diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
3289 +index 66982e764051..f831155dc7d1 100644
3290 +--- a/include/linux/mroute6.h
3291 ++++ b/include/linux/mroute6.h
3292 +@@ -115,7 +115,7 @@ struct mfc6_cache {
3293 +
3294 + struct rtmsg;
3295 + extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
3296 +- struct rtmsg *rtm, int nowait);
3297 ++ struct rtmsg *rtm, int nowait, u32 portid);
3298 +
3299 + #ifdef CONFIG_IPV6_MROUTE
3300 + extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb);
3301 +diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
3302 +index c8ba627c1d60..45aa1c62dbfa 100644
3303 +--- a/include/linux/perf_event.h
3304 ++++ b/include/linux/perf_event.h
3305 +@@ -439,11 +439,6 @@ struct perf_event {
3306 + #endif /* CONFIG_PERF_EVENTS */
3307 + };
3308 +
3309 +-enum perf_event_context_type {
3310 +- task_context,
3311 +- cpu_context,
3312 +-};
3313 +-
3314 + /**
3315 + * struct perf_event_context - event context structure
3316 + *
3317 +@@ -451,7 +446,6 @@ enum perf_event_context_type {
3318 + */
3319 + struct perf_event_context {
3320 + struct pmu *pmu;
3321 +- enum perf_event_context_type type;
3322 + /*
3323 + * Protect the states of the events in the list,
3324 + * nr_active, and the list:
3325 +diff --git a/include/linux/pwm.h b/include/linux/pwm.h
3326 +index f0feafd184a0..08b0215128dc 100644
3327 +--- a/include/linux/pwm.h
3328 ++++ b/include/linux/pwm.h
3329 +@@ -295,6 +295,7 @@ static inline void pwm_add_table(struct pwm_lookup *table, size_t num)
3330 + #ifdef CONFIG_PWM_SYSFS
3331 + void pwmchip_sysfs_export(struct pwm_chip *chip);
3332 + void pwmchip_sysfs_unexport(struct pwm_chip *chip);
3333 ++void pwmchip_sysfs_unexport_children(struct pwm_chip *chip);
3334 + #else
3335 + static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
3336 + {
3337 +@@ -303,6 +304,10 @@ static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
3338 + static inline void pwmchip_sysfs_unexport(struct pwm_chip *chip)
3339 + {
3340 + }
3341 ++
3342 ++static inline void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
3343 ++{
3344 ++}
3345 + #endif /* CONFIG_PWM_SYSFS */
3346 +
3347 + #endif /* __LINUX_PWM_H */
3348 +diff --git a/include/linux/stddef.h b/include/linux/stddef.h
3349 +index f4aec0e75c3a..9c61c7cda936 100644
3350 +--- a/include/linux/stddef.h
3351 ++++ b/include/linux/stddef.h
3352 +@@ -3,7 +3,6 @@
3353 +
3354 + #include <uapi/linux/stddef.h>
3355 +
3356 +-
3357 + #undef NULL
3358 + #define NULL ((void *)0)
3359 +
3360 +@@ -14,8 +13,18 @@ enum {
3361 +
3362 + #undef offsetof
3363 + #ifdef __compiler_offsetof
3364 +-#define offsetof(TYPE,MEMBER) __compiler_offsetof(TYPE,MEMBER)
3365 ++#define offsetof(TYPE, MEMBER) __compiler_offsetof(TYPE, MEMBER)
3366 + #else
3367 +-#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
3368 ++#define offsetof(TYPE, MEMBER) ((size_t)&((TYPE *)0)->MEMBER)
3369 + #endif
3370 ++
3371 ++/**
3372 ++ * offsetofend(TYPE, MEMBER)
3373 ++ *
3374 ++ * @TYPE: The type of the structure
3375 ++ * @MEMBER: The member within the structure to get the end offset of
3376 ++ */
3377 ++#define offsetofend(TYPE, MEMBER) \
3378 ++ (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))
3379 ++
3380 + #endif
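
Promoting offsetofend() into <linux/stddef.h> also swaps the statement-expression version from vfio.h (removed just below) for a constant expression, so it can now feed BUILD_BUG_ON() and other compile-time contexts. A stand-alone sketch that compiles in userspace:

    #include <stddef.h>
    #include <stdio.h>

    #define offsetofend(TYPE, MEMBER) \
            (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

    struct hdr {
        unsigned short sport;   /* bytes 0-1 */
        unsigned short dport;   /* bytes 2-3 */
        unsigned int   seq;     /* bytes 4-7 */
    };

    int main(void)
    {
        printf("%zu\n", offsetofend(struct hdr, dport));  /* prints 4 */
        return 0;
    }

The dccp hunks later in this patch rely on exactly this property.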
3381 +diff --git a/include/linux/vfio.h b/include/linux/vfio.h
3382 +index 24579a0312a0..9131a4bf5c3e 100644
3383 +--- a/include/linux/vfio.h
3384 ++++ b/include/linux/vfio.h
3385 +@@ -76,20 +76,6 @@ extern int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops);
3386 + extern void vfio_unregister_iommu_driver(
3387 + const struct vfio_iommu_driver_ops *ops);
3388 +
3389 +-/**
3390 +- * offsetofend(TYPE, MEMBER)
3391 +- *
3392 +- * @TYPE: The type of the structure
3393 +- * @MEMBER: The member within the structure to get the end offset of
3394 +- *
3395 +- * Simple helper macro for dealing with variable sized structures passed
3396 +- * from user space. This allows us to easily determine if the provided
3397 +- * structure is sized to include various fields.
3398 +- */
3399 +-#define offsetofend(TYPE, MEMBER) ({ \
3400 +- TYPE tmp; \
3401 +- offsetof(TYPE, MEMBER) + sizeof(tmp.MEMBER); }) \
3402 +-
3403 + /*
3404 + * External user API
3405 + */
3406 +diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
3407 +index 6d1549c4893c..e6f0917d1ab5 100644
3408 +--- a/include/net/ip6_tunnel.h
3409 ++++ b/include/net/ip6_tunnel.h
3410 +@@ -75,6 +75,7 @@ static inline void ip6tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
3411 + struct net_device_stats *stats = &dev->stats;
3412 + int pkt_len, err;
3413 +
3414 ++ memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
3415 + pkt_len = skb->len;
3416 + err = ip6_local_out(skb);
3417 +
3418 +diff --git a/include/net/sock.h b/include/net/sock.h
3419 +index 6ed6df149bce..238e934dd3c3 100644
3420 +--- a/include/net/sock.h
3421 ++++ b/include/net/sock.h
3422 +@@ -1380,7 +1380,7 @@ static inline struct inode *SOCK_INODE(struct socket *socket)
3423 + * Functions for memory accounting
3424 + */
3425 + extern int __sk_mem_schedule(struct sock *sk, int size, int kind);
3426 +-extern void __sk_mem_reclaim(struct sock *sk);
3427 ++void __sk_mem_reclaim(struct sock *sk, int amount);
3428 +
3429 + #define SK_MEM_QUANTUM ((int)PAGE_SIZE)
3430 + #define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
3431 +@@ -1421,7 +1421,7 @@ static inline void sk_mem_reclaim(struct sock *sk)
3432 + if (!sk_has_account(sk))
3433 + return;
3434 + if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
3435 +- __sk_mem_reclaim(sk);
3436 ++ __sk_mem_reclaim(sk, sk->sk_forward_alloc);
3437 + }
3438 +
3439 + static inline void sk_mem_reclaim_partial(struct sock *sk)
3440 +@@ -1429,7 +1429,7 @@ static inline void sk_mem_reclaim_partial(struct sock *sk)
3441 + if (!sk_has_account(sk))
3442 + return;
3443 + if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
3444 +- __sk_mem_reclaim(sk);
3445 ++ __sk_mem_reclaim(sk, sk->sk_forward_alloc - 1);
3446 + }
3447 +
3448 + static inline void sk_mem_charge(struct sock *sk, int size)
3449 +@@ -1444,6 +1444,16 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
3450 + if (!sk_has_account(sk))
3451 + return;
3452 + sk->sk_forward_alloc += size;
3453 ++
3454 ++ /* Avoid a possible overflow.
3455 ++ * TCP send queues can make this happen, if sk_mem_reclaim()
3456 ++ * is not called and more than 2 GBytes are released at once.
3457 ++ *
3458 ++ * If we reach 2 MBytes, reclaim 1 MByte right now; there is
3459 ++ * no need to hold that much forward allocation anyway.
3460 ++ */
3461 ++ if (unlikely(sk->sk_forward_alloc >= 1 << 21))
3462 ++ __sk_mem_reclaim(sk, 1 << 20);
3463 + }
3464 +
3465 + static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
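
sk_forward_alloc is a plain int, so uncharging more than 2 GB between reclaims would overflow it. The new guard caps the cached forward allocation at 2 MB and immediately hands 1 MB back; __sk_mem_reclaim() now takes an explicit amount to make such partial reclaims possible:

    if (unlikely(sk->sk_forward_alloc >= 1 << 21))  /* 2 MB reached */
        __sk_mem_reclaim(sk, 1 << 20);              /* release 1 MB */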
3466 +diff --git a/include/net/tcp.h b/include/net/tcp.h
3467 +index 035135b43820..83d03f86e914 100644
3468 +--- a/include/net/tcp.h
3469 ++++ b/include/net/tcp.h
3470 +@@ -1049,6 +1049,7 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp)
3471 + }
3472 +
3473 + extern bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
3474 ++int tcp_filter(struct sock *sk, struct sk_buff *skb);
3475 +
3476 + #undef STATE_TRACE
3477 +
3478 +diff --git a/kernel/events/core.c b/kernel/events/core.c
3479 +index 0b3c09a3f7b6..a4a1516f3efc 100644
3480 +--- a/kernel/events/core.c
3481 ++++ b/kernel/events/core.c
3482 +@@ -6503,7 +6503,6 @@ skip_type:
3483 + __perf_event_init_context(&cpuctx->ctx);
3484 + lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
3485 + lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
3486 +- cpuctx->ctx.type = cpu_context;
3487 + cpuctx->ctx.pmu = pmu;
3488 +
3489 + __perf_cpu_hrtimer_init(cpuctx, cpu);
3490 +@@ -7136,7 +7135,19 @@ SYSCALL_DEFINE5(perf_event_open,
3491 + * task or CPU context:
3492 + */
3493 + if (move_group) {
3494 +- if (group_leader->ctx->type != ctx->type)
3495 ++ /*
3496 ++ * Make sure we're both on the same task, or both
3497 ++ * per-cpu events.
3498 ++ */
3499 ++ if (group_leader->ctx->task != ctx->task)
3500 ++ goto err_context;
3501 ++
3502 ++ /*
3503 ++ * Make sure we're both events for the same CPU;
3504 ++ * grouping events for different CPUs is broken, since
3505 ++ * you can never concurrently schedule them anyhow.
3506 ++ */
3507 ++ if (group_leader->cpu != event->cpu)
3508 + goto err_context;
3509 + } else {
3510 + if (group_leader->ctx != ctx)
3511 +diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
3512 +index 269b097e78ea..743615bfdcec 100644
3513 +--- a/kernel/power/suspend_test.c
3514 ++++ b/kernel/power/suspend_test.c
3515 +@@ -169,8 +169,10 @@ static int __init test_suspend(void)
3516 +
3517 + /* RTCs have initialized by now too ... can we use one? */
3518 + dev = class_find_device(rtc_class, NULL, NULL, has_wakealarm);
3519 +- if (dev)
3520 ++ if (dev) {
3521 + rtc = rtc_class_open(dev_name(dev));
3522 ++ put_device(dev);
3523 ++ }
3524 + if (!rtc) {
3525 + printk(warn_no_rtc);
3526 + goto done;
3527 +diff --git a/lib/genalloc.c b/lib/genalloc.c
3528 +index 26cf20be72b7..17271ef368ca 100644
3529 +--- a/lib/genalloc.c
3530 ++++ b/lib/genalloc.c
3531 +@@ -273,7 +273,7 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
3532 + struct gen_pool_chunk *chunk;
3533 + unsigned long addr = 0;
3534 + int order = pool->min_alloc_order;
3535 +- int nbits, start_bit = 0, end_bit, remain;
3536 ++ int nbits, start_bit, end_bit, remain;
3537 +
3538 + #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
3539 + BUG_ON(in_nmi());
3540 +@@ -288,6 +288,7 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
3541 + if (size > atomic_read(&chunk->avail))
3542 + continue;
3543 +
3544 ++ start_bit = 0;
3545 + end_bit = chunk_size(chunk) >> order;
3546 + retry:
3547 + start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
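
The genalloc fix is subtle: start_bit was initialised once outside the chunk loop, so the final search position from one chunk leaked into the scan of the next, which could then skip free space at its start. Resetting it per iteration restores a full scan of every chunk:

    list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
        start_bit = 0;                       /* restart the scan per chunk */
        end_bit = chunk_size(chunk) >> order;
        /* pool->algo() then searches [start_bit, end_bit) */
    }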
3548 +diff --git a/mm/filemap.c b/mm/filemap.c
3549 +index af9e11ea4ecf..9fa5c3f40cd6 100644
3550 +--- a/mm/filemap.c
3551 ++++ b/mm/filemap.c
3552 +@@ -808,8 +808,8 @@ EXPORT_SYMBOL(page_cache_prev_hole);
3553 + * Looks up the page cache slot at @mapping & @offset. If there is a
3554 + * page cache page, it is returned with an increased refcount.
3555 + *
3556 +- * If the slot holds a shadow entry of a previously evicted page, it
3557 +- * is returned.
3558 ++ * If the slot holds a shadow entry of a previously evicted page, or a
3559 ++ * swap entry from shmem/tmpfs, it is returned.
3560 + *
3561 + * Otherwise, %NULL is returned.
3562 + */
3563 +@@ -830,9 +830,9 @@ repeat:
3564 + if (radix_tree_deref_retry(page))
3565 + goto repeat;
3566 + /*
3567 +- * Otherwise, shmem/tmpfs must be storing a swap entry
3568 +- * here as an exceptional entry: so return it without
3569 +- * attempting to raise page count.
3570 ++ * A shadow entry of a recently evicted page,
3571 ++ * or a swap entry from shmem/tmpfs. Return
3572 ++ * it without attempting to raise page count.
3573 + */
3574 + goto out;
3575 + }
3576 +@@ -865,8 +865,8 @@ EXPORT_SYMBOL(find_get_entry);
3577 + * page cache page, it is returned locked and with an increased
3578 + * refcount.
3579 + *
3580 +- * If the slot holds a shadow entry of a previously evicted page, it
3581 +- * is returned.
3582 ++ * If the slot holds a shadow entry of a previously evicted page, or a
3583 ++ * swap entry from shmem/tmpfs, it is returned.
3584 + *
3585 + * Otherwise, %NULL is returned.
3586 + *
3587 +@@ -999,8 +999,8 @@ EXPORT_SYMBOL(pagecache_get_page);
3588 + * with ascending indexes. There may be holes in the indices due to
3589 + * not-present pages.
3590 + *
3591 +- * Any shadow entries of evicted pages are included in the returned
3592 +- * array.
3593 ++ * Any shadow entries of evicted pages, or swap entries from
3594 ++ * shmem/tmpfs, are included in the returned array.
3595 + *
3596 + * find_get_entries() returns the number of pages and shadow entries
3597 + * which were found.
3598 +@@ -1028,9 +1028,9 @@ repeat:
3599 + if (radix_tree_deref_retry(page))
3600 + goto restart;
3601 + /*
3602 +- * Otherwise, we must be storing a swap entry
3603 +- * here as an exceptional entry: so return it
3604 +- * without attempting to raise page count.
3605 ++ * A shadow entry of a recently evicted page,
3606 ++ * or a swap entry from shmem/tmpfs. Return
3607 ++ * it without attempting to raise page count.
3608 + */
3609 + goto export;
3610 + }
3611 +@@ -1098,9 +1098,9 @@ repeat:
3612 + goto restart;
3613 + }
3614 + /*
3615 +- * Otherwise, shmem/tmpfs must be storing a swap entry
3616 +- * here as an exceptional entry: so skip over it -
3617 +- * we only reach this from invalidate_mapping_pages().
3618 ++ * A shadow entry of a recently evicted page,
3619 ++ * or a swap entry from shmem/tmpfs. Skip
3620 ++ * over it.
3621 + */
3622 + continue;
3623 + }
3624 +@@ -1165,9 +1165,9 @@ repeat:
3625 + goto restart;
3626 + }
3627 + /*
3628 +- * Otherwise, shmem/tmpfs must be storing a swap entry
3629 +- * here as an exceptional entry: so stop looking for
3630 +- * contiguous pages.
3631 ++ * A shadow entry of a recently evicted page,
3632 ++ * or a swap entry from shmem/tmpfs. Stop
3633 ++ * looking for contiguous pages.
3634 + */
3635 + break;
3636 + }
3637 +@@ -1241,10 +1241,17 @@ repeat:
3638 + goto restart;
3639 + }
3640 + /*
3641 +- * This function is never used on a shmem/tmpfs
3642 +- * mapping, so a swap entry won't be found here.
3643 ++ * A shadow entry of a recently evicted page.
3644 ++ *
3645 ++ * Those entries should never be tagged, but
3646 ++ * this tree walk is lockless and the tags are
3647 ++ * looked up in bulk, one radix tree node at a
3648 ++ * time, so there is a sizable window for page
3649 ++ * reclaim to evict a page we saw tagged.
3650 ++ *
3651 ++ * Skip over it.
3652 + */
3653 +- BUG();
3654 ++ continue;
3655 + }
3656 +
3657 + if (!page_cache_get_speculative(page))
3658 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
3659 +index 4a1559d8739f..0154a004667c 100644
3660 +--- a/mm/memcontrol.c
3661 ++++ b/mm/memcontrol.c
3662 +@@ -6599,16 +6599,20 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
3663 + pgoff = pte_to_pgoff(ptent);
3664 +
3665 + /* page is moved even if it's not RSS of this task(page-faulted). */
3666 +- page = find_get_page(mapping, pgoff);
3667 +-
3668 + #ifdef CONFIG_SWAP
3669 + /* shmem/tmpfs may report page out on swap: account for that too. */
3670 +- if (radix_tree_exceptional_entry(page)) {
3671 +- swp_entry_t swap = radix_to_swp_entry(page);
3672 +- if (do_swap_account)
3673 +- *entry = swap;
3674 +- page = find_get_page(swap_address_space(swap), swap.val);
3675 +- }
3676 ++ if (shmem_mapping(mapping)) {
3677 ++ page = find_get_entry(mapping, pgoff);
3678 ++ if (radix_tree_exceptional_entry(page)) {
3679 ++ swp_entry_t swp = radix_to_swp_entry(page);
3680 ++ if (do_swap_account)
3681 ++ *entry = swp;
3682 ++ page = find_get_page(swap_address_space(swp), swp.val);
3683 ++ }
3684 ++ } else
3685 ++ page = find_get_page(mapping, pgoff);
3686 ++#else
3687 ++ page = find_get_page(mapping, pgoff);
3688 + #endif
3689 + return page;
3690 + }
3691 +diff --git a/mm/memory.c b/mm/memory.c
3692 +index a0c9c6cb59d1..f5744269a454 100644
3693 +--- a/mm/memory.c
3694 ++++ b/mm/memory.c
3695 +@@ -116,6 +116,8 @@ __setup("norandmaps", disable_randmaps);
3696 + unsigned long zero_pfn __read_mostly;
3697 + unsigned long highest_memmap_pfn __read_mostly;
3698 +
3699 ++EXPORT_SYMBOL(zero_pfn);
3700 ++
3701 + /*
3702 + * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
3703 + */
3704 +diff --git a/mm/swapfile.c b/mm/swapfile.c
3705 +index 660b9c0e2e40..32fed0949adf 100644
3706 +--- a/mm/swapfile.c
3707 ++++ b/mm/swapfile.c
3708 +@@ -2207,6 +2207,8 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
3709 + swab32s(&swap_header->info.version);
3710 + swab32s(&swap_header->info.last_page);
3711 + swab32s(&swap_header->info.nr_badpages);
3712 ++ if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
3713 ++ return 0;
3714 + for (i = 0; i < swap_header->info.nr_badpages; i++)
3715 + swab32s(&swap_header->info.badpages[i]);
3716 + }
3717 +diff --git a/mm/truncate.c b/mm/truncate.c
3718 +index 827ad8d2b5cd..6dde010a6676 100644
3719 +--- a/mm/truncate.c
3720 ++++ b/mm/truncate.c
3721 +@@ -415,14 +415,6 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
3722 + unsigned long count = 0;
3723 + int i;
3724 +
3725 +- /*
3726 +- * Note: this function may get called on a shmem/tmpfs mapping:
3727 +- * pagevec_lookup() might then return 0 prematurely (because it
3728 +- * got a gangful of swap entries); but it's hardly worth worrying
3729 +- * about - it can rarely have anything to free from such a mapping
3730 +- * (most pages are dirty), and already skips over any difficulties.
3731 +- */
3732 +-
3733 + pagevec_init(&pvec, 0);
3734 + while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
3735 + min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
3736 +diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
3737 +index 91fed8147c39..edb0eee5caf7 100644
3738 +--- a/net/bridge/br_multicast.c
3739 ++++ b/net/bridge/br_multicast.c
3740 +@@ -911,20 +911,25 @@ static void br_multicast_enable(struct bridge_mcast_query *query)
3741 + mod_timer(&query->timer, jiffies);
3742 + }
3743 +
3744 +-void br_multicast_enable_port(struct net_bridge_port *port)
3745 ++static void __br_multicast_enable_port(struct net_bridge_port *port)
3746 + {
3747 + struct net_bridge *br = port->br;
3748 +
3749 +- spin_lock(&br->multicast_lock);
3750 + if (br->multicast_disabled || !netif_running(br->dev))
3751 +- goto out;
3752 ++ return;
3753 +
3754 + br_multicast_enable(&port->ip4_query);
3755 + #if IS_ENABLED(CONFIG_IPV6)
3756 + br_multicast_enable(&port->ip6_query);
3757 + #endif
3758 ++}
3759 +
3760 +-out:
3761 ++void br_multicast_enable_port(struct net_bridge_port *port)
3762 ++{
3763 ++ struct net_bridge *br = port->br;
3764 ++
3765 ++ spin_lock(&br->multicast_lock);
3766 ++ __br_multicast_enable_port(port);
3767 + spin_unlock(&br->multicast_lock);
3768 + }
3769 +
3770 +@@ -1954,8 +1959,9 @@ static void br_multicast_start_querier(struct net_bridge *br,
3771 +
3772 + int br_multicast_toggle(struct net_bridge *br, unsigned long val)
3773 + {
3774 +- int err = 0;
3775 + struct net_bridge_mdb_htable *mdb;
3776 ++ struct net_bridge_port *port;
3777 ++ int err = 0;
3778 +
3779 + spin_lock_bh(&br->multicast_lock);
3780 + if (br->multicast_disabled == !val)
3781 +@@ -1983,10 +1989,9 @@ rollback:
3782 + goto rollback;
3783 + }
3784 +
3785 +- br_multicast_start_querier(br, &br->ip4_query);
3786 +-#if IS_ENABLED(CONFIG_IPV6)
3787 +- br_multicast_start_querier(br, &br->ip6_query);
3788 +-#endif
3789 ++ br_multicast_open(br);
3790 ++ list_for_each_entry(port, &br->port_list, list)
3791 ++ __br_multicast_enable_port(port);
3792 +
3793 + unlock:
3794 + spin_unlock_bh(&br->multicast_lock);
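
Splitting br_multicast_enable_port() into a lock-taking wrapper and an unlocked __br_multicast_enable_port() helper is what allows br_multicast_toggle(), which already holds multicast_lock, to re-enable every port without deadlocking; the toggle path also now goes through the full br_multicast_open() instead of restarting the queriers by hand. The lock-split idiom, sketched with hypothetical names:

    static void __do_enable(struct port *p)  /* caller holds the lock */
    {
        /* the actual work, callable from either path */
    }

    void do_enable(struct port *p)
    {
        spin_lock(&lock);
        __do_enable(p);
        spin_unlock(&lock);
    }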
3795 +diff --git a/net/can/bcm.c b/net/can/bcm.c
3796 +index b57452a65fb9..392a687d3ca6 100644
3797 +--- a/net/can/bcm.c
3798 ++++ b/net/can/bcm.c
3799 +@@ -1500,24 +1500,31 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
3800 + struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
3801 + struct sock *sk = sock->sk;
3802 + struct bcm_sock *bo = bcm_sk(sk);
3803 ++ int ret = 0;
3804 +
3805 + if (len < sizeof(*addr))
3806 + return -EINVAL;
3807 +
3808 +- if (bo->bound)
3809 +- return -EISCONN;
3810 ++ lock_sock(sk);
3811 ++
3812 ++ if (bo->bound) {
3813 ++ ret = -EISCONN;
3814 ++ goto fail;
3815 ++ }
3816 +
3817 + /* bind a device to this socket */
3818 + if (addr->can_ifindex) {
3819 + struct net_device *dev;
3820 +
3821 + dev = dev_get_by_index(&init_net, addr->can_ifindex);
3822 +- if (!dev)
3823 +- return -ENODEV;
3824 +-
3825 ++ if (!dev) {
3826 ++ ret = -ENODEV;
3827 ++ goto fail;
3828 ++ }
3829 + if (dev->type != ARPHRD_CAN) {
3830 + dev_put(dev);
3831 +- return -ENODEV;
3832 ++ ret = -ENODEV;
3833 ++ goto fail;
3834 + }
3835 +
3836 + bo->ifindex = dev->ifindex;
3837 +@@ -1528,17 +1535,24 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
3838 + bo->ifindex = 0;
3839 + }
3840 +
3841 +- bo->bound = 1;
3842 +-
3843 + if (proc_dir) {
3844 + /* unique socket address as filename */
3845 + sprintf(bo->procname, "%lu", sock_i_ino(sk));
3846 + bo->bcm_proc_read = proc_create_data(bo->procname, 0644,
3847 + proc_dir,
3848 + &bcm_proc_fops, sk);
3849 ++ if (!bo->bcm_proc_read) {
3850 ++ ret = -ENOMEM;
3851 ++ goto fail;
3852 ++ }
3853 + }
3854 +
3855 +- return 0;
3856 ++ bo->bound = 1;
3857 ++
3858 ++fail:
3859 ++ release_sock(sk);
3860 ++
3861 ++ return ret;
3862 + }
3863 +
3864 + static int bcm_recvmsg(struct kiocb *iocb, struct socket *sock,
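
Holding lock_sock() across the whole of bcm_connect() closes a race where two threads could bind the same socket concurrently, and bo->bound is now set only after proc_create_data() has succeeded, so a failed proc entry no longer leaves a socket that claims to be bound. The control flow, sketched:

    lock_sock(sk);
    if (bo->bound) {               /* re-checked under the lock */
        ret = -EISCONN;
        goto fail;
    }
    /* ... device lookup, proc entry creation ... */
    bo->bound = 1;                 /* publish only on full success */
    fail:
        release_sock(sk);
        return ret;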
3865 +diff --git a/net/core/dev.c b/net/core/dev.c
3866 +index d30c12263f38..fa6d9a47f71f 100644
3867 +--- a/net/core/dev.c
3868 ++++ b/net/core/dev.c
3869 +@@ -2263,7 +2263,7 @@ int skb_checksum_help(struct sk_buff *skb)
3870 + goto out;
3871 + }
3872 +
3873 +- *(__sum16 *)(skb->data + offset) = csum_fold(csum);
3874 ++ *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
3875 + out_set_summed:
3876 + skb->ip_summed = CHECKSUM_NONE;
3877 + out:
3878 +@@ -4546,6 +4546,7 @@ EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
3879 +
3880 + static int __netdev_adjacent_dev_insert(struct net_device *dev,
3881 + struct net_device *adj_dev,
3882 ++ u16 ref_nr,
3883 + bool neighbour, bool master,
3884 + bool upper)
3885 + {
3886 +@@ -4555,7 +4556,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
3887 +
3888 + if (adj) {
3889 + BUG_ON(neighbour);
3890 +- adj->ref_nr++;
3891 ++ adj->ref_nr += ref_nr;
3892 + return 0;
3893 + }
3894 +
3895 +@@ -4566,7 +4567,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
3896 + adj->dev = adj_dev;
3897 + adj->master = master;
3898 + adj->neighbour = neighbour;
3899 +- adj->ref_nr = 1;
3900 ++ adj->ref_nr = ref_nr;
3901 +
3902 + dev_hold(adj_dev);
3903 + pr_debug("dev_hold for %s, because of %s link added from %s to %s\n",
3904 +@@ -4589,22 +4590,25 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
3905 +
3906 + static inline int __netdev_upper_dev_insert(struct net_device *dev,
3907 + struct net_device *udev,
3908 ++ u16 ref_nr,
3909 + bool master, bool neighbour)
3910 + {
3911 +- return __netdev_adjacent_dev_insert(dev, udev, neighbour, master,
3912 +- true);
3913 ++ return __netdev_adjacent_dev_insert(dev, udev, ref_nr, neighbour,
3914 ++ master, true);
3915 + }
3916 +
3917 + static inline int __netdev_lower_dev_insert(struct net_device *dev,
3918 + struct net_device *ldev,
3919 ++ u16 ref_nr,
3920 + bool neighbour)
3921 + {
3922 +- return __netdev_adjacent_dev_insert(dev, ldev, neighbour, false,
3923 ++ return __netdev_adjacent_dev_insert(dev, ldev, ref_nr, neighbour, false,
3924 + false);
3925 + }
3926 +
3927 + void __netdev_adjacent_dev_remove(struct net_device *dev,
3928 +- struct net_device *adj_dev, bool upper)
3929 ++ struct net_device *adj_dev, u16 ref_nr,
3930 ++ bool upper)
3931 + {
3932 + struct netdev_adjacent *adj;
3933 +
3934 +@@ -4616,8 +4620,8 @@ void __netdev_adjacent_dev_remove(struct net_device *dev,
3935 + if (!adj)
3936 + BUG();
3937 +
3938 +- if (adj->ref_nr > 1) {
3939 +- adj->ref_nr--;
3940 ++ if (adj->ref_nr > ref_nr) {
3941 ++ adj->ref_nr -= ref_nr;
3942 + return;
3943 + }
3944 +
3945 +@@ -4630,30 +4634,33 @@ void __netdev_adjacent_dev_remove(struct net_device *dev,
3946 + }
3947 +
3948 + static inline void __netdev_upper_dev_remove(struct net_device *dev,
3949 +- struct net_device *udev)
3950 ++ struct net_device *udev,
3951 ++ u16 ref_nr)
3952 + {
3953 +- return __netdev_adjacent_dev_remove(dev, udev, true);
3954 ++ return __netdev_adjacent_dev_remove(dev, udev, ref_nr, true);
3955 + }
3956 +
3957 + static inline void __netdev_lower_dev_remove(struct net_device *dev,
3958 +- struct net_device *ldev)
3959 ++ struct net_device *ldev,
3960 ++ u16 ref_nr)
3961 + {
3962 +- return __netdev_adjacent_dev_remove(dev, ldev, false);
3963 ++ return __netdev_adjacent_dev_remove(dev, ldev, ref_nr, false);
3964 + }
3965 +
3966 + int __netdev_adjacent_dev_insert_link(struct net_device *dev,
3967 + struct net_device *upper_dev,
3968 +- bool master, bool neighbour)
3969 ++ u16 ref_nr, bool master, bool neighbour)
3970 + {
3971 + int ret;
3972 +
3973 +- ret = __netdev_upper_dev_insert(dev, upper_dev, master, neighbour);
3974 ++ ret = __netdev_upper_dev_insert(dev, upper_dev, ref_nr, master,
3975 ++ neighbour);
3976 + if (ret)
3977 + return ret;
3978 +
3979 +- ret = __netdev_lower_dev_insert(upper_dev, dev, neighbour);
3980 ++ ret = __netdev_lower_dev_insert(upper_dev, dev, ref_nr, neighbour);
3981 + if (ret) {
3982 +- __netdev_upper_dev_remove(dev, upper_dev);
3983 ++ __netdev_upper_dev_remove(dev, upper_dev, ref_nr);
3984 + return ret;
3985 + }
3986 +
3987 +@@ -4661,23 +4668,25 @@ int __netdev_adjacent_dev_insert_link(struct net_device *dev,
3988 + }
3989 +
3990 + static inline int __netdev_adjacent_dev_link(struct net_device *dev,
3991 +- struct net_device *udev)
3992 ++ struct net_device *udev,
3993 ++ u16 ref_nr)
3994 + {
3995 +- return __netdev_adjacent_dev_insert_link(dev, udev, false, false);
3996 ++ return __netdev_adjacent_dev_insert_link(dev, udev, ref_nr, false,
3997 ++ false);
3998 + }
3999 +
4000 + static inline int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
4001 + struct net_device *udev,
4002 + bool master)
4003 + {
4004 +- return __netdev_adjacent_dev_insert_link(dev, udev, master, true);
4005 ++ return __netdev_adjacent_dev_insert_link(dev, udev, 1, master, true);
4006 + }
4007 +
4008 + void __netdev_adjacent_dev_unlink(struct net_device *dev,
4009 +- struct net_device *upper_dev)
4010 ++ struct net_device *upper_dev, u16 ref_nr)
4011 + {
4012 +- __netdev_upper_dev_remove(dev, upper_dev);
4013 +- __netdev_lower_dev_remove(upper_dev, dev);
4014 ++ __netdev_upper_dev_remove(dev, upper_dev, ref_nr);
4015 ++ __netdev_lower_dev_remove(upper_dev, dev, ref_nr);
4016 + }
4017 +
4018 +
4019 +@@ -4713,7 +4722,8 @@ static int __netdev_upper_dev_link(struct net_device *dev,
4020 + */
4021 + list_for_each_entry(i, &dev->lower_dev_list, list) {
4022 + list_for_each_entry(j, &upper_dev->upper_dev_list, list) {
4023 +- ret = __netdev_adjacent_dev_link(i->dev, j->dev);
4024 ++ ret = __netdev_adjacent_dev_link(i->dev, j->dev,
4025 ++ i->ref_nr);
4026 + if (ret)
4027 + goto rollback_mesh;
4028 + }
4029 +@@ -4721,14 +4731,14 @@ static int __netdev_upper_dev_link(struct net_device *dev,
4030 +
4031 + /* add dev to every upper_dev's upper device */
4032 + list_for_each_entry(i, &upper_dev->upper_dev_list, list) {
4033 +- ret = __netdev_adjacent_dev_link(dev, i->dev);
4034 ++ ret = __netdev_adjacent_dev_link(dev, i->dev, i->ref_nr);
4035 + if (ret)
4036 + goto rollback_upper_mesh;
4037 + }
4038 +
4039 + /* add upper_dev to every dev's lower device */
4040 + list_for_each_entry(i, &dev->lower_dev_list, list) {
4041 +- ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
4042 ++ ret = __netdev_adjacent_dev_link(i->dev, upper_dev, i->ref_nr);
4043 + if (ret)
4044 + goto rollback_lower_mesh;
4045 + }
4046 +@@ -4741,7 +4751,7 @@ rollback_lower_mesh:
4047 + list_for_each_entry(i, &dev->lower_dev_list, list) {
4048 + if (i == to_i)
4049 + break;
4050 +- __netdev_adjacent_dev_unlink(i->dev, upper_dev);
4051 ++ __netdev_adjacent_dev_unlink(i->dev, upper_dev, i->ref_nr);
4052 + }
4053 +
4054 + i = NULL;
4055 +@@ -4751,7 +4761,7 @@ rollback_upper_mesh:
4056 + list_for_each_entry(i, &upper_dev->upper_dev_list, list) {
4057 + if (i == to_i)
4058 + break;
4059 +- __netdev_adjacent_dev_unlink(dev, i->dev);
4060 ++ __netdev_adjacent_dev_unlink(dev, i->dev, i->ref_nr);
4061 + }
4062 +
4063 + i = j = NULL;
4064 +@@ -4763,13 +4773,13 @@ rollback_mesh:
4065 + list_for_each_entry(j, &upper_dev->upper_dev_list, list) {
4066 + if (i == to_i && j == to_j)
4067 + break;
4068 +- __netdev_adjacent_dev_unlink(i->dev, j->dev);
4069 ++ __netdev_adjacent_dev_unlink(i->dev, j->dev, i->ref_nr);
4070 + }
4071 + if (i == to_i)
4072 + break;
4073 + }
4074 +
4075 +- __netdev_adjacent_dev_unlink(dev, upper_dev);
4076 ++ __netdev_adjacent_dev_unlink(dev, upper_dev, 1);
4077 +
4078 + return ret;
4079 + }
4080 +@@ -4823,7 +4833,7 @@ void netdev_upper_dev_unlink(struct net_device *dev,
4081 + struct netdev_adjacent *i, *j;
4082 + ASSERT_RTNL();
4083 +
4084 +- __netdev_adjacent_dev_unlink(dev, upper_dev);
4085 ++ __netdev_adjacent_dev_unlink(dev, upper_dev, 1);
4086 +
4087 + /* Here is the tricky part. We must remove all dev's lower
4088 + * devices from all upper_dev's upper devices and vice
4089 +@@ -4831,16 +4841,16 @@ void netdev_upper_dev_unlink(struct net_device *dev,
4090 + */
4091 + list_for_each_entry(i, &dev->lower_dev_list, list)
4092 + list_for_each_entry(j, &upper_dev->upper_dev_list, list)
4093 +- __netdev_adjacent_dev_unlink(i->dev, j->dev);
4094 ++ __netdev_adjacent_dev_unlink(i->dev, j->dev, i->ref_nr);
4095 +
4096 + /* remove also the devices itself from lower/upper device
4097 + * list
4098 + */
4099 + list_for_each_entry(i, &dev->lower_dev_list, list)
4100 +- __netdev_adjacent_dev_unlink(i->dev, upper_dev);
4101 ++ __netdev_adjacent_dev_unlink(i->dev, upper_dev, i->ref_nr);
4102 +
4103 + list_for_each_entry(i, &upper_dev->upper_dev_list, list)
4104 +- __netdev_adjacent_dev_unlink(dev, i->dev);
4105 ++ __netdev_adjacent_dev_unlink(dev, i->dev, i->ref_nr);
4106 +
4107 + call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
4108 + }
4109 +diff --git a/net/core/filter.c b/net/core/filter.c
4110 +index ebce437678fc..5903efc408da 100644
4111 +--- a/net/core/filter.c
4112 ++++ b/net/core/filter.c
4113 +@@ -67,9 +67,10 @@ static inline void *load_pointer(const struct sk_buff *skb, int k,
4114 + }
4115 +
4116 + /**
4117 +- * sk_filter - run a packet through a socket filter
4118 ++ * sk_filter_trim_cap - run a packet through a socket filter
4119 + * @sk: sock associated with &sk_buff
4120 + * @skb: buffer to filter
4121 ++ * @cap: limit on how short the eBPF program may trim the packet
4122 + *
4123 + * Run the filter code and then cut skb->data to correct size returned by
4124 + * sk_run_filter. If pkt_len is 0 we toss packet. If skb->len is smaller
4125 +@@ -78,7 +79,7 @@ static inline void *load_pointer(const struct sk_buff *skb, int k,
4126 + * be accepted or -EPERM if the packet should be tossed.
4127 + *
4128 + */
4129 +-int sk_filter(struct sock *sk, struct sk_buff *skb)
4130 ++int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
4131 + {
4132 + int err;
4133 + struct sk_filter *filter;
4134 +@@ -99,14 +100,13 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
4135 + filter = rcu_dereference(sk->sk_filter);
4136 + if (filter) {
4137 + unsigned int pkt_len = SK_RUN_FILTER(filter, skb);
4138 +-
4139 +- err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
4140 ++ err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
4141 + }
4142 + rcu_read_unlock();
4143 +
4144 + return err;
4145 + }
4146 +-EXPORT_SYMBOL(sk_filter);
4147 ++EXPORT_SYMBOL(sk_filter_trim_cap);
4148 +
4149 + /**
4150 + * sk_run_filter - run a filter on a socket
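
sk_filter_trim_cap() keeps the old drop semantics (a zero verdict still yields -EPERM) but floors the trim at cap bytes, so a socket filter can no longer shrink a packet below the header its protocol handler is about to parse; sk_filter() stays an inline wrapper with cap = 1, as declared in the filter.h hunk above. The heart of the change:

    pkt_len = SK_RUN_FILTER(filter, skb);
    err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;

TCP passes th->doff * 4 as the cap (see the tcp_filter() hunk below), guaranteeing the full TCP header survives any filter verdict.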
4151 +diff --git a/net/core/sock.c b/net/core/sock.c
4152 +index 4ac4c13352ab..73c6093e136a 100644
4153 +--- a/net/core/sock.c
4154 ++++ b/net/core/sock.c
4155 +@@ -1537,6 +1537,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
4156 + }
4157 +
4158 + newsk->sk_err = 0;
4159 ++ newsk->sk_err_soft = 0;
4160 + newsk->sk_priority = 0;
4161 + /*
4162 + * Before updating sk_refcnt, we must commit prior changes to memory
4163 +@@ -2095,12 +2096,13 @@ EXPORT_SYMBOL(__sk_mem_schedule);
4164 + /**
4165 ++ * __sk_mem_reclaim - reclaim memory_allocated
4166 + * @sk: socket
4167 ++ * @amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple)
4168 + */
4169 +-void __sk_mem_reclaim(struct sock *sk)
4170 ++void __sk_mem_reclaim(struct sock *sk, int amount)
4171 + {
4172 +- sk_memory_allocated_sub(sk,
4173 +- sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
4174 +- sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
4175 ++ amount >>= SK_MEM_QUANTUM_SHIFT;
4176 ++ sk_memory_allocated_sub(sk, amount);
4177 ++ sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
4178 +
4179 + if (sk_under_memory_pressure(sk) &&
4180 + (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
4181 +diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
4182 +index ebc54fef85a5..294c642fbebb 100644
4183 +--- a/net/dccp/ipv4.c
4184 ++++ b/net/dccp/ipv4.c
4185 +@@ -212,7 +212,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
4186 + {
4187 + const struct iphdr *iph = (struct iphdr *)skb->data;
4188 + const u8 offset = iph->ihl << 2;
4189 +- const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
4190 ++ const struct dccp_hdr *dh;
4191 + struct dccp_sock *dp;
4192 + struct inet_sock *inet;
4193 + const int type = icmp_hdr(skb)->type;
4194 +@@ -222,11 +222,13 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
4195 + int err;
4196 + struct net *net = dev_net(skb->dev);
4197 +
4198 +- if (skb->len < offset + sizeof(*dh) ||
4199 +- skb->len < offset + __dccp_basic_hdr_len(dh)) {
4200 +- ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
4201 +- return;
4202 +- }
4203 ++ /* Only need dccph_dport & dccph_sport which are the first
4204 ++ * 4 bytes in dccp header.
4205 ++ * Our caller (icmp_socket_deliver()) already pulled 8 bytes for us.
4206 ++ */
4207 ++ BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8);
4208 ++ BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8);
4209 ++ dh = (struct dccp_hdr *)(skb->data + offset);
4210 +
4211 + sk = inet_lookup(net, &dccp_hashinfo,
4212 + iph->daddr, dh->dccph_dport,
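
The old length test dereferenced dh before validating it. The replacement encodes the actual contract instead: icmp_socket_deliver() has already pulled 8 bytes of the inner transport header, and the error handler only needs the two port fields, which occupy the first 4 bytes. BUILD_BUG_ON() turns that assumption into a compile-time check:

    /* the ports must lie within the 8 bytes the ICMP layer pulled */
    BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8);
    dh = (struct dccp_hdr *)(skb->data + offset);

The ipv6 variant below is identical, with icmpv6_notify() providing the same 8-byte guarantee.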
4213 +diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
4214 +index 86eedbaf037f..736fdedf9c85 100644
4215 +--- a/net/dccp/ipv6.c
4216 ++++ b/net/dccp/ipv6.c
4217 +@@ -83,7 +83,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
4218 + u8 type, u8 code, int offset, __be32 info)
4219 + {
4220 + const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
4221 +- const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
4222 ++ const struct dccp_hdr *dh;
4223 + struct dccp_sock *dp;
4224 + struct ipv6_pinfo *np;
4225 + struct sock *sk;
4226 +@@ -91,12 +91,13 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
4227 + __u64 seq;
4228 + struct net *net = dev_net(skb->dev);
4229 +
4230 +- if (skb->len < offset + sizeof(*dh) ||
4231 +- skb->len < offset + __dccp_basic_hdr_len(dh)) {
4232 +- ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
4233 +- ICMP6_MIB_INERRORS);
4234 +- return;
4235 +- }
4236 ++ /* Only need dccph_dport & dccph_sport which are the first
4237 ++ * 4 bytes in dccp header.
4238 ++ * Our caller (icmpv6_notify()) already pulled 8 bytes for us.
4239 ++ */
4240 ++ BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8);
4241 ++ BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8);
4242 ++ dh = (struct dccp_hdr *)(skb->data + offset);
4243 +
4244 + sk = inet6_lookup(net, &dccp_hashinfo,
4245 + &hdr->daddr, dh->dccph_dport,
4246 +@@ -1022,6 +1023,7 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
4247 + .getsockopt = ipv6_getsockopt,
4248 + .addr2sockaddr = inet6_csk_addr2sockaddr,
4249 + .sockaddr_len = sizeof(struct sockaddr_in6),
4250 ++ .bind_conflict = inet6_csk_bind_conflict,
4251 + #ifdef CONFIG_COMPAT
4252 + .compat_setsockopt = compat_ipv6_setsockopt,
4253 + .compat_getsockopt = compat_ipv6_getsockopt,
4254 +diff --git a/net/dccp/proto.c b/net/dccp/proto.c
4255 +index ba64750f0387..f6f6fa1ddeb0 100644
4256 +--- a/net/dccp/proto.c
4257 ++++ b/net/dccp/proto.c
4258 +@@ -1012,6 +1012,10 @@ void dccp_close(struct sock *sk, long timeout)
4259 + __kfree_skb(skb);
4260 + }
4261 +
4262 ++ /* If socket has been already reset kill it. */
4263 ++ if (sk->sk_state == DCCP_CLOSED)
4264 ++ goto adjudge_to_death;
4265 ++
4266 + if (data_was_unread) {
4267 + /* Unread data was tossed, send an appropriate Reset Code */
4268 + DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread);
4269 +diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
4270 +index dccda72bac62..5643a10da91d 100644
4271 +--- a/net/ipv4/ipmr.c
4272 ++++ b/net/ipv4/ipmr.c
4273 +@@ -2188,7 +2188,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
4274 +
4275 + int ipmr_get_route(struct net *net, struct sk_buff *skb,
4276 + __be32 saddr, __be32 daddr,
4277 +- struct rtmsg *rtm, int nowait)
4278 ++ struct rtmsg *rtm, int nowait, u32 portid)
4279 + {
4280 + struct mfc_cache *cache;
4281 + struct mr_table *mrt;
4282 +@@ -2233,6 +2233,7 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
4283 + return -ENOMEM;
4284 + }
4285 +
4286 ++ NETLINK_CB(skb2).portid = portid;
4287 + skb_push(skb2, sizeof(struct iphdr));
4288 + skb_reset_network_header(skb2);
4289 + iph = ip_hdr(skb2);
4290 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
4291 +index 1454176792b3..fd2811086257 100644
4292 +--- a/net/ipv4/route.c
4293 ++++ b/net/ipv4/route.c
4294 +@@ -764,8 +764,10 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
4295 + goto reject_redirect;
4296 + }
4297 +
4298 +- n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw);
4299 +- if (n) {
4300 ++ n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
4301 ++ if (!n)
4302 ++ n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
4303 ++ if (!IS_ERR(n)) {
4304 + if (!(n->nud_state & NUD_VALID)) {
4305 + neigh_event_send(n, NULL);
4306 + } else {
4307 +@@ -2427,7 +2429,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
4308 + IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
4309 + int err = ipmr_get_route(net, skb,
4310 + fl4->saddr, fl4->daddr,
4311 +- r, nowait);
4312 ++ r, nowait, portid);
4313 ++
4314 + if (err <= 0) {
4315 + if (!nowait) {
4316 + if (err == 0)
4317 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
4318 +index 392d3259f9ad..3e63b5fb2121 100644
4319 +--- a/net/ipv4/tcp.c
4320 ++++ b/net/ipv4/tcp.c
4321 +@@ -1169,7 +1169,7 @@ new_segment:
4322 +
4323 + if (!skb_can_coalesce(skb, i, pfrag->page,
4324 + pfrag->offset)) {
4325 +- if (i == sysctl_max_skb_frags || !sg) {
4326 ++ if (i >= sysctl_max_skb_frags || !sg) {
4327 + tcp_mark_push(tp, skb);
4328 + goto new_segment;
4329 + }
4330 +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
4331 +index 4b2040762733..57f5bad5650c 100644
4332 +--- a/net/ipv4/tcp_ipv4.c
4333 ++++ b/net/ipv4/tcp_ipv4.c
4334 +@@ -1941,6 +1941,21 @@ bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
4335 + }
4336 + EXPORT_SYMBOL(tcp_prequeue);
4337 +
4338 ++int tcp_filter(struct sock *sk, struct sk_buff *skb)
4339 ++{
4340 ++ struct tcphdr *th = (struct tcphdr *)skb->data;
4341 ++ unsigned int eaten = skb->len;
4342 ++ int err;
4343 ++
4344 ++ err = sk_filter_trim_cap(sk, skb, th->doff * 4);
4345 ++ if (!err) {
4346 ++ eaten -= skb->len;
4347 ++ TCP_SKB_CB(skb)->end_seq -= eaten;
4348 ++ }
4349 ++ return err;
4350 ++}
4351 ++EXPORT_SYMBOL(tcp_filter);
4352 ++
4353 + /*
4354 + * From tcp_input.c
4355 + */
4356 +@@ -2003,8 +2018,10 @@ process:
4357 + goto discard_and_relse;
4358 + nf_reset(skb);
4359 +
4360 +- if (sk_filter(sk, skb))
4361 ++ if (tcp_filter(sk, skb))
4362 + goto discard_and_relse;
4363 ++ th = (const struct tcphdr *)skb->data;
4364 ++ iph = ip_hdr(skb);
4365 +
4366 + sk_mark_napi_id(sk, skb);
4367 + skb->dev = NULL;
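
The pointer reloads after tcp_filter() are not cosmetic: trimming goes through pskb_trim(), which may reallocate skb->head, leaving previously cached header pointers dangling. Hence the pattern:

    if (tcp_filter(sk, skb))
        goto discard_and_relse;
    th = (const struct tcphdr *)skb->data;  /* refresh: head may have moved */
    iph = ip_hdr(skb);

The IPv6 receive path gets the same treatment further down.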
4368 +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
4369 +index aa72c9d604a0..c807d5790ca1 100644
4370 +--- a/net/ipv4/tcp_output.c
4371 ++++ b/net/ipv4/tcp_output.c
4372 +@@ -1762,12 +1762,14 @@ static int tcp_mtu_probe(struct sock *sk)
4373 + len = 0;
4374 + tcp_for_write_queue_from_safe(skb, next, sk) {
4375 + copy = min_t(int, skb->len, probe_size - len);
4376 +- if (nskb->ip_summed)
4377 ++ if (nskb->ip_summed) {
4378 + skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
4379 +- else
4380 +- nskb->csum = skb_copy_and_csum_bits(skb, 0,
4381 +- skb_put(nskb, copy),
4382 +- copy, nskb->csum);
4383 ++ } else {
4384 ++ __wsum csum = skb_copy_and_csum_bits(skb, 0,
4385 ++ skb_put(nskb, copy),
4386 ++ copy, 0);
4387 ++ nskb->csum = csum_block_add(nskb->csum, csum, len);
4388 ++ }
4389 +
4390 + if (skb->len <= copy) {
4391 + /* We've eaten all the data from this skb.
4392 +@@ -2336,7 +2338,8 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
4393 + * copying overhead: fragmentation, tunneling, mangling etc.
4394 + */
4395 + if (atomic_read(&sk->sk_wmem_alloc) >
4396 +- min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
4397 ++ min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2),
4398 ++ sk->sk_sndbuf))
4399 + return -EAGAIN;
4400 +
4401 + if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
4402 +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
4403 +index bbf35875e4ef..1e31fc5477e8 100644
4404 +--- a/net/ipv6/addrconf.c
4405 ++++ b/net/ipv6/addrconf.c
4406 +@@ -2648,7 +2648,7 @@ static void init_loopback(struct net_device *dev)
4407 + * lo device down, release this obsolete dst and
4408 + * reallocate a new router for ifa.
4409 + */
4410 +- if (sp_ifa->rt->dst.obsolete > 0) {
4411 ++ if (!atomic_read(&sp_ifa->rt->rt6i_ref)) {
4412 + ip6_rt_put(sp_ifa->rt);
4413 + sp_ifa->rt = NULL;
4414 + } else {
4415 +diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
4416 +index 737af492ed75..6b5acd50103f 100644
4417 +--- a/net/ipv6/ip6_gre.c
4418 ++++ b/net/ipv6/ip6_gre.c
4419 +@@ -895,7 +895,6 @@ static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
4420 + encap_limit = t->parms.encap_limit;
4421 +
4422 + memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
4423 +- fl6.flowi6_proto = skb->protocol;
4424 +
4425 + err = ip6gre_xmit2(skb, dev, 0, &fl6, encap_limit, &mtu);
4426 +
4427 +diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
4428 +index 86d30e60242a..56aa540d77f6 100644
4429 +--- a/net/ipv6/ip6mr.c
4430 ++++ b/net/ipv6/ip6mr.c
4431 +@@ -2273,8 +2273,8 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
4432 + return 1;
4433 + }
4434 +
4435 +-int ip6mr_get_route(struct net *net,
4436 +- struct sk_buff *skb, struct rtmsg *rtm, int nowait)
4437 ++int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
4438 ++ int nowait, u32 portid)
4439 + {
4440 + int err;
4441 + struct mr6_table *mrt;
4442 +@@ -2319,6 +2319,7 @@ int ip6mr_get_route(struct net *net,
4443 + return -ENOMEM;
4444 + }
4445 +
4446 ++ NETLINK_CB(skb2).portid = portid;
4447 + skb_reset_transport_header(skb2);
4448 +
4449 + skb_put(skb2, sizeof(struct ipv6hdr));
4450 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
4451 +index f862c7688c99..e19817a090c7 100644
4452 +--- a/net/ipv6/route.c
4453 ++++ b/net/ipv6/route.c
4454 +@@ -2614,7 +2614,9 @@ static int rt6_fill_node(struct net *net,
4455 + if (iif) {
4456 + #ifdef CONFIG_IPV6_MROUTE
4457 + if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
4458 +- int err = ip6mr_get_route(net, skb, rtm, nowait);
4459 ++ int err = ip6mr_get_route(net, skb, rtm, nowait,
4460 ++ portid);
4461 ++
4462 + if (err <= 0) {
4463 + if (!nowait) {
4464 + if (err == 0)
4465 +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
4466 +index 0812b615885d..e5bafd576a13 100644
4467 +--- a/net/ipv6/tcp_ipv6.c
4468 ++++ b/net/ipv6/tcp_ipv6.c
4469 +@@ -1339,7 +1339,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
4470 + goto discard;
4471 + #endif
4472 +
4473 +- if (sk_filter(sk, skb))
4474 ++ if (tcp_filter(sk, skb))
4475 + goto discard;
4476 +
4477 + /*
4478 +@@ -1509,8 +1509,10 @@ process:
4479 + if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
4480 + goto discard_and_relse;
4481 +
4482 +- if (sk_filter(sk, skb))
4483 ++ if (tcp_filter(sk, skb))
4484 + goto discard_and_relse;
4485 ++ th = (const struct tcphdr *)skb->data;
4486 ++ hdr = ipv6_hdr(skb);
4487 +
4488 + sk_mark_napi_id(sk, skb);
4489 + skb->dev = NULL;
4490 +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
4491 +index 834a41830778..4003bd682e06 100644
4492 +--- a/net/mac80211/rx.c
4493 ++++ b/net/mac80211/rx.c
4494 +@@ -2007,16 +2007,22 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
4495 + if (!(status->rx_flags & IEEE80211_RX_AMSDU))
4496 + return RX_CONTINUE;
4497 +
4498 +- if (ieee80211_has_a4(hdr->frame_control) &&
4499 +- rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
4500 +- !rx->sdata->u.vlan.sta)
4501 +- return RX_DROP_UNUSABLE;
4502 ++ if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
4503 ++ switch (rx->sdata->vif.type) {
4504 ++ case NL80211_IFTYPE_AP_VLAN:
4505 ++ if (!rx->sdata->u.vlan.sta)
4506 ++ return RX_DROP_UNUSABLE;
4507 ++ break;
4508 ++ case NL80211_IFTYPE_STATION:
4509 ++ if (!rx->sdata->u.mgd.use_4addr)
4510 ++ return RX_DROP_UNUSABLE;
4511 ++ break;
4512 ++ default:
4513 ++ return RX_DROP_UNUSABLE;
4514 ++ }
4515 ++ }
4516 +
4517 +- if (is_multicast_ether_addr(hdr->addr1) &&
4518 +- ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
4519 +- rx->sdata->u.vlan.sta) ||
4520 +- (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
4521 +- rx->sdata->u.mgd.use_4addr)))
4522 ++ if (is_multicast_ether_addr(hdr->addr1))
4523 + return RX_DROP_UNUSABLE;
4524 +
4525 + skb->dev = dev;
4526 +diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
4527 +index 85296d4eac0e..811dd66f021e 100644
4528 +--- a/net/netfilter/nf_log.c
4529 ++++ b/net/netfilter/nf_log.c
4530 +@@ -253,7 +253,7 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
4531 + size_t size = *lenp;
4532 + int r = 0;
4533 + int tindex = (unsigned long)table->extra1;
4534 +- struct net *net = current->nsproxy->net_ns;
4535 ++ struct net *net = table->extra2;
4536 +
4537 + if (write) {
4538 + if (size > sizeof(buf))
4539 +@@ -306,7 +306,6 @@ static int netfilter_log_sysctl_init(struct net *net)
4540 + 3, "%d", i);
4541 + nf_log_sysctl_table[i].procname =
4542 + nf_log_sysctl_fnames[i];
4543 +- nf_log_sysctl_table[i].data = NULL;
4544 + nf_log_sysctl_table[i].maxlen =
4545 + NFLOGGER_NAME_LEN * sizeof(char);
4546 + nf_log_sysctl_table[i].mode = 0644;
4547 +@@ -317,6 +316,9 @@ static int netfilter_log_sysctl_init(struct net *net)
4548 + }
4549 + }
4550 +
4551 ++ for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++)
4552 ++ table[i].extra2 = net;
4553 ++
4554 + net->nf.nf_log_dir_header = register_net_sysctl(net,
4555 + "net/netfilter/nf_log",
4556 + table);
4557 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
4558 +index 1e9cb9921daa..3f9804b2802a 100644
4559 +--- a/net/packet/af_packet.c
4560 ++++ b/net/packet/af_packet.c
4561 +@@ -3365,6 +3365,7 @@ static int packet_notifier(struct notifier_block *this,
4562 + }
4563 + if (msg == NETDEV_UNREGISTER) {
4564 + packet_cached_dev_reset(po);
4565 ++ fanout_release(sk);
4566 + po->ifindex = -1;
4567 + if (po->prot_hook.dev)
4568 + dev_put(po->prot_hook.dev);
4569 +diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
4570 +index 63a116c31a8b..ce6c8910f041 100644
4571 +--- a/net/sctp/sm_statefuns.c
4572 ++++ b/net/sctp/sm_statefuns.c
4573 +@@ -3427,6 +3427,12 @@ sctp_disposition_t sctp_sf_ootb(struct net *net,
4574 + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
4575 + commands);
4576 +
4577 ++ /* Report violation if chunk len overflows */
4578 ++ ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
4579 ++ if (ch_end > skb_tail_pointer(skb))
4580 ++ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
4581 ++ commands);
4582 ++
4583 + /* Now that we know we at least have a chunk header,
4584 + * do things that are type appropriate.
4585 + */
4586 +@@ -3458,12 +3464,6 @@ sctp_disposition_t sctp_sf_ootb(struct net *net,
4587 + }
4588 + }
4589 +
4590 +- /* Report violation if chunk len overflows */
4591 +- ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
4592 +- if (ch_end > skb_tail_pointer(skb))
4593 +- return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
4594 +- commands);
4595 +-
4596 + ch = (sctp_chunkhdr_t *) ch_end;
4597 + } while (ch_end < skb_tail_pointer(skb));
4598 +
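
The sctp_sf_ootb() reordering hoists the chunk-length bounds check ahead of any use of the chunk body, so a forged length field can no longer steer reads past the end of the packet before it is validated. A self-contained userspace sketch of the same validate-before-dereference walk, with invented header and helper names:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

struct chunk_hdr {
	uint8_t  type;
	uint8_t  flags;
	uint16_t length;        /* network byte order, includes this header */
};

#define WORD_ALIGN(len) (((len) + 3u) & ~3u)

/* Walk chunks; return -1 as soon as an advertised length is bogus. */
static int walk_chunks(const uint8_t *buf, size_t buflen)
{
	const uint8_t *p = buf, *tail = buf + buflen;

	while (p + sizeof(struct chunk_hdr) <= tail) {
		const struct chunk_hdr *ch = (const void *)p;
		const uint8_t *end;

		if (ntohs(ch->length) < sizeof(*ch))
			return -1;      /* degenerate length */
		end = p + WORD_ALIGN(ntohs(ch->length));
		if (end > tail)         /* check BEFORE touching the body */
			return -1;
		/* ...type-specific processing would happen here... */
		p = end;
	}
	return 0;
}

int main(void)
{
	uint8_t buf[8] = { 1, 0, 0, 200 };  /* claims 200 bytes in an 8-byte packet */
	printf("result: %d\n", walk_chunks(buf, sizeof(buf)));
	return 0;
}
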
4599 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
4600 +index ead3a8adca08..2c5cb6d2787d 100644
4601 +--- a/net/sctp/socket.c
4602 ++++ b/net/sctp/socket.c
4603 +@@ -1217,9 +1217,12 @@ static int __sctp_connect(struct sock* sk,
4604 +
4605 + timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);
4606 +
4607 +- err = sctp_wait_for_connect(asoc, &timeo);
4608 +- if ((err == 0 || err == -EINPROGRESS) && assoc_id)
4609 ++ if (assoc_id)
4610 + *assoc_id = asoc->assoc_id;
4611 ++ err = sctp_wait_for_connect(asoc, &timeo);
4612 ++ /* Note: the asoc may be freed after the return of
4613 ++ * sctp_wait_for_connect.
4614 ++ */
4615 +
4616 + /* Don't free association on exit. */
4617 + asoc = NULL;
4618 +@@ -4247,7 +4250,7 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
4619 + static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
4620 + int __user *optlen)
4621 + {
4622 +- if (len <= 0)
4623 ++ if (len == 0)
4624 + return -EINVAL;
4625 + if (len > sizeof(struct sctp_event_subscribe))
4626 + len = sizeof(struct sctp_event_subscribe);
4627 +@@ -5758,6 +5761,9 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
4628 + if (get_user(len, optlen))
4629 + return -EFAULT;
4630 +
4631 ++ if (len < 0)
4632 ++ return -EINVAL;
4633 ++
4634 + sctp_lock_sock(sk);
4635 +
4636 + switch (optname) {
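
The two socket.c getsockopt hunks cooperate: the top-level sctp_getsockopt() now rejects a negative user-supplied length once, up front, which is why the per-option handler can relax its own test from len <= 0 to len == 0. The underlying hazard is the usual signed/unsigned conversion when an int is compared against a sizeof(); a tiny standalone demonstration (no SCTP involved):

#include <stdio.h>

int main(void)
{
	int len = -1;
	size_t limit = sizeof(struct { char data[16]; });  /* stand-in option struct */

	/* Usual arithmetic conversions turn the signed -1 into SIZE_MAX,
	 * so a "too big, clamp it" branch fires instead of an error: */
	if ((size_t)len > limit)
		printf("len=-1 converts to %zu\n", (size_t)len);

	/* Hence the single up-front check the patch adds: */
	if (len < 0) {
		printf("return -EINVAL\n");
		return 0;
	}
	return 0;
}
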
4637 +diff --git a/scripts/gcc-x86_64-has-stack-protector.sh b/scripts/gcc-x86_64-has-stack-protector.sh
4638 +index 973e8c141567..17867e723a51 100644
4639 +--- a/scripts/gcc-x86_64-has-stack-protector.sh
4640 ++++ b/scripts/gcc-x86_64-has-stack-protector.sh
4641 +@@ -1,6 +1,6 @@
4642 + #!/bin/sh
4643 +
4644 +-echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
4645 ++echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
4646 + if [ "$?" -eq "0" ] ; then
4647 + echo y
4648 + else
4649 +diff --git a/security/keys/proc.c b/security/keys/proc.c
4650 +index 217b6855e815..374c3301b802 100644
4651 +--- a/security/keys/proc.c
4652 ++++ b/security/keys/proc.c
4653 +@@ -188,7 +188,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
4654 + struct timespec now;
4655 + unsigned long timo;
4656 + key_ref_t key_ref, skey_ref;
4657 +- char xbuf[12];
4658 ++ char xbuf[16];
4659 + int rc;
4660 +
4661 + key_ref = make_key_ref(key, 0);
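
Growing xbuf from 12 to 16 bytes gives proc_keys_show() headroom for the longest string it may format into it. The general failure mode is easy to reproduce with snprintf(), which truncates silently rather than overflowing; the buffer sizes and sample value below are illustrative only:

#include <stdio.h>

int main(void)
{
	char small[12], big[16];
	unsigned long long timo = 99999999999ULL;   /* an arbitrary large value */

	int n1 = snprintf(small, sizeof(small), "%llus", timo);
	int n2 = snprintf(big,   sizeof(big),   "%llus", timo);

	/* A return value >= the buffer size signals truncation. */
	printf("small: \"%s\" (needed %d)\n", small, n1);
	printf("big:   \"%s\" (needed %d)\n", big,   n2);
	return 0;
}
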
4662 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
4663 +index 6a5e36dc23e5..202150d7873c 100644
4664 +--- a/sound/pci/hda/hda_intel.c
4665 ++++ b/sound/pci/hda/hda_intel.c
4666 +@@ -594,7 +594,7 @@ enum {
4667 + #define AZX_DCAPS_NVIDIA_SNOOP (1 << 11) /* Nvidia snoop enable */
4668 + #define AZX_DCAPS_SCH_SNOOP (1 << 12) /* SCH/PCH snoop enable */
4669 + #define AZX_DCAPS_RIRB_DELAY (1 << 13) /* Long delay in read loop */
4670 +-#define AZX_DCAPS_RIRB_PRE_DELAY (1 << 14) /* Put a delay before read */
4671 ++/* 14 unused */
4672 + #define AZX_DCAPS_CTX_WORKAROUND (1 << 15) /* X-Fi workaround */
4673 + #define AZX_DCAPS_POSFIX_LPIB (1 << 16) /* Use LPIB as default */
4674 + #define AZX_DCAPS_POSFIX_VIA (1 << 17) /* Use VIACOMBO as default */
4675 +@@ -1540,7 +1540,7 @@ static irqreturn_t azx_interrupt(int irq, void *dev_id)
4676 + status = azx_readb(chip, RIRBSTS);
4677 + if (status & RIRB_INT_MASK) {
4678 + if (status & RIRB_INT_RESPONSE) {
4679 +- if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)
4680 ++ if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
4681 + udelay(80);
4682 + azx_update_rirb(chip);
4683 + }
4684 +@@ -4288,14 +4288,12 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
4685 + .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
4686 + .class_mask = 0xffffff,
4687 + .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
4688 +- AZX_DCAPS_NO_64BIT |
4689 +- AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB },
4690 ++ AZX_DCAPS_NO_64BIT | AZX_DCAPS_POSFIX_LPIB },
4691 + #else
4692 + /* this entry seems still valid -- i.e. without emu20kx chip */
4693 + { PCI_DEVICE(0x1102, 0x0009),
4694 + .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
4695 +- AZX_DCAPS_NO_64BIT |
4696 +- AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB },
4697 ++ AZX_DCAPS_NO_64BIT | AZX_DCAPS_POSFIX_LPIB },
4698 + #endif
4699 + /* Vortex86MX */
4700 + { PCI_DEVICE(0x17f3, 0x3010), .driver_data = AZX_DRIVER_GENERIC },
4701 +diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c
4702 +index 83c835d9fd88..67c82956367d 100644
4703 +--- a/sound/soc/codecs/cs4270.c
4704 ++++ b/sound/soc/codecs/cs4270.c
4705 +@@ -148,11 +148,11 @@ SND_SOC_DAPM_OUTPUT("AOUTR"),
4706 + };
4707 +
4708 + static const struct snd_soc_dapm_route cs4270_dapm_routes[] = {
4709 +- { "Capture", NULL, "AINA" },
4710 +- { "Capture", NULL, "AINB" },
4711 ++ { "Capture", NULL, "AINL" },
4712 ++ { "Capture", NULL, "AINR" },
4713 +
4714 +- { "AOUTA", NULL, "Playback" },
4715 +- { "AOUTB", NULL, "Playback" },
4716 ++ { "AOUTL", NULL, "Playback" },
4717 ++ { "AOUTR", NULL, "Playback" },
4718 + };
4719 +
4720 + /**
4721 +diff --git a/sound/usb/card.c b/sound/usb/card.c
4722 +index bc5795f342a7..96a09226be7d 100644
4723 +--- a/sound/usb/card.c
4724 ++++ b/sound/usb/card.c
4725 +@@ -661,7 +661,7 @@ int snd_usb_autoresume(struct snd_usb_audio *chip)
4726 + int err = -ENODEV;
4727 +
4728 + down_read(&chip->shutdown_rwsem);
4729 +- if (chip->probing && chip->in_pm)
4730 ++ if (chip->probing || chip->in_pm)
4731 + err = 0;
4732 + else if (!chip->shutdown)
4733 + err = usb_autopm_get_interface(chip->pm_intf);
4734 +diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
4735 +index c600d4277974..a1f08d8c7bd2 100644
4736 +--- a/sound/usb/quirks-table.h
4737 ++++ b/sound/usb/quirks-table.h
4738 +@@ -2953,6 +2953,23 @@ AU0828_DEVICE(0x2040, 0x7260, "Hauppauge", "HVR-950Q"),
4739 + AU0828_DEVICE(0x2040, 0x7213, "Hauppauge", "HVR-950Q"),
4740 + AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
4741 +
4742 ++/* Syntek STK1160 */
4743 ++{
4744 ++ .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
4745 ++ USB_DEVICE_ID_MATCH_INT_CLASS |
4746 ++ USB_DEVICE_ID_MATCH_INT_SUBCLASS,
4747 ++ .idVendor = 0x05e1,
4748 ++ .idProduct = 0x0408,
4749 ++ .bInterfaceClass = USB_CLASS_AUDIO,
4750 ++ .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
4751 ++ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
4752 ++ .vendor_name = "Syntek",
4753 ++ .product_name = "STK1160",
4754 ++ .ifnum = QUIRK_ANY_INTERFACE,
4755 ++ .type = QUIRK_AUDIO_ALIGN_TRANSFER
4756 ++ }
4757 ++},
4758 ++
4759 + /* Digidesign Mbox */
4760 + {
4761 + /* Thanks to Clemens Ladisch <clemens@×××××××.de> */
4762 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
4763 +index 3351605d2608..e7a1166c3eb4 100644
4764 +--- a/virt/kvm/kvm_main.c
4765 ++++ b/virt/kvm/kvm_main.c
4766 +@@ -104,7 +104,7 @@ static bool largepages_enabled = true;
4767 + bool kvm_is_mmio_pfn(pfn_t pfn)
4768 + {
4769 + if (pfn_valid(pfn))
4770 +- return PageReserved(pfn_to_page(pfn));
4771 ++ return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
4772 +
4773 + return true;
4774 + }
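
Finally, the kvm_main.c change excludes the shared zero page from the MMIO classification: it is marked PageReserved like device memory, but it is ordinary RAM. A userspace model of the corrected predicate, with toy stand-ins for the kernel's pfn helpers:

#include <stdbool.h>
#include <stdio.h>

#define ZERO_PFN     7      /* pretend pfn of the shared zero page */
#define LAST_RAM_PFN 100

static bool pfn_valid(unsigned long pfn)   { return pfn <= LAST_RAM_PFN; }
static bool is_zero_pfn(unsigned long pfn) { return pfn == ZERO_PFN; }
static bool page_reserved(unsigned long pfn)
{
	return pfn == ZERO_PFN; /* only the zero page is reserved in this toy */
}

static bool is_mmio_pfn(unsigned long pfn)
{
	if (pfn_valid(pfn))
		return !is_zero_pfn(pfn) && page_reserved(pfn);
	return true;            /* invalid pfns are device memory */
}

int main(void)
{
	printf("zero page -> %d, ram -> %d, device -> %d\n",
	       is_mmio_pfn(ZERO_PFN), is_mmio_pfn(5), is_mmio_pfn(200));
	return 0;
}
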