From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Sat, 09 Dec 2017 23:29:37 +0000
Message-Id: 1512862162.d08a1764ec3b4b4d175474453e1aaf3c26e65f63.mpagano@gentoo
commit: d08a1764ec3b4b4d175474453e1aaf3c26e65f63
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Dec 9 23:29:22 2017 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Dec 9 23:29:22 2017 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d08a1764

Linux patch 4.9.68

0000_README | 4 +
1067_linux-4.9.68.patch | 3370 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 3374 insertions(+)

diff --git a/0000_README b/0000_README
index baf7aeb..7f2750d 100644
--- a/0000_README
+++ b/0000_README
@@ -311,6 +311,10 @@ Patch: 1066_linux-4.9.67.patch
 From: http://www.kernel.org
 Desc: Linux 4.9.67

+Patch: 1067_linux-4.9.68.patch
+From: http://www.kernel.org
+Desc: Linux 4.9.68
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1067_linux-4.9.68.patch b/1067_linux-4.9.68.patch
new file mode 100644
index 0000000..d132406
--- /dev/null
+++ b/1067_linux-4.9.68.patch
@@ -0,0 +1,3370 @@
+diff --git a/Makefile b/Makefile
+index 70546af61a0a..dfe17af517b2 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 67
++SUBLEVEL = 68
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+
+diff --git a/arch/arm/mach-omap1/dma.c b/arch/arm/mach-omap1/dma.c
+index f6ba589cd312..c821c1d5610e 100644
+--- a/arch/arm/mach-omap1/dma.c
++++ b/arch/arm/mach-omap1/dma.c
+@@ -32,7 +32,6 @@
+ #include "soc.h"
+
+ #define OMAP1_DMA_BASE (0xfffed800)
+-#define OMAP1_LOGICAL_DMA_CH_COUNT 17
+
+ static u32 enable_1510_mode;
+
+@@ -348,8 +347,6 @@ static int __init omap1_system_dma_init(void)
+ goto exit_iounmap;
+ }
+
+- d->lch_count = OMAP1_LOGICAL_DMA_CH_COUNT;
+-
+ /* Valid attributes for omap1 plus processors */
+ if (cpu_is_omap15xx())
+ d->dev_caps = ENABLE_1510_MODE;
+@@ -366,13 +363,14 @@ static int __init omap1_system_dma_init(void)
+ d->dev_caps |= CLEAR_CSR_ON_READ;
+ d->dev_caps |= IS_WORD_16;
+
+- if (cpu_is_omap15xx())
+- d->chan_count = 9;
+- else if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
+- if (!(d->dev_caps & ENABLE_1510_MODE))
+- d->chan_count = 16;
++ /* available logical channels */
++ if (cpu_is_omap15xx()) {
++ d->lch_count = 9;
++ } else {
++ if (d->dev_caps & ENABLE_1510_MODE)
++ d->lch_count = 9;
+ else
+- d->chan_count = 9;
++ d->lch_count = 16;
+ }
+
+ p = dma_plat_info;
+diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
+index da310bb779b9..88676fe9b119 100644
+--- a/arch/arm/mach-omap2/pdata-quirks.c
++++ b/arch/arm/mach-omap2/pdata-quirks.c
+@@ -147,7 +147,7 @@ static struct ti_st_plat_data wilink_pdata = {
+ .nshutdown_gpio = 137,
+ .dev_name = "/dev/ttyO1",
+ .flow_cntrl = 1,
+- .baud_rate = 300000,
++ .baud_rate = 3000000,
+ };
+
+ static struct platform_device wl18xx_device = {
+diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
+index 87131cd3bc8f..6d3a50446b21 100644
+--- a/arch/m68k/mm/mcfmmu.c
++++ b/arch/m68k/mm/mcfmmu.c
+@@ -169,7 +169,7 @@ void __init cf_bootmem_alloc(void)
+ max_pfn = max_low_pfn = PFN_DOWN(_ramend);
+ high_memory = (void *)_ramend;
+
+- m68k_virt_to_node_shift = fls(_ramend - _rambase - 1) - 6;
++ m68k_virt_to_node_shift = fls(_ramend - 1) - 6;
+ module_fixup(NULL, __start_fixup, __stop_fixup);
+
+ /* setup bootmem data */
+diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
+index f61cad3de4e6..4c935f7504f7 100644
+--- a/arch/powerpc/include/asm/book3s/64/hash.h
++++ b/arch/powerpc/include/asm/book3s/64/hash.h
+@@ -201,6 +201,10 @@ extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
+ unsigned long phys);
+ extern void hash__vmemmap_remove_mapping(unsigned long start,
+ unsigned long page_size);
++
++int hash__create_section_mapping(unsigned long start, unsigned long end);
++int hash__remove_section_mapping(unsigned long start, unsigned long end);
++
+ #endif /* !__ASSEMBLY__ */
+ #endif /* __KERNEL__ */
+ #endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */
+diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
+index 78dabf065ba9..bd666287c5ed 100644
+--- a/arch/powerpc/mm/hash_utils_64.c
++++ b/arch/powerpc/mm/hash_utils_64.c
+@@ -747,7 +747,7 @@ static unsigned long __init htab_get_table_size(void)
+ }
+
+ #ifdef CONFIG_MEMORY_HOTPLUG
+-int create_section_mapping(unsigned long start, unsigned long end)
++int hash__create_section_mapping(unsigned long start, unsigned long end)
+ {
+ int rc = htab_bolt_mapping(start, end, __pa(start),
+ pgprot_val(PAGE_KERNEL), mmu_linear_psize,
+@@ -761,7 +761,7 @@ int create_section_mapping(unsigned long start, unsigned long end)
+ return rc;
+ }
+
+-int remove_section_mapping(unsigned long start, unsigned long end)
++int hash__remove_section_mapping(unsigned long start, unsigned long end)
+ {
+ int rc = htab_remove_mapping(start, end, mmu_linear_psize,
+ mmu_kernel_ssize);
+diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
+index f4f437cbabf1..0fad7f6742ff 100644
+--- a/arch/powerpc/mm/pgtable-book3s64.c
++++ b/arch/powerpc/mm/pgtable-book3s64.c
+@@ -125,3 +125,21 @@ void mmu_cleanup_all(void)
+ else if (mmu_hash_ops.hpte_clear_all)
+ mmu_hash_ops.hpte_clear_all();
+ }
++
++#ifdef CONFIG_MEMORY_HOTPLUG
++int create_section_mapping(unsigned long start, unsigned long end)
++{
++ if (radix_enabled())
++ return -ENODEV;
++
++ return hash__create_section_mapping(start, end);
++}
++
++int remove_section_mapping(unsigned long start, unsigned long end)
++{
++ if (radix_enabled())
++ return -ENODEV;
++
++ return hash__remove_section_mapping(start, end);
++}
++#endif /* CONFIG_MEMORY_HOTPLUG */
+diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h
+index 649eb62c52b3..9e02cb7955c1 100644
+--- a/arch/s390/include/asm/pci_insn.h
++++ b/arch/s390/include/asm/pci_insn.h
+@@ -81,6 +81,6 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range);
+ int zpci_load(u64 *data, u64 req, u64 offset);
+ int zpci_store(u64 data, u64 req, u64 offset);
+ int zpci_store_block(const u64 *data, u64 req, u64 offset);
+-void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);
++int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);
+
+ #endif
+diff --git a/arch/s390/include/asm/runtime_instr.h b/arch/s390/include/asm/runtime_instr.h
+index 402ad6df4897..c54a9310d814 100644
+--- a/arch/s390/include/asm/runtime_instr.h
++++ b/arch/s390/include/asm/runtime_instr.h
+@@ -85,6 +85,8 @@ static inline void restore_ri_cb(struct runtime_instr_cb *cb_next,
+ load_runtime_instr_cb(&runtime_instr_empty_cb);
+ }
+
+-void exit_thread_runtime_instr(void);
++struct task_struct;
++
++void runtime_instr_release(struct task_struct *tsk);
+
+ #endif /* _RUNTIME_INSTR_H */
+diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
+index 172fe1121d99..8382fc62cde6 100644
+--- a/arch/s390/kernel/process.c
++++ b/arch/s390/kernel/process.c
+@@ -70,8 +70,6 @@ extern void kernel_thread_starter(void);
+ */
+ void exit_thread(struct task_struct *tsk)
+ {
+- if (tsk == current)
+- exit_thread_runtime_instr();
+ }
+
+ void flush_thread(void)
+@@ -84,6 +82,7 @@ void release_thread(struct task_struct *dead_task)
+
+ void arch_release_task_struct(struct task_struct *tsk)
+ {
++ runtime_instr_release(tsk);
+ }
+
+ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
+index 70cdb03d4acd..fd03a7569e10 100644
+--- a/arch/s390/kernel/runtime_instr.c
++++ b/arch/s390/kernel/runtime_instr.c
+@@ -18,11 +18,24 @@
+ /* empty control block to disable RI by loading it */
+ struct runtime_instr_cb runtime_instr_empty_cb;
+
++void runtime_instr_release(struct task_struct *tsk)
++{
++ kfree(tsk->thread.ri_cb);
++}
++
+ static void disable_runtime_instr(void)
+ {
+- struct pt_regs *regs = task_pt_regs(current);
++ struct task_struct *task = current;
++ struct pt_regs *regs;
+
++ if (!task->thread.ri_cb)
++ return;
++ regs = task_pt_regs(task);
++ preempt_disable();
+ load_runtime_instr_cb(&runtime_instr_empty_cb);
++ kfree(task->thread.ri_cb);
++ task->thread.ri_cb = NULL;
++ preempt_enable();
+
+ /*
+ * Make sure the RI bit is deleted from the PSW. If the user did not
+@@ -43,19 +56,6 @@ static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
+ cb->valid = 1;
+ }
+
+-void exit_thread_runtime_instr(void)
+-{
+- struct task_struct *task = current;
+-
+- preempt_disable();
+- if (!task->thread.ri_cb)
+- return;
+- disable_runtime_instr();
+- kfree(task->thread.ri_cb);
+- task->thread.ri_cb = NULL;
+- preempt_enable();
+-}
+-
+ SYSCALL_DEFINE1(s390_runtime_instr, int, command)
+ {
+ struct runtime_instr_cb *cb;
+@@ -64,7 +64,7 @@ SYSCALL_DEFINE1(s390_runtime_instr, int, command)
+ return -EOPNOTSUPP;
+
+ if (command == S390_RUNTIME_INSTR_STOP) {
+- exit_thread_runtime_instr();
++ disable_runtime_instr();
+ return 0;
+ }
+
+diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
+index 15ffc19c8c0c..03a1d5976ff5 100644
+--- a/arch/s390/pci/pci.c
++++ b/arch/s390/pci/pci.c
+@@ -354,7 +354,8 @@ static void zpci_irq_handler(struct airq_struct *airq)
+ /* End of second scan with interrupts on. */
+ break;
+ /* First scan complete, reenable interrupts. */
+- zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
++ if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC))
++ break;
+ si = 0;
+ continue;
+ }
+@@ -928,7 +929,7 @@ static int __init pci_base_init(void)
+ if (!s390_pci_probe)
+ return 0;
+
+- if (!test_facility(69) || !test_facility(71) || !test_facility(72))
++ if (!test_facility(69) || !test_facility(71))
+ return 0;
+
+ rc = zpci_debug_init();
+diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c
+index fa8d7d4b9751..248146dcfce3 100644
+--- a/arch/s390/pci/pci_insn.c
++++ b/arch/s390/pci/pci_insn.c
+@@ -7,6 +7,7 @@
+ #include <linux/export.h>
+ #include <linux/errno.h>
+ #include <linux/delay.h>
++#include <asm/facility.h>
+ #include <asm/pci_insn.h>
+ #include <asm/pci_debug.h>
+ #include <asm/processor.h>
+@@ -91,11 +92,14 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
+ }
+
+ /* Set Interruption Controls */
+-void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc)
++int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc)
+ {
++ if (!test_facility(72))
++ return -EIO;
+ asm volatile (
+ " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
+ : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused));
++ return 0;
+ }
+
+ /* PCI Load */
+diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
+index be202390bbd3..9dfeeeca0ea8 100644
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -1389,9 +1389,13 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
+ continue;
+
+ /* log dropped samples number */
+- if (error[bit])
++ if (error[bit]) {
+ perf_log_lost_samples(event, error[bit]);
+
++ if (perf_event_account_interrupt(event))
++ x86_pmu_stop(event, 0);
++ }
++
+ if (counts[bit]) {
+ __intel_pmu_pebs_event(event, iregs, base,
+ top, bit, counts[bit]);
+diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
+index 91dfcafe27a6..bad25bb80679 100644
+--- a/arch/x86/include/asm/syscalls.h
++++ b/arch/x86/include/asm/syscalls.h
+@@ -21,7 +21,7 @@ asmlinkage long sys_ioperm(unsigned long, unsigned long, int);
+ asmlinkage long sys_iopl(unsigned int);
+
+ /* kernel/ldt.c */
+-asmlinkage int sys_modify_ldt(int, void __user *, unsigned long);
++asmlinkage long sys_modify_ldt(int, void __user *, unsigned long);
+
+ /* kernel/signal.c */
+ asmlinkage long sys_rt_sigreturn(void);
+diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
+index 095ef7ddd6ae..abfbb61b18b8 100644
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -1077,6 +1077,7 @@ int copyin_to_xsaves(const void *kbuf, const void __user *ubuf,
+ * Add back in the features that came in from userspace:
+ */
+ xsave->header.xfeatures |= xfeatures;
++ xsave->header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT | xsave->header.xfeatures;
+
+ return 0;
+ }
+diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
+index 5f8f0b3cc674..2c0b0b645a74 100644
+--- a/arch/x86/kernel/kprobes/ftrace.c
++++ b/arch/x86/kernel/kprobes/ftrace.c
+@@ -26,7 +26,7 @@
+ #include "common.h"
+
+ static nokprobe_inline
+-int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
++void __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb, unsigned long orig_ip)
+ {
+ /*
+@@ -41,20 +41,21 @@ int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+ __this_cpu_write(current_kprobe, NULL);
+ if (orig_ip)
+ regs->ip = orig_ip;
+- return 1;
+ }
+
+ int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+ {
+- if (kprobe_ftrace(p))
+- return __skip_singlestep(p, regs, kcb, 0);
+- else
+- return 0;
++ if (kprobe_ftrace(p)) {
++ __skip_singlestep(p, regs, kcb, 0);
++ preempt_enable_no_resched();
++ return 1;
++ }
++ return 0;
+ }
+ NOKPROBE_SYMBOL(skip_singlestep);
+
+-/* Ftrace callback handler for kprobes */
++/* Ftrace callback handler for kprobes -- called under preepmt disabed */
+ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *ops, struct pt_regs *regs)
+ {
+@@ -77,13 +78,17 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+ /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
+ regs->ip = ip + sizeof(kprobe_opcode_t);
+
++ /* To emulate trap based kprobes, preempt_disable here */
++ preempt_disable();
+ __this_cpu_write(current_kprobe, p);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+- if (!p->pre_handler || !p->pre_handler(p, regs))
++ if (!p->pre_handler || !p->pre_handler(p, regs)) {
+ __skip_singlestep(p, regs, kcb, orig_ip);
++ preempt_enable_no_resched();
++ }
+ /*
+ * If pre_handler returns !0, it sets regs->ip and
+- * resets current kprobe.
++ * resets current kprobe, and keep preempt count +1.
+ */
+ }
+ end:
+diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
+index 6707039b9032..5f70014ca602 100644
+--- a/arch/x86/kernel/ldt.c
++++ b/arch/x86/kernel/ldt.c
+@@ -12,6 +12,7 @@
+ #include <linux/string.h>
+ #include <linux/mm.h>
+ #include <linux/smp.h>
++#include <linux/syscalls.h>
+ #include <linux/slab.h>
+ #include <linux/vmalloc.h>
+ #include <linux/uaccess.h>
+@@ -271,8 +272,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
+ return error;
+ }
+
+-asmlinkage int sys_modify_ldt(int func, void __user *ptr,
+- unsigned long bytecount)
++SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
++ unsigned long , bytecount)
+ {
+ int ret = -ENOSYS;
+
+@@ -290,5 +291,14 @@ asmlinkage int sys_modify_ldt(int func, void __user *ptr,
+ ret = write_ldt(ptr, bytecount, 0);
+ break;
+ }
+- return ret;
++ /*
++ * The SYSCALL_DEFINE() macros give us an 'unsigned long'
++ * return type, but tht ABI for sys_modify_ldt() expects
++ * 'int'. This cast gives us an int-sized value in %rax
++ * for the return code. The 'unsigned' is necessary so
++ * the compiler does not try to sign-extend the negative
++ * return codes into the high half of the register when
++ * taking the value from int->long.
++ */
++ return (unsigned int)ret;
+ }
+diff --git a/arch/x86/um/ldt.c b/arch/x86/um/ldt.c
+index 836a1eb5df43..3ee234b6234d 100644
+--- a/arch/x86/um/ldt.c
++++ b/arch/x86/um/ldt.c
+@@ -6,6 +6,7 @@
+ #include <linux/mm.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
++#include <linux/syscalls.h>
+ #include <linux/uaccess.h>
+ #include <asm/unistd.h>
+ #include <os.h>
+@@ -369,7 +370,9 @@ void free_ldt(struct mm_context *mm)
+ mm->arch.ldt.entry_count = 0;
+ }
+
+-int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
++SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
++ unsigned long , bytecount)
+ {
+- return do_modify_ldt_skas(func, ptr, bytecount);
++ /* See non-um modify_ldt() for why we do this cast */
++ return (unsigned int)do_modify_ldt_skas(func, ptr, bytecount);
+ }
+diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
+index 5d4c05074a5c..e2bcacc1a921 100644
+--- a/drivers/crypto/caam/intern.h
++++ b/drivers/crypto/caam/intern.h
+@@ -41,6 +41,7 @@ struct caam_drv_private_jr {
+ struct device *dev;
+ int ridx;
+ struct caam_job_ring __iomem *rregs; /* JobR's register space */
++ struct tasklet_struct irqtask;
+ int irq; /* One per queue */
+
+ /* Number of scatterlist crypt transforms active on the JobR */
+diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
+index 757c27f9953d..9e7f28122bb7 100644
+--- a/drivers/crypto/caam/jr.c
++++ b/drivers/crypto/caam/jr.c
+@@ -73,6 +73,8 @@ static int caam_jr_shutdown(struct device *dev)
+
+ ret = caam_reset_hw_jr(dev);
+
++ tasklet_kill(&jrp->irqtask);
++
+ /* Release interrupt */
+ free_irq(jrp->irq, dev);
+
+@@ -128,7 +130,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
+
+ /*
+ * Check the output ring for ready responses, kick
+- * the threaded irq if jobs done.
++ * tasklet if jobs done.
+ */
+ irqstate = rd_reg32(&jrp->rregs->jrintstatus);
+ if (!irqstate)
+@@ -150,13 +152,18 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
+ /* Have valid interrupt at this point, just ACK and trigger */
+ wr_reg32(&jrp->rregs->jrintstatus, irqstate);
+
+- return IRQ_WAKE_THREAD;
++ preempt_disable();
++ tasklet_schedule(&jrp->irqtask);
++ preempt_enable();
++
++ return IRQ_HANDLED;
+ }
+
+-static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
++/* Deferred service handler, run as interrupt-fired tasklet */
++static void caam_jr_dequeue(unsigned long devarg)
+ {
+ int hw_idx, sw_idx, i, head, tail;
+- struct device *dev = st_dev;
++ struct device *dev = (struct device *)devarg;
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+ void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
+ u32 *userdesc, userstatus;
+@@ -230,8 +237,6 @@ static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
+
+ /* reenable / unmask IRQs */
+ clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
+-
+- return IRQ_HANDLED;
+ }
+
+ /**
+@@ -389,10 +394,11 @@ static int caam_jr_init(struct device *dev)
+
+ jrp = dev_get_drvdata(dev);
+
++ tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
++
+ /* Connect job ring interrupt handler. */
+- error = request_threaded_irq(jrp->irq, caam_jr_interrupt,
+- caam_jr_threadirq, IRQF_SHARED,
+- dev_name(dev), dev);
++ error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
++ dev_name(dev), dev);
+ if (error) {
+ dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
+ jrp->ridx, jrp->irq);
+@@ -454,6 +460,7 @@ static int caam_jr_init(struct device *dev)
+ out_free_irq:
+ free_irq(jrp->irq, dev);
+ out_kill_deq:
++ tasklet_kill(&jrp->irqtask);
+ return error;
+ }
+
+diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c
+index 4d51f9e83fa8..04bf29808200 100644
+--- a/drivers/dma-buf/fence.c
++++ b/drivers/dma-buf/fence.c
+@@ -280,6 +280,31 @@ int fence_add_callback(struct fence *fence, struct fence_cb *cb,
+ }
+ EXPORT_SYMBOL(fence_add_callback);
+
++/**
++ * fence_get_status - returns the status upon completion
++ * @fence: [in] the fence to query
++ *
++ * This wraps fence_get_status_locked() to return the error status
++ * condition on a signaled fence. See fence_get_status_locked() for more
++ * details.
++ *
++ * Returns 0 if the fence has not yet been signaled, 1 if the fence has
++ * been signaled without an error condition, or a negative error code
++ * if the fence has been completed in err.
++ */
++int fence_get_status(struct fence *fence)
++{
++ unsigned long flags;
++ int status;
++
++ spin_lock_irqsave(fence->lock, flags);
++ status = fence_get_status_locked(fence);
++ spin_unlock_irqrestore(fence->lock, flags);
++
++ return status;
++}
++EXPORT_SYMBOL(fence_get_status);
++
+ /**
+ * fence_remove_callback - remove a callback from the signaling list
+ * @fence: [in] the fence to wait on
+@@ -526,6 +551,7 @@ fence_init(struct fence *fence, const struct fence_ops *ops,
+ fence->context = context;
+ fence->seqno = seqno;
+ fence->flags = 0UL;
++ fence->error = 0;
+
+ trace_fence_init(fence);
+ }
+diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
+index 62e8e6dc7953..4f3511415b29 100644
+--- a/drivers/dma-buf/sw_sync.c
++++ b/drivers/dma-buf/sw_sync.c
+@@ -96,9 +96,9 @@ struct sync_timeline *sync_timeline_create(const char *name)
+ obj->context = fence_context_alloc(1);
+ strlcpy(obj->name, name, sizeof(obj->name));
+
+- INIT_LIST_HEAD(&obj->child_list_head);
+- INIT_LIST_HEAD(&obj->active_list_head);
+- spin_lock_init(&obj->child_list_lock);
++ obj->pt_tree = RB_ROOT;
++ INIT_LIST_HEAD(&obj->pt_list);
++ spin_lock_init(&obj->lock);
+
+ sync_timeline_debug_add(obj);
+
+@@ -125,68 +125,6 @@ static void sync_timeline_put(struct sync_timeline *obj)
+ kref_put(&obj->kref, sync_timeline_free);
+ }
+
+-/**
+- * sync_timeline_signal() - signal a status change on a sync_timeline
+- * @obj: sync_timeline to signal
+- * @inc: num to increment on timeline->value
+- *
+- * A sync implementation should call this any time one of it's fences
+- * has signaled or has an error condition.
+- */
+-static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
+-{
+- unsigned long flags;
+- struct sync_pt *pt, *next;
+-
+- trace_sync_timeline(obj);
+-
+- spin_lock_irqsave(&obj->child_list_lock, flags);
+-
+- obj->value += inc;
+-
+- list_for_each_entry_safe(pt, next, &obj->active_list_head,
+- active_list) {
+- if (fence_is_signaled_locked(&pt->base))
+- list_del_init(&pt->active_list);
+- }
+-
+- spin_unlock_irqrestore(&obj->child_list_lock, flags);
+-}
+-
+-/**
+- * sync_pt_create() - creates a sync pt
+- * @parent: fence's parent sync_timeline
+- * @size: size to allocate for this pt
+- * @inc: value of the fence
+- *
+- * Creates a new sync_pt as a child of @parent. @size bytes will be
+- * allocated allowing for implementation specific data to be kept after
+- * the generic sync_timeline struct. Returns the sync_pt object or
+- * NULL in case of error.
+- */
+-static struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size,
+- unsigned int value)
+-{
+- unsigned long flags;
+- struct sync_pt *pt;
+-
+- if (size < sizeof(*pt))
+- return NULL;
+-
+- pt = kzalloc(size, GFP_KERNEL);
+- if (!pt)
+- return NULL;
+-
+- spin_lock_irqsave(&obj->child_list_lock, flags);
+- sync_timeline_get(obj);
+- fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock,
+- obj->context, value);
+- list_add_tail(&pt->child_list, &obj->child_list_head);
+- INIT_LIST_HEAD(&pt->active_list);
+- spin_unlock_irqrestore(&obj->child_list_lock, flags);
+- return pt;
+-}
+-
+ static const char *timeline_fence_get_driver_name(struct fence *fence)
+ {
+ return "sw_sync";
+@@ -203,13 +141,17 @@ static void timeline_fence_release(struct fence *fence)
+ {
+ struct sync_pt *pt = fence_to_sync_pt(fence);
+ struct sync_timeline *parent = fence_parent(fence);
+- unsigned long flags;
+
+- spin_lock_irqsave(fence->lock, flags);
+- list_del(&pt->child_list);
+- if (!list_empty(&pt->active_list))
+- list_del(&pt->active_list);
+- spin_unlock_irqrestore(fence->lock, flags);
++ if (!list_empty(&pt->link)) {
++ unsigned long flags;
++
++ spin_lock_irqsave(fence->lock, flags);
++ if (!list_empty(&pt->link)) {
++ list_del(&pt->link);
++ rb_erase(&pt->node, &parent->pt_tree);
++ }
++ spin_unlock_irqrestore(fence->lock, flags);
++ }
+
+ sync_timeline_put(parent);
+ fence_free(fence);
+@@ -219,18 +161,11 @@ static bool timeline_fence_signaled(struct fence *fence)
+ {
+ struct sync_timeline *parent = fence_parent(fence);
+
+- return (fence->seqno > parent->value) ? false : true;
++ return !__fence_is_later(fence->seqno, parent->value);
+ }
+
+ static bool timeline_fence_enable_signaling(struct fence *fence)
+ {
+- struct sync_pt *pt = fence_to_sync_pt(fence);
+- struct sync_timeline *parent = fence_parent(fence);
+-
+- if (timeline_fence_signaled(fence))
+- return false;
+-
+- list_add_tail(&pt->active_list, &parent->active_list_head);
+ return true;
+ }
+
+@@ -259,6 +194,107 @@ static const struct fence_ops timeline_fence_ops = {
+ .timeline_value_str = timeline_fence_timeline_value_str,
+ };
+
++/**
++ * sync_timeline_signal() - signal a status change on a sync_timeline
++ * @obj: sync_timeline to signal
++ * @inc: num to increment on timeline->value
++ *
++ * A sync implementation should call this any time one of it's fences
++ * has signaled or has an error condition.
++ */
++static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
++{
++ struct sync_pt *pt, *next;
++
++ trace_sync_timeline(obj);
++
++ spin_lock_irq(&obj->lock);
++
++ obj->value += inc;
++
++ list_for_each_entry_safe(pt, next, &obj->pt_list, link) {
++ if (!timeline_fence_signaled(&pt->base))
++ break;
++
++ list_del_init(&pt->link);
++ rb_erase(&pt->node, &obj->pt_tree);
++
++ /*
++ * A signal callback may release the last reference to this
++ * fence, causing it to be freed. That operation has to be
++ * last to avoid a use after free inside this loop, and must
++ * be after we remove the fence from the timeline in order to
++ * prevent deadlocking on timeline->lock inside
++ * timeline_fence_release().
++ */
++ fence_signal_locked(&pt->base);
++ }
++
++ spin_unlock_irq(&obj->lock);
++}
++
++/**
++ * sync_pt_create() - creates a sync pt
++ * @parent: fence's parent sync_timeline
++ * @inc: value of the fence
++ *
++ * Creates a new sync_pt as a child of @parent. @size bytes will be
++ * allocated allowing for implementation specific data to be kept after
++ * the generic sync_timeline struct. Returns the sync_pt object or
++ * NULL in case of error.
++ */
++static struct sync_pt *sync_pt_create(struct sync_timeline *obj,
++ unsigned int value)
++{
++ struct sync_pt *pt;
++
++ pt = kzalloc(sizeof(*pt), GFP_KERNEL);
++ if (!pt)
++ return NULL;
++
++ sync_timeline_get(obj);
++ fence_init(&pt->base, &timeline_fence_ops, &obj->lock,
++ obj->context, value);
++ INIT_LIST_HEAD(&pt->link);
++
++ spin_lock_irq(&obj->lock);
++ if (!fence_is_signaled_locked(&pt->base)) {
++ struct rb_node **p = &obj->pt_tree.rb_node;
++ struct rb_node *parent = NULL;
++
++ while (*p) {
++ struct sync_pt *other;
++ int cmp;
++
++ parent = *p;
++ other = rb_entry(parent, typeof(*pt), node);
++ cmp = value - other->base.seqno;
++ if (cmp > 0) {
++ p = &parent->rb_right;
++ } else if (cmp < 0) {
++ p = &parent->rb_left;
++ } else {
++ if (fence_get_rcu(&other->base)) {
++ fence_put(&pt->base);
++ pt = other;
++ goto unlock;
++ }
++ p = &parent->rb_left;
++ }
++ }
++ rb_link_node(&pt->node, parent, p);
++ rb_insert_color(&pt->node, &obj->pt_tree);
++
++ parent = rb_next(&pt->node);
++ list_add_tail(&pt->link,
++ parent ? &rb_entry(parent, typeof(*pt), node)->link : &obj->pt_list);
++ }
++unlock:
++ spin_unlock_irq(&obj->lock);
++
++ return pt;
++}
++
+ /*
+ * *WARNING*
+ *
+@@ -285,8 +321,16 @@ static int sw_sync_debugfs_open(struct inode *inode, struct file *file)
+ static int sw_sync_debugfs_release(struct inode *inode, struct file *file)
+ {
+ struct sync_timeline *obj = file->private_data;
++ struct sync_pt *pt, *next;
++
++ spin_lock_irq(&obj->lock);
++
++ list_for_each_entry_safe(pt, next, &obj->pt_list, link) {
++ fence_set_error(&pt->base, -ENOENT);
++ fence_signal_locked(&pt->base);
++ }
+
+- smp_wmb();
++ spin_unlock_irq(&obj->lock);
+
+ sync_timeline_put(obj);
+ return 0;
+@@ -309,7 +353,7 @@ static long sw_sync_ioctl_create_fence(struct sync_timeline *obj,
+ goto err;
+ }
+
+- pt = sync_pt_create(obj, sizeof(*pt), data.value);
++ pt = sync_pt_create(obj, data.value);
+ if (!pt) {
+ err = -ENOMEM;
+ goto err;
+@@ -345,6 +389,11 @@ static long sw_sync_ioctl_inc(struct sync_timeline *obj, unsigned long arg)
+ if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
+ return -EFAULT;
+
++ while (value > INT_MAX) {
++ sync_timeline_signal(obj, INT_MAX);
++ value -= INT_MAX;
++ }
++
+ sync_timeline_signal(obj, value);
+
+ return 0;
+diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
+index 2dd4c3db6caa..858263dbecd4 100644
+--- a/drivers/dma-buf/sync_debug.c
++++ b/drivers/dma-buf/sync_debug.c
+@@ -62,29 +62,29 @@ void sync_file_debug_remove(struct sync_file *sync_file)
+
+ static const char *sync_status_str(int status)
+ {
+- if (status == 0)
+- return "signaled";
++ if (status < 0)
++ return "error";
+
+ if (status > 0)
+- return "active";
++ return "signaled";
+
+- return "error";
++ return "active";
+ }
+
+-static void sync_print_fence(struct seq_file *s, struct fence *fence, bool show)
++static void sync_print_fence(struct seq_file *s,
++ struct fence *fence, bool show)
+ {
+- int status = 1;
+ struct sync_timeline *parent = fence_parent(fence);
++ int status;
+
+- if (fence_is_signaled_locked(fence))
+- status = fence->status;
++ status = fence_get_status_locked(fence);
+
+ seq_printf(s, " %s%sfence %s",
+ show ? parent->name : "",
+ show ? "_" : "",
+ sync_status_str(status));
+
+- if (status <= 0) {
++ if (status) {
+ struct timespec64 ts64 =
+ ktime_to_timespec64(fence->timestamp);
+
+@@ -116,17 +116,15 @@ static void sync_print_fence(struct seq_file *s, struct fence *fence, bool show)
+ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
+ {
+ struct list_head *pos;
+- unsigned long flags;
+
+ seq_printf(s, "%s: %d\n", obj->name, obj->value);
+
+- spin_lock_irqsave(&obj->child_list_lock, flags);
+- list_for_each(pos, &obj->child_list_head) {
+- struct sync_pt *pt =
+- container_of(pos, struct sync_pt, child_list);
++ spin_lock_irq(&obj->lock);
++ list_for_each(pos, &obj->pt_list) {
++ struct sync_pt *pt = container_of(pos, struct sync_pt, link);
+ sync_print_fence(s, &pt->base, false);
+ }
+- spin_unlock_irqrestore(&obj->child_list_lock, flags);
++ spin_unlock_irq(&obj->lock);
+ }
+
+ static void sync_print_sync_file(struct seq_file *s,
+@@ -135,7 +133,7 @@ static void sync_print_sync_file(struct seq_file *s,
+ int i;
+
+ seq_printf(s, "[%p] %s: %s\n", sync_file, sync_file->name,
+- sync_status_str(!fence_is_signaled(sync_file->fence)));
++ sync_status_str(fence_get_status(sync_file->fence)));
+
+ if (fence_is_array(sync_file->fence)) {
+ struct fence_array *array = to_fence_array(sync_file->fence);
+@@ -149,12 +147,11 @@ static void sync_print_sync_file(struct seq_file *s,
+
+ static int sync_debugfs_show(struct seq_file *s, void *unused)
+ {
+- unsigned long flags;
+ struct list_head *pos;
+
+ seq_puts(s, "objs:\n--------------\n");
+
+- spin_lock_irqsave(&sync_timeline_list_lock, flags);
++ spin_lock_irq(&sync_timeline_list_lock);
+ list_for_each(pos, &sync_timeline_list_head) {
+ struct sync_timeline *obj =
+ container_of(pos, struct sync_timeline,
+@@ -163,11 +160,11 @@ static int sync_debugfs_show(struct seq_file *s, void *unused)
+ sync_print_obj(s, obj);
+ seq_puts(s, "\n");
+ }
+- spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
++ spin_unlock_irq(&sync_timeline_list_lock);
+
+ seq_puts(s, "fences:\n--------------\n");
+
+- spin_lock_irqsave(&sync_file_list_lock, flags);
++ spin_lock_irq(&sync_file_list_lock);
+ list_for_each(pos, &sync_file_list_head) {
+ struct sync_file *sync_file =
+ container_of(pos, struct sync_file, sync_file_list);
+@@ -175,7 +172,7 @@ static int sync_debugfs_show(struct seq_file *s, void *unused)
+ sync_print_sync_file(s, sync_file);
+ seq_puts(s, "\n");
+ }
+- spin_unlock_irqrestore(&sync_file_list_lock, flags);
++ spin_unlock_irq(&sync_file_list_lock);
+ return 0;
+ }
+
+diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h
+index d269aa6783aa..9615dc0385b5 100644
+--- a/drivers/dma-buf/sync_debug.h
++++ b/drivers/dma-buf/sync_debug.h
+@@ -14,6 +14,7 @@
+ #define _LINUX_SYNC_H
+
+ #include <linux/list.h>
++#include <linux/rbtree.h>
+ #include <linux/spinlock.h>
+ #include <linux/fence.h>
+
+@@ -24,43 +25,41 @@
+ * struct sync_timeline - sync object
+ * @kref: reference count on fence.
+ * @name: name of the sync_timeline. Useful for debugging
+- * @child_list_head: list of children sync_pts for this sync_timeline
+- * @child_list_lock: lock protecting @child_list_head and fence.status
+- * @active_list_head: list of active (unsignaled/errored) sync_pts
++ * @lock: lock protecting @pt_list and @value
++ * @pt_tree: rbtree of active (unsignaled/errored) sync_pts
++ * @pt_list: list of active (unsignaled/errored) sync_pts
+ * @sync_timeline_list: membership in global sync_timeline_list
+ */
+ struct sync_timeline {
+ struct kref kref;
+ char name[32];
+
+- /* protected by child_list_lock */
++ /* protected by lock */
+ u64 context;
+ int value;
+
+- struct list_head child_list_head;
+- spinlock_t child_list_lock;
+-
+- struct list_head active_list_head;
++ struct rb_root pt_tree;
++ struct list_head pt_list;
++ spinlock_t lock;
+
+ struct list_head sync_timeline_list;
+ };
+
+ static inline struct sync_timeline *fence_parent(struct fence *fence)
+ {
+- return container_of(fence->lock, struct sync_timeline,
+- child_list_lock);
++ return container_of(fence->lock, struct sync_timeline, lock);
+ }
+
+ /**
+ * struct sync_pt - sync_pt object
+ * @base: base fence object
+- * @child_list: sync timeline child's list
+- * @active_list: sync timeline active child's list
++ * @link: link on the sync timeline's list
++ * @node: node in the sync timeline's tree
+ */
+ struct sync_pt {
+ struct fence base;
+- struct list_head child_list;
+- struct list_head active_list;
++ struct list_head link;
++ struct rb_node node;
+ };
+
+ #ifdef CONFIG_SW_SYNC
+diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
+index b29a9e817320..f0c374d6ab40 100644
+--- a/drivers/dma-buf/sync_file.c
++++ b/drivers/dma-buf/sync_file.c
+@@ -67,9 +67,10 @@ static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
+ * sync_file_create() - creates a sync file
+ * @fence: fence to add to the sync_fence
+ *
+- * Creates a sync_file containg @fence. Once this is called, the sync_file
+- * takes ownership of @fence. The sync_file can be released with
+- * fput(sync_file->file). Returns the sync_file or NULL in case of error.
++ * Creates a sync_file containg @fence. This function acquires and additional
++ * reference of @fence for the newly-created &sync_file, if it succeeds. The
++ * sync_file can be released with fput(sync_file->file). Returns the
++ * sync_file or NULL in case of error.
+ */
+ struct sync_file *sync_file_create(struct fence *fence)
+ {
+@@ -79,7 +80,7 @@ struct sync_file *sync_file_create(struct fence *fence)
+ if (!sync_file)
+ return NULL;
+
+- sync_file->fence = fence;
++ sync_file->fence = fence_get(fence);
+
+ snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d",
+ fence->ops->get_driver_name(fence),
+@@ -90,13 +91,6 @@ struct sync_file *sync_file_create(struct fence *fence)
+ }
+ EXPORT_SYMBOL(sync_file_create);
+
+-/**
+- * sync_file_fdget() - get a sync_file from an fd
+- * @fd: fd referencing a fence
+- *
+- * Ensures @fd references a valid sync_file, increments the refcount of the
+- * backing file. Returns the sync_file or NULL in case of error.
+- */
+ static struct sync_file *sync_file_fdget(int fd)
+ {
+ struct file *file = fget(fd);
+@@ -377,10 +371,8 @@ static void sync_fill_fence_info(struct fence *fence,
+ sizeof(info->obj_name));
+ strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
+ sizeof(info->driver_name));
+- if (fence_is_signaled(fence))
+- info->status = fence->status >= 0 ? 1 : fence->status;
+- else
+- info->status = 0;
++
++ info->status = fence_get_status(fence);
+ info->timestamp_ns = ktime_to_ns(fence->timestamp);
+ }
+
+diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
+index 9f3dbc8c63d2..fb2e7476d96b 100644
+--- a/drivers/dma/pl330.c
++++ b/drivers/dma/pl330.c
+@@ -1694,7 +1694,6 @@ static bool _chan_ns(const struct pl330_dmac *pl330, int i)
+ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
+ {
+ struct pl330_thread *thrd = NULL;
+- unsigned long flags;
+ int chans, i;
+
+ if (pl330->state == DYING)
+@@ -1702,8 +1701,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
+
+ chans = pl330->pcfg.num_chan;
+
+- spin_lock_irqsave(&pl330->lock, flags);
+-
+ for (i = 0; i < chans; i++) {
+ thrd = &pl330->channels[i];
+ if ((thrd->free) && (!_manager_ns(thrd) ||
+@@ -1721,8 +1718,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
+ thrd = NULL;
+ }
+
+- spin_unlock_irqrestore(&pl330->lock, flags);
+-
+ return thrd;
+ }
+
+@@ -1740,7 +1735,6 @@ static inline void _free_event(struct pl330_thread *thrd, int ev)
+ static void pl330_release_channel(struct pl330_thread *thrd)
+ {
+ struct pl330_dmac *pl330;
+- unsigned long flags;
+
+ if (!thrd || thrd->free)
+ return;
+@@ -1752,10 +1746,8 @@ static void pl330_release_channel(struct pl330_thread *thrd)
+
+ pl330 = thrd->dmac;
+
+- spin_lock_irqsave(&pl330->lock, flags);
+ _free_event(thrd, thrd->ev);
+ thrd->free = true;
+- spin_unlock_irqrestore(&pl330->lock, flags);
+ }
+
+ /* Initialize the structure for PL330 configuration, that can be used
+@@ -2120,20 +2112,20 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
+ struct pl330_dmac *pl330 = pch->dmac;
+ unsigned long flags;
+
+- spin_lock_irqsave(&pch->lock, flags);
++ spin_lock_irqsave(&pl330->lock, flags);
+
+ dma_cookie_init(chan);
+ pch->cyclic = false;
+
+ pch->thread = pl330_request_channel(pl330);
+ if (!pch->thread) {
+- spin_unlock_irqrestore(&pch->lock, flags);
++ spin_unlock_irqrestore(&pl330->lock, flags);
+ return -ENOMEM;
+ }
+
+ tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
+
+- spin_unlock_irqrestore(&pch->lock, flags);
++ spin_unlock_irqrestore(&pl330->lock, flags);
+
+ return 1;
+ }
+@@ -2236,12 +2228,13 @@ static int pl330_pause(struct dma_chan *chan)
+ static void pl330_free_chan_resources(struct dma_chan *chan)
+ {
+ struct dma_pl330_chan *pch = to_pchan(chan);
++ struct pl330_dmac *pl330 = pch->dmac;
+ unsigned long flags;
+
+ tasklet_kill(&pch->task);
+
+ pm_runtime_get_sync(pch->dmac->ddma.dev);
+- spin_lock_irqsave(&pch->lock, flags);
++ spin_lock_irqsave(&pl330->lock, flags);
+
+ pl330_release_channel(pch->thread);
+ pch->thread = NULL;
+@@ -2249,7 +2242,7 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
+ if (pch->cyclic)
+ list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
+
+- spin_unlock_irqrestore(&pch->lock, flags);
++ spin_unlock_irqrestore(&pl330->lock, flags);
+ pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
+ pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
+ }
+diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
+index 307547f4848d..ae3f60be7759 100644
+--- a/drivers/dma/stm32-dma.c
++++ b/drivers/dma/stm32-dma.c
+@@ -884,7 +884,7 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
+ struct virt_dma_desc *vdesc;
+ enum dma_status status;
+ unsigned long flags;
+- u32 residue;
++ u32 residue = 0;
+
+ status = dma_cookie_status(c, cookie, state);
+ if ((status == DMA_COMPLETE) || (!state))
+@@ -892,16 +892,12 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ vdesc = vchan_find_desc(&chan->vchan, cookie);
+- if (cookie == chan->desc->vdesc.tx.cookie) {
++ if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
+ residue = stm32_dma_desc_residue(chan, chan->desc,
+ chan->next_sg);
+- } else if (vdesc) {
++ else if (vdesc)
+ residue = stm32_dma_desc_residue(chan,
+ to_stm32_dma_desc(vdesc), 0);
+- } else {
+- residue = 0;
+- }
+-
+ dma_set_residue(state, residue);
+
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+@@ -976,21 +972,18 @@ static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
+ struct stm32_dma_chan *chan;
+ struct dma_chan *c;
+
+- if (dma_spec->args_count < 3)
++ if (dma_spec->args_count < 4)
+ return NULL;
+
+ cfg.channel_id = dma_spec->args[0];
+ cfg.request_line = dma_spec->args[1];
+ cfg.stream_config = dma_spec->args[2];
+- cfg.threshold = 0;
++ cfg.threshold = dma_spec->args[3];
+
+ if ((cfg.channel_id >= STM32_DMA_MAX_CHANNELS) || (cfg.request_line >=
+ STM32_DMA_MAX_REQUEST_ID))
+ return NULL;
+
+- if (dma_spec->args_count > 3)
+- cfg.threshold = dma_spec->args[3];
+-
+ chan = &dmadev->chan[cfg.channel_id];
+
+ c = dma_get_slave_channel(&chan->vchan.chan);
+diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
+index 54775221a01f..3c47e6361d81 100644
+--- a/drivers/edac/sb_edac.c
++++ b/drivers/edac/sb_edac.c
+@@ -2510,6 +2510,7 @@ static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
+ break;
+ case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
+ pvt->pci_ta = pdev;
++ break;
+ case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS:
+ pvt->pci_ras = pdev;
+ break;
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+index c2bd9f045532..6d75fd0e3105 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+@@ -565,11 +565,8 @@ static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs =
+
+ static void dce_virtual_encoder_destroy(struct drm_encoder *encoder)
+ {
+- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+-
+- kfree(amdgpu_encoder->enc_priv);
+ drm_encoder_cleanup(encoder);
+- kfree(amdgpu_encoder);
++ kfree(encoder);
+ }
+
+ static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
+diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+index 50f0cf2788b7..7522f796f19b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+@@ -182,7 +182,7 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
+ WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
+
+ data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+- data &= ~0xffc00000;
++ data &= ~0x3ff;
+ WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
+
+ data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
+diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+index 6ca1f3117fe8..6dd09c306bc1 100644
+--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
++++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+@@ -46,7 +46,8 @@ enum decon_flag_bits {
+ BIT_CLKS_ENABLED,
+ BIT_IRQS_ENABLED,
+ BIT_WIN_UPDATED,
+- BIT_SUSPENDED
++ BIT_SUSPENDED,
++ BIT_REQUEST_UPDATE
+ };
+
+ struct decon_context {
+@@ -315,6 +316,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
+
+ /* window enable */
+ decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0);
++ set_bit(BIT_REQUEST_UPDATE, &ctx->flags);
+ }
+
+ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
+@@ -327,6 +329,7 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
+ return;
+
+ decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0);
++ set_bit(BIT_REQUEST_UPDATE, &ctx->flags);
+ }
+
+ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
+@@ -340,8 +343,8 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
+ for (i = ctx->first_win; i < WINDOWS_NR; i++)
+ decon_shadow_protect_win(ctx, i, false);
+
+- /* standalone update */
+- decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
++ if (test_and_clear_bit(BIT_REQUEST_UPDATE, &ctx->flags))
++ decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
+
+ if (ctx->out_type & IFTYPE_I80)
+ set_bit(BIT_WIN_UPDATED, &ctx->flags);
+diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+index cc2fde2ae5ef..c9eef0f51d31 100644
+--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
++++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+@@ -243,7 +243,6 @@ static int fsl_dcu_drm_pm_suspend(struct device *dev)
+ return PTR_ERR(fsl_dev->state);
+ }
+
+- clk_disable_unprepare(fsl_dev->pix_clk);
+ clk_disable_unprepare(fsl_dev->clk);
+
+ return 0;
+@@ -266,6 +265,7 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
+ if (fsl_dev->tcon)
+ fsl_tcon_bypass_enable(fsl_dev->tcon);
+ fsl_dcu_drm_init_planes(fsl_dev->drm);
++ enable_irq(fsl_dev->irq);
+ drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state);
+
+ console_lock();
+@@ -273,7 +273,6 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
+ console_unlock();
+
+ drm_kms_helper_poll_enable(fsl_dev->drm);
+- enable_irq(fsl_dev->irq);
+
+ return 0;
+ }
+diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
+index 686971263bef..45d6771fac8c 100644
+--- a/drivers/i2c/busses/i2c-cadence.c
++++ b/drivers/i2c/busses/i2c-cadence.c
+@@ -962,10 +962,6 @@ static int cdns_i2c_probe(struct platform_device *pdev)
+ goto err_clk_dis;
+ }
+
+- ret = i2c_add_adapter(&id->adap);
+- if (ret < 0)
+- goto err_clk_dis;
+-
+ /*
+ * Cadence I2C controller has a bug wherein it generates
+ * invalid read transaction after HW timeout in master receiver mode.
+@@ -975,6 +971,10 @@ static int cdns_i2c_probe(struct platform_device *pdev)
+ */
+ cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET);
+
++ ret = i2c_add_adapter(&id->adap);
++ if (ret < 0)
++ goto err_clk_dis;
++
+ dev_info(&pdev->dev, "%u kHz mmio %08lx irq %d\n",
+ id->i2c_clk / 1000, (unsigned long)r_mem->start, id->irq);
+
+diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
+index 472641fc890c..af05e20c986b 100644
+--- a/drivers/iio/adc/ti-ads1015.c
++++ b/drivers/iio/adc/ti-ads1015.c
+@@ -269,6 +269,7 @@ int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val)
+
+ conv_time = DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr_old]);
+ conv_time += DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr]);
++ conv_time += conv_time / 10; /* 10% internal clock inaccuracy */
+ usleep_range(conv_time, conv_time + 1);
+ data->conv_invalid = false;
+ }
+diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c
+index 63890ebb72bd..eccf7039aaca 100644
+--- a/drivers/infiniband/hw/qedr/qedr_cm.c
++++ b/drivers/infiniband/hw/qedr/qedr_cm.c
+@@ -404,9 +404,9 @@ static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
+ }
+
+ if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
+- packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
+- else
+ packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB;
++ else
++ packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
+
+ packet->roce_mode = roce_mode;
+ memcpy(packet->header.vaddr, ud_header_buffer, header_size);
+diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
+index 4ba019e3dc56..35d5b89decb4 100644
+--- a/drivers/infiniband/hw/qedr/verbs.c
++++ b/drivers/infiniband/hw/qedr/verbs.c
+@@ -1653,7 +1653,7 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
+ int status = 0;
+
+ if (new_state == qp->state)
+- return 1;
++ return 0;
+
+ switch (qp->state) {
+ case QED_ROCE_QP_STATE_RESET:
+diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
+index e0f1c6d534fe..ab8a1b36af21 100644
+--- a/drivers/md/bcache/request.c
++++ b/drivers/md/bcache/request.c
+@@ -703,7 +703,14 @@ static void cached_dev_read_error(struct closure *cl)
+ struct search *s = container_of(cl, struct search, cl);
+ struct bio *bio = &s->bio.bio;
+
+- if (s->recoverable) {
++ /*
++ * If read request hit dirty data (s->read_dirty_data is true),
++ * then recovery a failed read request from cached device may
++ * get a stale data back. So read failure recovery is only
++ * permitted when read request hit clean data in cache device,
++ * or when cache read race happened.
++ */
++ if (s->recoverable && !s->read_dirty_data) {
+ /* Retry from the backing device: */
+ trace_bcache_read_retry(s->orig_bio);
+
+diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
+index 90ed2e12d345..80c89a31d790 100644
+--- a/drivers/mmc/host/sdhci-msm.c
++++ b/drivers/mmc/host/sdhci-msm.c
+@@ -642,6 +642,21 @@ static int sdhci_msm_probe(struct platform_device *pdev)
+ CORE_VENDOR_SPEC_CAPABILITIES0);
+ }
+
++ /*
++ * Power on reset state may trigger power irq if previous status of
++ * PWRCTL was either BUS_ON or IO_HIGH_V. So before enabling pwr irq
++ * interrupt in GIC, any pending power irq interrupt should be
++ * acknowledged. Otherwise power irq interrupt handler would be
++ * fired prematurely.
++ */
++ sdhci_msm_voltage_switch(host);
++
++ /*
++ * Ensure that above writes are propogated before interrupt enablement
++ * in GIC.
++ */
++ mb();
++
+ /* Setup IRQ for handling power/voltage tasks with PMIC */
+ msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
+ if (msm_host->pwr_irq < 0) {
+@@ -651,6 +666,9 @@ static int sdhci_msm_probe(struct platform_device *pdev)
+ goto clk_disable;
+ }
+
++ /* Enable pwr irq interrupts */
++ writel_relaxed(INT_MASK, msm_host->core_mem + CORE_PWRCTL_MASK);
++
+ ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
+ sdhci_msm_pwr_irq, IRQF_ONESHOT,
+ dev_name(&pdev->dev), host);
+diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
+index e90c6a7333d7..2e4649655181 100644
+--- a/drivers/net/appletalk/ipddp.c
++++ b/drivers/net/appletalk/ipddp.c
+@@ -191,7 +191,7 @@ static netdev_tx_t ipddp_xmit(struct sk_buff *skb, struct net_device *dev)
+ */
+ static int ipddp_create(struct ipddp_route *new_rt)
+ {
+- struct ipddp_route *rt = kmalloc(sizeof(*rt), GFP_KERNEL);
++ struct ipddp_route *rt = kzalloc(sizeof(*rt), GFP_KERNEL);
+
+ if (rt == NULL)
+ return -ENOMEM;
+diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
+index be7ec5a76a54..744ed6ddaf37 100644
+--- a/drivers/net/ethernet/broadcom/bcmsysport.c
++++ b/drivers/net/ethernet/broadcom/bcmsysport.c
+@@ -1023,15 +1023,6 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
+ goto out;
+ }
+
+- /* Insert TSB and checksum infos */
+- if (priv->tsb_en) {
+- skb = bcm_sysport_insert_tsb(skb, dev);
+- if (!skb) {
+- ret = NETDEV_TX_OK;
+- goto out;
+- }
+- }
+-
+ /* The Ethernet switch we are interfaced with needs packets to be at
+ * least 64 bytes (including FCS) otherwise they will be discarded when
+ * they enter the switch port logic. When Broadcom tags are enabled, we
+@@ -1039,13 +1030,21 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
+ * (including FCS and tag) because the length verification is done after
+ * the Broadcom tag is stripped off the ingress packet.
+ */
+- if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
++ if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+
+- skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ?
+- ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len;
++ /* Insert TSB and checksum infos */
++ if (priv->tsb_en) {
++ skb = bcm_sysport_insert_tsb(skb, dev);
++ if (!skb) {
++ ret = NETDEV_TX_OK;
++ goto out;
++ }
++ }
++
++ skb_len = skb->len;
+
+ mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(kdev, mapping)) {
+diff --git a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
+index 67befedef709..578c7f8f11bf 100644
+--- a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
++++ b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
+@@ -116,8 +116,7 @@ void xcv_setup_link(bool link_up, int link_speed)
+ int speed = 2;
+
+ if (!xcv) {
+- dev_err(&xcv->pdev->dev,
+- "XCV init not done, probe may have failed\n");
++ pr_err("XCV init not done, probe may have failed\n");
+ return;
+ }
+
+diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
+index 0f0de5b63622..d04a6c163445 100644
+--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
++++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
+@@ -133,17 +133,15 @@ cxgb_find_route6(struct cxgb4_lld_info *lldi,
+ if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
+ fl6.flowi6_oif = sin6_scope_id;
+ dst = ip6_route_output(&init_net, NULL, &fl6);
+- if (!dst)
+- goto out;
+- if (!cxgb_our_interface(lldi, get_real_dev,
+- ip6_dst_idev(dst)->dev) &&
+- !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
++ if (dst->error ||
++ (!cxgb_our_interface(lldi, get_real_dev,
++ ip6_dst_idev(dst)->dev) &&
++ !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK))) {
+ dst_release(dst);
+- dst = NULL;
++ return NULL;
+ }
+ }
+
+-out:
+ return dst;
+ }
+ EXPORT_SYMBOL(cxgb_find_route6);
+diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
+index 5626908f3f7a..1644896568c4 100644
+--- a/drivers/net/ethernet/emulex/benet/be_main.c
++++ b/drivers/net/ethernet/emulex/benet/be_main.c
+@@ -275,8 +275,7 @@ static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac)
+
+ /* Check if mac has already been added as part of uc-list */
+ for (i = 0; i < adapter->uc_macs; i++) {
+- if (ether_addr_equal((u8 *)&adapter->uc_list[i * ETH_ALEN],
+- mac)) {
++ if (ether_addr_equal(adapter->uc_list[i].mac, mac)) {
1643 + /* mac already added, skip addition */
1644 + adapter->pmac_id[0] = adapter->pmac_id[i + 1];
1645 + return 0;
1646 +@@ -363,8 +362,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
1647 + status = -EPERM;
1648 + goto err;
1649 + }
1650 +-done:
1651 ++
1652 ++ /* Remember currently programmed MAC */
1653 + ether_addr_copy(adapter->dev_mac, addr->sa_data);
1654 ++done:
1655 + ether_addr_copy(netdev->dev_addr, addr->sa_data);
1656 + dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
1657 + return 0;
1658 +@@ -1679,14 +1680,12 @@ static void be_clear_mc_list(struct be_adapter *adapter)
1659 +
1660 + static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx)
1661 + {
1662 +- if (ether_addr_equal((u8 *)&adapter->uc_list[uc_idx * ETH_ALEN],
1663 +- adapter->dev_mac)) {
1664 ++ if (ether_addr_equal(adapter->uc_list[uc_idx].mac, adapter->dev_mac)) {
1665 + adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0];
1666 + return 0;
1667 + }
1668 +
1669 +- return be_cmd_pmac_add(adapter,
1670 +- (u8 *)&adapter->uc_list[uc_idx * ETH_ALEN],
1671 ++ return be_cmd_pmac_add(adapter, adapter->uc_list[uc_idx].mac,
1672 + adapter->if_handle,
1673 + &adapter->pmac_id[uc_idx + 1], 0);
1674 + }
1675 +@@ -1722,9 +1721,8 @@ static void be_set_uc_list(struct be_adapter *adapter)
1676 + }
1677 +
1678 + if (adapter->update_uc_list) {
1679 +- i = 1; /* First slot is claimed by the Primary MAC */
1680 +-
1681 + /* cache the uc-list in adapter array */
1682 ++ i = 0;
1683 + netdev_for_each_uc_addr(ha, netdev) {
1684 + ether_addr_copy(adapter->uc_list[i].mac, ha->addr);
1685 + i++;
1686 +@@ -3639,8 +3637,10 @@ static void be_disable_if_filters(struct be_adapter *adapter)
1687 + {
1688 + /* Don't delete MAC on BE3 VFs without FILTMGMT privilege */
1689 + if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
1690 +- check_privilege(adapter, BE_PRIV_FILTMGMT))
1691 ++ check_privilege(adapter, BE_PRIV_FILTMGMT)) {
1692 + be_dev_mac_del(adapter, adapter->pmac_id[0]);
1693 ++ eth_zero_addr(adapter->dev_mac);
1694 ++ }
1695 +
1696 + be_clear_uc_list(adapter);
1697 + be_clear_mc_list(adapter);
1698 +@@ -3794,12 +3794,27 @@ static int be_enable_if_filters(struct be_adapter *adapter)
1699 + if (status)
1700 + return status;
1701 +
1702 +- /* Don't add MAC on BE3 VFs without FILTMGMT privilege */
1703 +- if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
1704 +- check_privilege(adapter, BE_PRIV_FILTMGMT)) {
1705 ++	/* This condition is usually true, as ->dev_mac is zeroed.
1706 ++	 * But on BE3 VFs the initial MAC is pre-programmed by the PF, and
1707 ++	 * a subsequent be_dev_mac_add() can fail (after a fresh boot).
1708 ++ */
1709 ++ if (!ether_addr_equal(adapter->dev_mac, adapter->netdev->dev_addr)) {
1710 ++ int old_pmac_id = -1;
1711 ++
1712 ++ /* Remember old programmed MAC if any - can happen on BE3 VF */
1713 ++ if (!is_zero_ether_addr(adapter->dev_mac))
1714 ++ old_pmac_id = adapter->pmac_id[0];
1715 ++
1716 + status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
1717 + if (status)
1718 + return status;
1719 ++
1720 ++ /* Delete the old programmed MAC as we successfully programmed
1721 ++ * a new MAC
1722 ++ */
1723 ++ if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0])
1724 ++ be_dev_mac_del(adapter, old_pmac_id);
1725 ++
1726 + ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
1727 + }
1728 +
1729 +@@ -4573,6 +4588,10 @@ static int be_mac_setup(struct be_adapter *adapter)
1730 +
1731 + memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
1732 + memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
1733 ++
1734 ++ /* Initial MAC for BE3 VFs is already programmed by PF */
1735 ++ if (BEx_chip(adapter) && be_virtfn(adapter))
1736 ++ memcpy(adapter->dev_mac, mac, ETH_ALEN);
1737 + }
1738 +
1739 + return 0;
1740 +diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
1741 +index 12aef1b15356..849b8712ec81 100644
1742 +--- a/drivers/net/ethernet/freescale/fec_main.c
1743 ++++ b/drivers/net/ethernet/freescale/fec_main.c
1744 +@@ -2923,6 +2923,7 @@ static void set_multicast_list(struct net_device *ndev)
1745 + struct netdev_hw_addr *ha;
1746 + unsigned int i, bit, data, crc, tmp;
1747 + unsigned char hash;
1748 ++ unsigned int hash_high = 0, hash_low = 0;
1749 +
1750 + if (ndev->flags & IFF_PROMISC) {
1751 + tmp = readl(fep->hwp + FEC_R_CNTRL);
1752 +@@ -2945,11 +2946,7 @@ static void set_multicast_list(struct net_device *ndev)
1753 + return;
1754 + }
1755 +
1756 +- /* Clear filter and add the addresses in hash register
1757 +- */
1758 +- writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1759 +- writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1760 +-
1761 ++ /* Add the addresses in hash register */
1762 + netdev_for_each_mc_addr(ha, ndev) {
1763 + /* calculate crc32 value of mac address */
1764 + crc = 0xffffffff;
1765 +@@ -2967,16 +2964,14 @@ static void set_multicast_list(struct net_device *ndev)
1766 + */
1767 + hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;
1768 +
1769 +- if (hash > 31) {
1770 +- tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1771 +- tmp |= 1 << (hash - 32);
1772 +- writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1773 +- } else {
1774 +- tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1775 +- tmp |= 1 << hash;
1776 +- writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1777 +- }
1778 ++ if (hash > 31)
1779 ++ hash_high |= 1 << (hash - 32);
1780 ++ else
1781 ++ hash_low |= 1 << hash;
1782 + }
1783 ++
1784 ++ writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1785 ++ writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1786 + }
1787 +
1788 + /* Set a MAC change in hardware. */
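
The fec_main.c hunk above replaces a per-address read-modify-write of the
two hash registers with local accumulation followed by a single write of
each register. Below is a minimal user-space sketch of that
accumulate-then-write pattern, assuming the reflected Ethernet CRC-32
(polynomial 0xEDB88320) that the driver computes per multicast address;
ether_crc32() and the sample addresses are illustrative stand-ins, not
driver code.

    #include <stdint.h>
    #include <stdio.h>

    #define FEC_HASH_BITS 6

    /* Reflected CRC-32 over the MAC bytes, LSB first. */
    static uint32_t ether_crc32(const uint8_t *mac, int len)
    {
        uint32_t crc = 0xffffffff;

        for (int i = 0; i < len; i++) {
            crc ^= mac[i];
            for (int bit = 0; bit < 8; bit++)
                crc = (crc & 1) ? (crc >> 1) ^ 0xedb88320u : crc >> 1;
        }
        return crc;
    }

    int main(void)
    {
        static const uint8_t macs[][6] = {
            { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
            { 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 },
        };
        uint32_t hash_high = 0, hash_low = 0;

        /* Accumulate every filter bit locally first... */
        for (unsigned i = 0; i < sizeof(macs) / sizeof(macs[0]); i++) {
            unsigned hash = (ether_crc32(macs[i], 6)
                             >> (32 - FEC_HASH_BITS)) & 0x3f;
            if (hash > 31)
                hash_high |= 1u << (hash - 32);
            else
                hash_low |= 1u << hash;
        }
        /* ...then "write" each hash register exactly once. */
        printf("GRP_HASH_HIGH=%08x GRP_HASH_LOW=%08x\n",
               hash_high, hash_low);
        return 0;
    }

Collecting all the bits first means the registers are written exactly
twice regardless of list length, and stale bits from the previous filter
cannot survive the update.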
1789 +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
1790 +index d4d97ca12e83..f9897d17f01d 100644
1791 +--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
1792 ++++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
1793 +@@ -251,13 +251,9 @@ static u32 freq_to_shift(u16 freq)
1794 + {
1795 + u32 freq_khz = freq * 1000;
1796 + u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
1797 +- u64 tmp_rounded =
1798 +- roundup_pow_of_two(max_val_cycles) > max_val_cycles ?
1799 +- roundup_pow_of_two(max_val_cycles) - 1 : UINT_MAX;
1800 +- u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
1801 +- max_val_cycles : tmp_rounded;
1802 ++ u64 max_val_cycles_rounded = 1ULL << fls64(max_val_cycles - 1);
1803 + /* calculate max possible multiplier in order to fit in 64bit */
1804 +- u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
1805 ++ u64 max_mul = div64_u64(ULLONG_MAX, max_val_cycles_rounded);
1806 +
1807 + /* This comes from the reverse of clocksource_khz2mult */
1808 + return ilog2(div_u64(max_mul * freq_khz, 1000000));
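
The en_clock.c hunk above collapses the old two-step rounding into a
single expression, 1ULL << fls64(max_val_cycles - 1). A short sketch of
why that rounds up to the next power of two, assuming fls64() returns the
1-based index of the most significant set bit (emulated here with
GCC/Clang's __builtin_clzll) and that the rounded result fits in 64 bits:

    #include <stdint.h>
    #include <stdio.h>

    /* 1-based index of the most significant set bit; 0 when x == 0. */
    static int fls64(uint64_t x)
    {
        return x ? 64 - __builtin_clzll(x) : 0;
    }

    int main(void)
    {
        static const uint64_t vals[] = { 3, 4, 5, 1000000 };

        for (unsigned i = 0; i < sizeof(vals) / sizeof(vals[0]); i++) {
            uint64_t n = vals[i];

            /* Subtracting 1 first keeps exact powers of two unchanged;
             * everything else rounds up: 3->4, 4->4, 5->8,
             * 1000000->1048576. */
            printf("%llu -> %llu\n", (unsigned long long)n,
                   (unsigned long long)(1ULL << fls64(n - 1)));
        }
        return 0;
    }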
1809 +diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
1810 +index 11623aad0e8e..10d3a9f6349e 100644
1811 +--- a/drivers/net/ethernet/renesas/ravb_main.c
1812 ++++ b/drivers/net/ethernet/renesas/ravb_main.c
1813 +@@ -941,14 +941,10 @@ static int ravb_poll(struct napi_struct *napi, int budget)
1814 + /* Receive error message handling */
1815 + priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
1816 + priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
1817 +- if (priv->rx_over_errors != ndev->stats.rx_over_errors) {
1818 ++ if (priv->rx_over_errors != ndev->stats.rx_over_errors)
1819 + ndev->stats.rx_over_errors = priv->rx_over_errors;
1820 +- netif_err(priv, rx_err, ndev, "Receive Descriptor Empty\n");
1821 +- }
1822 +- if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) {
1823 ++ if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
1824 + ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
1825 +- netif_err(priv, rx_err, ndev, "Receive FIFO Overflow\n");
1826 +- }
1827 + out:
1828 + return budget - quota;
1829 + }
1830 +diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
1831 +index cebde074d196..cb206e5526c4 100644
1832 +--- a/drivers/net/gtp.c
1833 ++++ b/drivers/net/gtp.c
1834 +@@ -69,7 +69,6 @@ struct gtp_dev {
1835 + struct socket *sock0;
1836 + struct socket *sock1u;
1837 +
1838 +- struct net *net;
1839 + struct net_device *dev;
1840 +
1841 + unsigned int hash_size;
1842 +@@ -316,7 +315,7 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
1843 +
1844 + netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
1845 +
1846 +- xnet = !net_eq(gtp->net, dev_net(gtp->dev));
1847 ++ xnet = !net_eq(sock_net(sk), dev_net(gtp->dev));
1848 +
1849 + switch (udp_sk(sk)->encap_type) {
1850 + case UDP_ENCAP_GTP0:
1851 +@@ -612,7 +611,7 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
1852 + pktinfo.fl4.saddr, pktinfo.fl4.daddr,
1853 + pktinfo.iph->tos,
1854 + ip4_dst_hoplimit(&pktinfo.rt->dst),
1855 +- htons(IP_DF),
1856 ++ 0,
1857 + pktinfo.gtph_port, pktinfo.gtph_port,
1858 + true, false);
1859 + break;
1860 +@@ -658,7 +657,7 @@ static void gtp_link_setup(struct net_device *dev)
1861 + static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
1862 + static void gtp_hashtable_free(struct gtp_dev *gtp);
1863 + static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
1864 +- int fd_gtp0, int fd_gtp1, struct net *src_net);
1865 ++ int fd_gtp0, int fd_gtp1);
1866 +
1867 + static int gtp_newlink(struct net *src_net, struct net_device *dev,
1868 + struct nlattr *tb[], struct nlattr *data[])
1869 +@@ -675,7 +674,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
1870 + fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
1871 + fd1 = nla_get_u32(data[IFLA_GTP_FD1]);
1872 +
1873 +- err = gtp_encap_enable(dev, gtp, fd0, fd1, src_net);
1874 ++ err = gtp_encap_enable(dev, gtp, fd0, fd1);
1875 + if (err < 0)
1876 + goto out_err;
1877 +
1878 +@@ -821,7 +820,7 @@ static void gtp_hashtable_free(struct gtp_dev *gtp)
1879 + }
1880 +
1881 + static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
1882 +- int fd_gtp0, int fd_gtp1, struct net *src_net)
1883 ++ int fd_gtp0, int fd_gtp1)
1884 + {
1885 + struct udp_tunnel_sock_cfg tuncfg = {NULL};
1886 + struct socket *sock0, *sock1u;
1887 +@@ -858,7 +857,6 @@ static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
1888 +
1889 + gtp->sock0 = sock0;
1890 + gtp->sock1u = sock1u;
1891 +- gtp->net = src_net;
1892 +
1893 + tuncfg.sk_user_data = gtp;
1894 + tuncfg.encap_rcv = gtp_encap_recv;
1895 +diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
1896 +index 222918828655..fbf5945ce00d 100644
1897 +--- a/drivers/net/phy/micrel.c
1898 ++++ b/drivers/net/phy/micrel.c
1899 +@@ -1020,7 +1020,7 @@ static struct phy_driver ksphy_driver[] = {
1900 + .phy_id = PHY_ID_KSZ8795,
1901 + .phy_id_mask = MICREL_PHY_ID_MASK,
1902 + .name = "Micrel KSZ8795",
1903 +- .features = (SUPPORTED_Pause | SUPPORTED_Asym_Pause),
1904 ++ .features = PHY_BASIC_FEATURES,
1905 + .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
1906 + .config_init = kszphy_config_init,
1907 + .config_aneg = ksz8873mll_config_aneg,
1908 +diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
1909 +index cb7365bdf6e0..5b1d2e8402d9 100644
1910 +--- a/drivers/net/xen-netback/common.h
1911 ++++ b/drivers/net/xen-netback/common.h
1912 +@@ -113,10 +113,10 @@ struct xenvif_stats {
1913 + * A subset of struct net_device_stats that contains only the
1914 + * fields that are updated in netback.c for each queue.
1915 + */
1916 +- unsigned int rx_bytes;
1917 +- unsigned int rx_packets;
1918 +- unsigned int tx_bytes;
1919 +- unsigned int tx_packets;
1920 ++ u64 rx_bytes;
1921 ++ u64 rx_packets;
1922 ++ u64 tx_bytes;
1923 ++ u64 tx_packets;
1924 +
1925 + /* Additional stats used by xenvif */
1926 + unsigned long rx_gso_checksum_fixup;
1927 +diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
1928 +index 5bfaf5578810..618013e7f87b 100644
1929 +--- a/drivers/net/xen-netback/interface.c
1930 ++++ b/drivers/net/xen-netback/interface.c
1931 +@@ -225,10 +225,10 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
1932 + {
1933 + struct xenvif *vif = netdev_priv(dev);
1934 + struct xenvif_queue *queue = NULL;
1935 +- unsigned long rx_bytes = 0;
1936 +- unsigned long rx_packets = 0;
1937 +- unsigned long tx_bytes = 0;
1938 +- unsigned long tx_packets = 0;
1939 ++ u64 rx_bytes = 0;
1940 ++ u64 rx_packets = 0;
1941 ++ u64 tx_bytes = 0;
1942 ++ u64 tx_packets = 0;
1943 + unsigned int index;
1944 +
1945 + spin_lock(&vif->lock);
1946 +diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
1947 +index cd442e46afb4..8d498a997e25 100644
1948 +--- a/drivers/net/xen-netfront.c
1949 ++++ b/drivers/net/xen-netfront.c
1950 +@@ -1854,27 +1854,19 @@ static int talk_to_netback(struct xenbus_device *dev,
1951 + xennet_destroy_queues(info);
1952 +
1953 + err = xennet_create_queues(info, &num_queues);
1954 +- if (err < 0)
1955 +- goto destroy_ring;
1956 ++ if (err < 0) {
1957 ++ xenbus_dev_fatal(dev, err, "creating queues");
1958 ++ kfree(info->queues);
1959 ++ info->queues = NULL;
1960 ++ goto out;
1961 ++ }
1962 +
1963 + /* Create shared ring, alloc event channel -- for each queue */
1964 + for (i = 0; i < num_queues; ++i) {
1965 + queue = &info->queues[i];
1966 + err = setup_netfront(dev, queue, feature_split_evtchn);
1967 +- if (err) {
1968 +- /* setup_netfront() will tidy up the current
1969 +- * queue on error, but we need to clean up
1970 +- * those already allocated.
1971 +- */
1972 +- if (i > 0) {
1973 +- rtnl_lock();
1974 +- netif_set_real_num_tx_queues(info->netdev, i);
1975 +- rtnl_unlock();
1976 +- goto destroy_ring;
1977 +- } else {
1978 +- goto out;
1979 +- }
1980 +- }
1981 ++ if (err)
1982 ++ goto destroy_ring;
1983 + }
1984 +
1985 + again:
1986 +@@ -1964,9 +1956,9 @@ static int talk_to_netback(struct xenbus_device *dev,
1987 + xenbus_transaction_end(xbt, 1);
1988 + destroy_ring:
1989 + xennet_disconnect_backend(info);
1990 +- kfree(info->queues);
1991 +- info->queues = NULL;
1992 ++ xennet_destroy_queues(info);
1993 + out:
1994 ++ device_unregister(&dev->dev);
1995 + return err;
1996 + }
1997 +
1998 +diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
1999 +index 55ce769cecee..fbd6d487103f 100644
2000 +--- a/drivers/nvme/target/core.c
2001 ++++ b/drivers/nvme/target/core.c
2002 +@@ -816,6 +816,9 @@ static void nvmet_ctrl_free(struct kref *ref)
2003 + list_del(&ctrl->subsys_entry);
2004 + mutex_unlock(&subsys->lock);
2005 +
2006 ++ flush_work(&ctrl->async_event_work);
2007 ++ cancel_work_sync(&ctrl->fatal_err_work);
2008 ++
2009 + ida_simple_remove(&subsys->cntlid_ida, ctrl->cntlid);
2010 + nvmet_subsys_put(subsys);
2011 +
2012 +diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
2013 +index 91f5f55a8a9b..59059ffbb98c 100644
2014 +--- a/drivers/scsi/qla2xxx/qla_target.c
2015 ++++ b/drivers/scsi/qla2xxx/qla_target.c
2016 +@@ -668,11 +668,9 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
2017 + {
2018 + struct qla_hw_data *ha = vha->hw;
2019 + struct qla_tgt_sess *sess = NULL;
2020 +- uint32_t unpacked_lun, lun = 0;
2021 + uint16_t loop_id;
2022 + int res = 0;
2023 + struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
2024 +- struct atio_from_isp *a = (struct atio_from_isp *)iocb;
2025 + unsigned long flags;
2026 +
2027 + loop_id = le16_to_cpu(n->u.isp24.nport_handle);
2028 +@@ -725,11 +723,7 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
2029 + "loop_id %d)\n", vha->host_no, sess, sess->port_name,
2030 + mcmd, loop_id);
2031 +
2032 +- lun = a->u.isp24.fcp_cmnd.lun;
2033 +- unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
2034 +-
2035 +- return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
2036 +- iocb, QLA24XX_MGMT_SEND_NACK);
2037 ++ return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
2038 + }
2039 +
2040 + /* ha->tgt.sess_lock supposed to be held on entry */
2041 +diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
2042 +index c1eafbd7610a..da51fed143cd 100644
2043 +--- a/drivers/spi/spi-axi-spi-engine.c
2044 ++++ b/drivers/spi/spi-axi-spi-engine.c
2045 +@@ -553,7 +553,7 @@ static int spi_engine_probe(struct platform_device *pdev)
2046 +
2047 + static int spi_engine_remove(struct platform_device *pdev)
2048 + {
2049 +- struct spi_master *master = platform_get_drvdata(pdev);
2050 ++ struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
2051 + struct spi_engine *spi_engine = spi_master_get_devdata(master);
2052 + int irq = platform_get_irq(pdev, 0);
2053 +
2054 +@@ -561,6 +561,8 @@ static int spi_engine_remove(struct platform_device *pdev)
2055 +
2056 + free_irq(irq, master);
2057 +
2058 ++ spi_master_put(master);
2059 ++
2060 + writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
2061 + writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
2062 + writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);
2063 +diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
2064 +index 1de3a772eb7d..cbf02ebb30a2 100644
2065 +--- a/drivers/spi/spi-sh-msiof.c
2066 ++++ b/drivers/spi/spi-sh-msiof.c
2067 +@@ -862,7 +862,7 @@ static int sh_msiof_transfer_one(struct spi_master *master,
2068 + break;
2069 + copy32 = copy_bswap32;
2070 + } else if (bits <= 16) {
2071 +- if (l & 1)
2072 ++ if (l & 3)
2073 + break;
2074 + copy32 = copy_wswap32;
2075 + } else {
2076 +diff --git a/drivers/staging/greybus/loopback.c b/drivers/staging/greybus/loopback.c
2077 +index 29dc249b0c74..3c2c233c2e49 100644
2078 +--- a/drivers/staging/greybus/loopback.c
2079 ++++ b/drivers/staging/greybus/loopback.c
2080 +@@ -1034,8 +1034,10 @@ static int gb_loopback_fn(void *data)
2081 + error = gb_loopback_async_sink(gb, size);
2082 + }
2083 +
2084 +- if (error)
2085 ++ if (error) {
2086 + gb->error++;
2087 ++ gb->iteration_count++;
2088 ++ }
2089 + } else {
2090 + /* We are effectively single threaded here */
2091 + if (type == GB_LOOPBACK_TYPE_PING)
2092 +diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
2093 +index 436691814a5e..27333d973bcd 100644
2094 +--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
2095 ++++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
2096 +@@ -401,15 +401,13 @@ static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
2097 + result = VM_FAULT_LOCKED;
2098 + break;
2099 + case -ENODATA:
2100 ++ case -EAGAIN:
2101 + case -EFAULT:
2102 + result = VM_FAULT_NOPAGE;
2103 + break;
2104 + case -ENOMEM:
2105 + result = VM_FAULT_OOM;
2106 + break;
2107 +- case -EAGAIN:
2108 +- result = VM_FAULT_RETRY;
2109 +- break;
2110 + default:
2111 + result = VM_FAULT_SIGBUS;
2112 + break;
2113 +diff --git a/drivers/staging/media/cec/cec-adap.c b/drivers/staging/media/cec/cec-adap.c
2114 +index 499d7bfe7147..75e6d5e0504f 100644
2115 +--- a/drivers/staging/media/cec/cec-adap.c
2116 ++++ b/drivers/staging/media/cec/cec-adap.c
2117 +@@ -608,8 +608,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
2118 + }
2119 + memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len);
2120 + if (msg->len == 1) {
2121 +- if (cec_msg_initiator(msg) != 0xf ||
2122 +- cec_msg_destination(msg) == 0xf) {
2123 ++ if (cec_msg_destination(msg) == 0xf) {
2124 + dprintk(1, "cec_transmit_msg: invalid poll message\n");
2125 + return -EINVAL;
2126 + }
2127 +@@ -634,7 +633,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
2128 + dprintk(1, "cec_transmit_msg: destination is the adapter itself\n");
2129 + return -EINVAL;
2130 + }
2131 +- if (cec_msg_initiator(msg) != 0xf &&
2132 ++ if (msg->len > 1 && adap->is_configured &&
2133 + !cec_has_log_addr(adap, cec_msg_initiator(msg))) {
2134 + dprintk(1, "cec_transmit_msg: initiator has unknown logical address %d\n",
2135 + cec_msg_initiator(msg));
2136 +@@ -883,7 +882,7 @@ static int cec_config_log_addr(struct cec_adapter *adap,
2137 +
2138 + /* Send poll message */
2139 + msg.len = 1;
2140 +- msg.msg[0] = 0xf0 | log_addr;
2141 ++ msg.msg[0] = (log_addr << 4) | log_addr;
2142 + err = cec_transmit_msg_fh(adap, &msg, NULL, true);
2143 +
2144 + /*
2145 +diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
2146 +index ee2dcd05010f..0b60d1e0333e 100644
2147 +--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
2148 ++++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
2149 +@@ -107,10 +107,10 @@ void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv)
2150 +
2151 + void rtw_free_mlme_priv(struct mlme_priv *pmlmepriv)
2152 + {
2153 +- rtw_free_mlme_priv_ie_data(pmlmepriv);
2154 +-
2155 +- if (pmlmepriv)
2156 ++ if (pmlmepriv) {
2157 ++ rtw_free_mlme_priv_ie_data(pmlmepriv);
2158 + vfree(pmlmepriv->free_bss_buf);
2159 ++ }
2160 + }
2161 +
2162 + struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv)
2163 +diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
2164 +index f8c31070a337..2ffebb7e5ff8 100644
2165 +--- a/drivers/tty/serial/8250/8250_fintek.c
2166 ++++ b/drivers/tty/serial/8250/8250_fintek.c
2167 +@@ -121,7 +121,7 @@ static int fintek_8250_rs485_config(struct uart_port *port,
2168 +
2169 + if ((!!(rs485->flags & SER_RS485_RTS_ON_SEND)) ==
2170 + (!!(rs485->flags & SER_RS485_RTS_AFTER_SEND)))
2171 +- rs485->flags &= SER_RS485_ENABLED;
2172 ++ rs485->flags &= ~SER_RS485_ENABLED;
2173 + else
2174 + config |= RS485_URA;
2175 +
2176 +diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
2177 +index 22d32d295c5b..b80ea872b039 100644
2178 +--- a/drivers/tty/serial/8250/8250_pci.c
2179 ++++ b/drivers/tty/serial/8250/8250_pci.c
2180 +@@ -5568,6 +5568,9 @@ static struct pci_device_id serial_pci_tbl[] = {
2181 + { PCI_DEVICE(0x1601, 0x0800), .driver_data = pbn_b0_4_1250000 },
2182 + { PCI_DEVICE(0x1601, 0xa801), .driver_data = pbn_b0_4_1250000 },
2183 +
2184 ++ /* Amazon PCI serial device */
2185 ++ { PCI_DEVICE(0x1d0f, 0x8250), .driver_data = pbn_b0_1_115200 },
2186 ++
2187 + /*
2188 + * These entries match devices with class COMMUNICATION_SERIAL,
2189 + * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
2190 +diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
2191 +index 1ef31e3ee4a1..f6e4373a8850 100644
2192 +--- a/drivers/tty/serial/8250/8250_port.c
2193 ++++ b/drivers/tty/serial/8250/8250_port.c
2194 +@@ -2526,8 +2526,11 @@ static void serial8250_set_divisor(struct uart_port *port, unsigned int baud,
2195 + serial_dl_write(up, quot);
2196 +
2197 + /* XR17V35x UARTs have an extra fractional divisor register (DLD) */
2198 +- if (up->port.type == PORT_XR17V35X)
2199 ++ if (up->port.type == PORT_XR17V35X) {
2200 ++ /* Preserve bits not related to baudrate; DLD[7:4]. */
2201 ++ quot_frac |= serial_port_in(port, 0x2) & 0xf0;
2202 + serial_port_out(port, 0x2, quot_frac);
2203 ++ }
2204 + }
2205 +
2206 + static unsigned int serial8250_get_baud_rate(struct uart_port *port,
2207 +diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
2208 +index 701c085bb19b..53cbf4ebef10 100644
2209 +--- a/drivers/tty/sysrq.c
2210 ++++ b/drivers/tty/sysrq.c
2211 +@@ -243,8 +243,10 @@ static void sysrq_handle_showallcpus(int key)
2212 + * architecture has no support for it:
2213 + */
2214 + if (!trigger_all_cpu_backtrace()) {
2215 +- struct pt_regs *regs = get_irq_regs();
2216 ++ struct pt_regs *regs = NULL;
2217 +
2218 ++ if (in_irq())
2219 ++ regs = get_irq_regs();
2220 + if (regs) {
2221 + pr_info("CPU%d:\n", smp_processor_id());
2222 + show_regs(regs);
2223 +@@ -263,7 +265,10 @@ static struct sysrq_key_op sysrq_showallcpus_op = {
2224 +
2225 + static void sysrq_handle_showregs(int key)
2226 + {
2227 +- struct pt_regs *regs = get_irq_regs();
2228 ++ struct pt_regs *regs = NULL;
2229 ++
2230 ++ if (in_irq())
2231 ++ regs = get_irq_regs();
2232 + if (regs)
2233 + show_regs(regs);
2234 + perf_event_print_debug();
2235 +diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
2236 +index 5008f71fb08d..5ebe04d3598b 100644
2237 +--- a/drivers/usb/core/config.c
2238 ++++ b/drivers/usb/core/config.c
2239 +@@ -900,14 +900,25 @@ void usb_release_bos_descriptor(struct usb_device *dev)
2240 + }
2241 + }
2242 +
2243 ++static const __u8 bos_desc_len[256] = {
2244 ++ [USB_CAP_TYPE_WIRELESS_USB] = USB_DT_USB_WIRELESS_CAP_SIZE,
2245 ++ [USB_CAP_TYPE_EXT] = USB_DT_USB_EXT_CAP_SIZE,
2246 ++ [USB_SS_CAP_TYPE] = USB_DT_USB_SS_CAP_SIZE,
2247 ++ [USB_SSP_CAP_TYPE] = USB_DT_USB_SSP_CAP_SIZE(1),
2248 ++ [CONTAINER_ID_TYPE] = USB_DT_USB_SS_CONTN_ID_SIZE,
2249 ++ [USB_PTM_CAP_TYPE] = USB_DT_USB_PTM_ID_SIZE,
2250 ++};
2251 ++
2252 + /* Get BOS descriptor set */
2253 + int usb_get_bos_descriptor(struct usb_device *dev)
2254 + {
2255 + struct device *ddev = &dev->dev;
2256 + struct usb_bos_descriptor *bos;
2257 + struct usb_dev_cap_header *cap;
2258 ++ struct usb_ssp_cap_descriptor *ssp_cap;
2259 + unsigned char *buffer;
2260 +- int length, total_len, num, i;
2261 ++ int length, total_len, num, i, ssac;
2262 ++ __u8 cap_type;
2263 + int ret;
2264 +
2265 + bos = kzalloc(sizeof(struct usb_bos_descriptor), GFP_KERNEL);
2266 +@@ -960,7 +971,13 @@ int usb_get_bos_descriptor(struct usb_device *dev)
2267 + dev->bos->desc->bNumDeviceCaps = i;
2268 + break;
2269 + }
2270 ++ cap_type = cap->bDevCapabilityType;
2271 + length = cap->bLength;
2272 ++ if (bos_desc_len[cap_type] && length < bos_desc_len[cap_type]) {
2273 ++ dev->bos->desc->bNumDeviceCaps = i;
2274 ++ break;
2275 ++ }
2276 ++
2277 + total_len -= length;
2278 +
2279 + if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
2280 +@@ -968,7 +985,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
2281 + continue;
2282 + }
2283 +
2284 +- switch (cap->bDevCapabilityType) {
2285 ++ switch (cap_type) {
2286 + case USB_CAP_TYPE_WIRELESS_USB:
2287 + /* Wireless USB cap descriptor is handled by wusb */
2288 + break;
2289 +@@ -981,8 +998,11 @@ int usb_get_bos_descriptor(struct usb_device *dev)
2290 + (struct usb_ss_cap_descriptor *)buffer;
2291 + break;
2292 + case USB_SSP_CAP_TYPE:
2293 +- dev->bos->ssp_cap =
2294 +- (struct usb_ssp_cap_descriptor *)buffer;
2295 ++ ssp_cap = (struct usb_ssp_cap_descriptor *)buffer;
2296 ++ ssac = (le32_to_cpu(ssp_cap->bmAttributes) &
2297 ++ USB_SSP_SUBLINK_SPEED_ATTRIBS) + 1;
2298 ++ if (length >= USB_DT_USB_SSP_CAP_SIZE(ssac))
2299 ++ dev->bos->ssp_cap = ssp_cap;
2300 + break;
2301 + case CONTAINER_ID_TYPE:
2302 + dev->bos->ss_id =
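
The config.c hunk above introduces a table-driven sanity check: each BOS
capability type maps to a minimum descriptor length, with zero meaning no
known minimum, and parsing stops at the first capability shorter than its
table entry. A simplified stand-alone sketch of that pattern; the type
codes and sizes below stand in for the USB_CAP_TYPE_*/USB_DT_*_SIZE
constants and are not the real descriptor layout:

    #include <stdint.h>
    #include <stdio.h>

    /* Minimum length per capability type; 0 = no minimum known. */
    static const uint8_t min_len[256] = {
        [0x01] = 11,   /* wireless USB */
        [0x02] = 7,    /* USB 2.0 extension */
        [0x03] = 10,   /* SuperSpeed */
        [0x04] = 20,   /* container ID */
    };

    static int cap_ok(uint8_t type, uint8_t length)
    {
        return !(min_len[type] && length < min_len[type]);
    }

    int main(void)
    {
        printf("%d\n", cap_ok(0x03, 10));  /* 1: long enough */
        printf("%d\n", cap_ok(0x03, 4));   /* 0: truncated, reject */
        printf("%d\n", cap_ok(0x77, 3));   /* 1: unknown type, no check */
        return 0;
    }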
2303 +diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
2304 +index fa619354c5c5..893ebae51029 100644
2305 +--- a/drivers/usb/core/devio.c
2306 ++++ b/drivers/usb/core/devio.c
2307 +@@ -134,42 +134,38 @@ enum snoop_when {
2308 + #define USB_DEVICE_DEV MKDEV(USB_DEVICE_MAJOR, 0)
2309 +
2310 + /* Limit on the total amount of memory we can allocate for transfers */
2311 +-static unsigned usbfs_memory_mb = 16;
2312 ++static u32 usbfs_memory_mb = 16;
2313 + module_param(usbfs_memory_mb, uint, 0644);
2314 + MODULE_PARM_DESC(usbfs_memory_mb,
2315 + "maximum MB allowed for usbfs buffers (0 = no limit)");
2316 +
2317 + /* Hard limit, necessary to avoid arithmetic overflow */
2318 +-#define USBFS_XFER_MAX (UINT_MAX / 2 - 1000000)
2319 ++#define USBFS_XFER_MAX (UINT_MAX / 2 - 1000000)
2320 +
2321 +-static atomic_t usbfs_memory_usage; /* Total memory currently allocated */
2322 ++static atomic64_t usbfs_memory_usage; /* Total memory currently allocated */
2323 +
2324 + /* Check whether it's okay to allocate more memory for a transfer */
2325 +-static int usbfs_increase_memory_usage(unsigned amount)
2326 ++static int usbfs_increase_memory_usage(u64 amount)
2327 + {
2328 +- unsigned lim;
2329 ++ u64 lim;
2330 +
2331 +- /*
2332 +- * Convert usbfs_memory_mb to bytes, avoiding overflows.
2333 +- * 0 means use the hard limit (effectively unlimited).
2334 +- */
2335 + lim = ACCESS_ONCE(usbfs_memory_mb);
2336 +- if (lim == 0 || lim > (USBFS_XFER_MAX >> 20))
2337 +- lim = USBFS_XFER_MAX;
2338 +- else
2339 +- lim <<= 20;
2340 ++ lim <<= 20;
2341 +
2342 +- atomic_add(amount, &usbfs_memory_usage);
2343 +- if (atomic_read(&usbfs_memory_usage) <= lim)
2344 +- return 0;
2345 +- atomic_sub(amount, &usbfs_memory_usage);
2346 +- return -ENOMEM;
2347 ++ atomic64_add(amount, &usbfs_memory_usage);
2348 ++
2349 ++ if (lim > 0 && atomic64_read(&usbfs_memory_usage) > lim) {
2350 ++ atomic64_sub(amount, &usbfs_memory_usage);
2351 ++ return -ENOMEM;
2352 ++ }
2353 ++
2354 ++ return 0;
2355 + }
2356 +
2357 + /* Memory for a transfer is being deallocated */
2358 +-static void usbfs_decrease_memory_usage(unsigned amount)
2359 ++static void usbfs_decrease_memory_usage(u64 amount)
2360 + {
2361 +- atomic_sub(amount, &usbfs_memory_usage);
2362 ++ atomic64_sub(amount, &usbfs_memory_usage);
2363 + }
2364 +
2365 + static int connected(struct usb_dev_state *ps)
2366 +@@ -1191,7 +1187,7 @@ static int proc_bulk(struct usb_dev_state *ps, void __user *arg)
2367 + if (!usb_maxpacket(dev, pipe, !(bulk.ep & USB_DIR_IN)))
2368 + return -EINVAL;
2369 + len1 = bulk.len;
2370 +- if (len1 >= USBFS_XFER_MAX)
2371 ++ if (len1 >= (INT_MAX - sizeof(struct urb)))
2372 + return -EINVAL;
2373 + ret = usbfs_increase_memory_usage(len1 + sizeof(struct urb));
2374 + if (ret)
2375 +@@ -1458,13 +1454,19 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
2376 + int number_of_packets = 0;
2377 + unsigned int stream_id = 0;
2378 + void *buf;
2379 +-
2380 +- if (uurb->flags & ~(USBDEVFS_URB_ISO_ASAP |
2381 +- USBDEVFS_URB_SHORT_NOT_OK |
2382 ++ unsigned long mask = USBDEVFS_URB_SHORT_NOT_OK |
2383 + USBDEVFS_URB_BULK_CONTINUATION |
2384 + USBDEVFS_URB_NO_FSBR |
2385 + USBDEVFS_URB_ZERO_PACKET |
2386 +- USBDEVFS_URB_NO_INTERRUPT))
2387 ++ USBDEVFS_URB_NO_INTERRUPT;
2388 ++ /* USBDEVFS_URB_ISO_ASAP is a special case */
2389 ++ if (uurb->type == USBDEVFS_URB_TYPE_ISO)
2390 ++ mask |= USBDEVFS_URB_ISO_ASAP;
2391 ++
2392 ++ if (uurb->flags & ~mask)
2393 ++ return -EINVAL;
2394 ++
2395 ++ if ((unsigned int)uurb->buffer_length >= USBFS_XFER_MAX)
2396 + return -EINVAL;
2397 + if (uurb->buffer_length > 0 && !uurb->buffer)
2398 + return -EINVAL;
2399 +@@ -1584,10 +1586,6 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
2400 + return -EINVAL;
2401 + }
2402 +
2403 +- if (uurb->buffer_length >= USBFS_XFER_MAX) {
2404 +- ret = -EINVAL;
2405 +- goto error;
2406 +- }
2407 + if (uurb->buffer_length > 0 &&
2408 + !access_ok(is_in ? VERIFY_WRITE : VERIFY_READ,
2409 + uurb->buffer, uurb->buffer_length)) {
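
The devio.c hunk above moves usbfs memory accounting to a 64-bit counter
with an optimistic add-check-rollback sequence. A user-space sketch of
the same pattern, using C11 atomics as a stand-in for the kernel's
atomic64_t; charge() and uncharge() are hypothetical names used only for
illustration:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <errno.h>
    #include <stdio.h>

    static _Atomic int64_t usage;

    static int charge(int64_t amount, int64_t limit_bytes)
    {
        /* Optimistically add, then check; back the charge out again on
         * failure. A limit of 0 disables the check. */
        atomic_fetch_add(&usage, amount);
        if (limit_bytes > 0 && atomic_load(&usage) > limit_bytes) {
            atomic_fetch_sub(&usage, amount);
            return -ENOMEM;
        }
        return 0;
    }

    static void uncharge(int64_t amount)
    {
        atomic_fetch_sub(&usage, amount);
    }

    int main(void)
    {
        int64_t lim = 16ll << 20;  /* 16 MB, the usbfs default */

        printf("%d\n", charge(8ll << 20, lim));   /* 0: fits */
        printf("%d\n", charge(10ll << 20, lim));  /* -ENOMEM: over */
        uncharge(8ll << 20);
        return 0;
    }

Concurrent callers can transiently push the counter past the limit, but
each over-limit caller backs its own charge out, so the configured limit
still holds.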
2410 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
2411 +index 706b3d6a7614..d0d3f9ef9f10 100644
2412 +--- a/drivers/usb/core/hub.c
2413 ++++ b/drivers/usb/core/hub.c
2414 +@@ -4925,6 +4925,15 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
2415 + usb_put_dev(udev);
2416 + if ((status == -ENOTCONN) || (status == -ENOTSUPP))
2417 + break;
2418 ++
2419 ++ /* When halfway through our retry count, power-cycle the port */
2420 ++ if (i == (SET_CONFIG_TRIES / 2) - 1) {
2421 ++ dev_info(&port_dev->dev, "attempt power cycle\n");
2422 ++ usb_hub_set_port_power(hdev, hub, port1, false);
2423 ++ msleep(2 * hub_power_on_good_delay(hub));
2424 ++ usb_hub_set_port_power(hdev, hub, port1, true);
2425 ++ msleep(hub_power_on_good_delay(hub));
2426 ++ }
2427 + }
2428 + if (hub->hdev->parent ||
2429 + !hcd->driver->port_handed_over ||
2430 +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
2431 +index 37c418e581fb..50010282c010 100644
2432 +--- a/drivers/usb/core/quirks.c
2433 ++++ b/drivers/usb/core/quirks.c
2434 +@@ -151,6 +151,9 @@ static const struct usb_device_id usb_quirk_list[] = {
2435 + /* appletouch */
2436 + { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
2437 +
2438 ++ /* Genesys Logic hub, internally used by KY-688 USB 3.1 Type-C Hub */
2439 ++ { USB_DEVICE(0x05e3, 0x0612), .driver_info = USB_QUIRK_NO_LPM },
2440 ++
2441 + /* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
2442 + { USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },
2443 +
2444 +diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
2445 +index 273320fa30ae..4fce83266926 100644
2446 +--- a/drivers/usb/gadget/function/f_fs.c
2447 ++++ b/drivers/usb/gadget/function/f_fs.c
2448 +@@ -2263,7 +2263,7 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
2449 +
2450 + if (len < sizeof(*d) ||
2451 + d->bFirstInterfaceNumber >= ffs->interfaces_count ||
2452 +- !d->Reserved1)
2453 ++ d->Reserved1)
2454 + return -EINVAL;
2455 + for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
2456 + if (d->Reserved2[i])
2457 +diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
2458 +index 1a2614aae42c..3ff6468a1f5f 100644
2459 +--- a/drivers/usb/host/ehci-dbg.c
2460 ++++ b/drivers/usb/host/ehci-dbg.c
2461 +@@ -837,7 +837,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
2462 + default: /* unknown */
2463 + break;
2464 + }
2465 +- temp = (cap >> 8) & 0xff;
2466 ++ offset = (cap >> 8) & 0xff;
2467 + }
2468 + }
2469 + #endif
2470 +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
2471 +index b7114c3f52aa..a3ecd8bd5324 100644
2472 +--- a/drivers/usb/host/xhci-mem.c
2473 ++++ b/drivers/usb/host/xhci-mem.c
2474 +@@ -996,6 +996,12 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
2475 + if (!vdev)
2476 + return;
2477 +
2478 ++ if (vdev->real_port == 0 ||
2479 ++ vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
2480 ++ xhci_dbg(xhci, "Bad vdev->real_port.\n");
2481 ++ goto out;
2482 ++ }
2483 ++
2484 + tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
2485 + list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
2486 + /* is this a hub device that added a tt_info to the tts list */
2487 +@@ -1009,6 +1015,7 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
2488 + }
2489 + }
2490 + }
2491 ++out:
2492 + /* we are now at a leaf device */
2493 + xhci_free_virt_device(xhci, slot_id);
2494 + }
2495 +diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c
2496 +index ab5d364f6e8c..335a1ef35224 100644
2497 +--- a/drivers/usb/phy/phy-tahvo.c
2498 ++++ b/drivers/usb/phy/phy-tahvo.c
2499 +@@ -368,7 +368,8 @@ static int tahvo_usb_probe(struct platform_device *pdev)
2500 + tu->extcon = devm_extcon_dev_allocate(&pdev->dev, tahvo_cable);
2501 + if (IS_ERR(tu->extcon)) {
2502 + dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
2503 +- return -ENOMEM;
2504 ++ ret = PTR_ERR(tu->extcon);
2505 ++ goto err_disable_clk;
2506 + }
2507 +
2508 + ret = devm_extcon_dev_register(&pdev->dev, tu->extcon);
2509 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
2510 +index db3d34c2c82e..ffa8ec917ff5 100644
2511 +--- a/drivers/usb/serial/option.c
2512 ++++ b/drivers/usb/serial/option.c
2513 +@@ -241,6 +241,7 @@ static void option_instat_callback(struct urb *urb);
2514 + /* These Quectel products use Quectel's vendor ID */
2515 + #define QUECTEL_PRODUCT_EC21 0x0121
2516 + #define QUECTEL_PRODUCT_EC25 0x0125
2517 ++#define QUECTEL_PRODUCT_BG96 0x0296
2518 +
2519 + #define CMOTECH_VENDOR_ID 0x16d8
2520 + #define CMOTECH_PRODUCT_6001 0x6001
2521 +@@ -1185,6 +1186,8 @@ static const struct usb_device_id option_ids[] = {
2522 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
2523 + { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
2524 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
2525 ++ { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
2526 ++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
2527 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
2528 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
2529 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
2530 +diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h
2531 +index a155cd02bce2..ecc83c405a8b 100644
2532 +--- a/drivers/usb/storage/uas-detect.h
2533 ++++ b/drivers/usb/storage/uas-detect.h
2534 +@@ -111,6 +111,10 @@ static int uas_use_uas_driver(struct usb_interface *intf,
2535 + }
2536 + }
2537 +
2538 ++ /* All Seagate disk enclosures have broken ATA pass-through support */
2539 ++ if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bc2)
2540 ++ flags |= US_FL_NO_ATA_1X;
2541 ++
2542 + usb_stor_adjust_quirks(udev, &flags);
2543 +
2544 + if (flags & US_FL_IGNORE_UAS) {
2545 +diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
2546 +index 85d3e648bdea..59b3f62a2d64 100644
2547 +--- a/drivers/vfio/vfio_iommu_spapr_tce.c
2548 ++++ b/drivers/vfio/vfio_iommu_spapr_tce.c
2549 +@@ -1123,12 +1123,11 @@ static long tce_iommu_ioctl(void *iommu_data,
2550 + mutex_lock(&container->lock);
2551 +
2552 + ret = tce_iommu_create_default_window(container);
2553 +- if (ret)
2554 +- return ret;
2555 +-
2556 +- ret = tce_iommu_create_window(container, create.page_shift,
2557 +- create.window_size, create.levels,
2558 +- &create.start_addr);
2559 ++ if (!ret)
2560 ++ ret = tce_iommu_create_window(container,
2561 ++ create.page_shift,
2562 ++ create.window_size, create.levels,
2563 ++ &create.start_addr);
2564 +
2565 + mutex_unlock(&container->lock);
2566 +
2567 +diff --git a/fs/dax.c b/fs/dax.c
2568 +index bf6218da7928..800748f10b3d 100644
2569 +--- a/fs/dax.c
2570 ++++ b/fs/dax.c
2571 +@@ -1265,6 +1265,17 @@ iomap_dax_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
2572 + if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
2573 + return -EIO;
2574 +
2575 ++ /*
2576 ++	 * A write can allocate blocks for an area which has a hole page
2577 ++	 * mapped into the page tables. We have to tear down these mappings
2578 ++	 * so that data written by write(2) is visible in mmap.
2579 ++ */
2580 ++ if ((iomap->flags & IOMAP_F_NEW) && inode->i_mapping->nrpages) {
2581 ++ invalidate_inode_pages2_range(inode->i_mapping,
2582 ++ pos >> PAGE_SHIFT,
2583 ++ (end - 1) >> PAGE_SHIFT);
2584 ++ }
2585 ++
2586 + while (pos < end) {
2587 + unsigned offset = pos & (PAGE_SIZE - 1);
2588 + struct blk_dax_ctl dax = { 0 };
2589 +@@ -1329,23 +1340,6 @@ iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter,
2590 + if (iov_iter_rw(iter) == WRITE)
2591 + flags |= IOMAP_WRITE;
2592 +
2593 +- /*
2594 +- * Yes, even DAX files can have page cache attached to them: A zeroed
2595 +- * page is inserted into the pagecache when we have to serve a write
2596 +- * fault on a hole. It should never be dirtied and can simply be
2597 +- * dropped from the pagecache once we get real data for the page.
2598 +- *
2599 +- * XXX: This is racy against mmap, and there's nothing we can do about
2600 +- * it. We'll eventually need to shift this down even further so that
2601 +- * we can check if we allocated blocks over a hole first.
2602 +- */
2603 +- if (mapping->nrpages) {
2604 +- ret = invalidate_inode_pages2_range(mapping,
2605 +- pos >> PAGE_SHIFT,
2606 +- (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT);
2607 +- WARN_ON_ONCE(ret);
2608 +- }
2609 +-
2610 + while (iov_iter_count(iter)) {
2611 + ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
2612 + iter, iomap_dax_actor);
2613 +diff --git a/fs/libfs.c b/fs/libfs.c
2614 +index 48826d4da189..9588780ad43e 100644
2615 +--- a/fs/libfs.c
2616 ++++ b/fs/libfs.c
2617 +@@ -245,7 +245,8 @@ struct dentry *mount_pseudo_xattr(struct file_system_type *fs_type, char *name,
2618 + struct inode *root;
2619 + struct qstr d_name = QSTR_INIT(name, strlen(name));
2620 +
2621 +- s = sget(fs_type, NULL, set_anon_super, MS_NOUSER, NULL);
2622 ++ s = sget_userns(fs_type, NULL, set_anon_super, MS_KERNMOUNT|MS_NOUSER,
2623 ++ &init_user_ns, NULL);
2624 + if (IS_ERR(s))
2625 + return ERR_CAST(s);
2626 +
2627 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
2628 +index 67845220fc27..4638654e26f3 100644
2629 +--- a/fs/nfs/nfs4proc.c
2630 ++++ b/fs/nfs/nfs4proc.c
2631 +@@ -38,7 +38,6 @@
2632 + #include <linux/mm.h>
2633 + #include <linux/delay.h>
2634 + #include <linux/errno.h>
2635 +-#include <linux/file.h>
2636 + #include <linux/string.h>
2637 + #include <linux/ratelimit.h>
2638 + #include <linux/printk.h>
2639 +@@ -6006,7 +6005,6 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
2640 + p->server = server;
2641 + atomic_inc(&lsp->ls_count);
2642 + p->ctx = get_nfs_open_context(ctx);
2643 +- get_file(fl->fl_file);
2644 + memcpy(&p->fl, fl, sizeof(p->fl));
2645 + return p;
2646 + out_free_seqid:
2647 +@@ -6119,7 +6117,6 @@ static void nfs4_lock_release(void *calldata)
2648 + nfs_free_seqid(data->arg.lock_seqid);
2649 + nfs4_put_lock_state(data->lsp);
2650 + put_nfs_open_context(data->ctx);
2651 +- fput(data->fl.fl_file);
2652 + kfree(data);
2653 + dprintk("%s: done!\n", __func__);
2654 + }
2655 +diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
2656 +index 92671914067f..71deeae6eefd 100644
2657 +--- a/fs/nfs/nfs4state.c
2658 ++++ b/fs/nfs/nfs4state.c
2659 +@@ -1718,7 +1718,6 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
2660 + break;
2661 + case -NFS4ERR_STALE_CLIENTID:
2662 + set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
2663 +- nfs4_state_clear_reclaim_reboot(clp);
2664 + nfs4_state_start_reclaim_reboot(clp);
2665 + break;
2666 + case -NFS4ERR_EXPIRED:
2667 +diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
2668 +index 447a915db25d..4431ea2c8802 100644
2669 +--- a/include/linux/buffer_head.h
2670 ++++ b/include/linux/buffer_head.h
2671 +@@ -239,12 +239,10 @@ static inline int block_page_mkwrite_return(int err)
2672 + {
2673 + if (err == 0)
2674 + return VM_FAULT_LOCKED;
2675 +- if (err == -EFAULT)
2676 ++ if (err == -EFAULT || err == -EAGAIN)
2677 + return VM_FAULT_NOPAGE;
2678 + if (err == -ENOMEM)
2679 + return VM_FAULT_OOM;
2680 +- if (err == -EAGAIN)
2681 +- return VM_FAULT_RETRY;
2682 + /* -ENOSPC, -EDQUOT, -EIO ... */
2683 + return VM_FAULT_SIGBUS;
2684 + }
2685 +diff --git a/include/linux/fence.h b/include/linux/fence.h
2686 +index 0d763053f97a..9bb2c0c97a21 100644
2687 +--- a/include/linux/fence.h
2688 ++++ b/include/linux/fence.h
2689 +@@ -47,7 +47,7 @@ struct fence_cb;
2690 + * can be compared to decide which fence would be signaled later.
2691 + * @flags: A mask of FENCE_FLAG_* defined below
2692 + * @timestamp: Timestamp when the fence was signaled.
2693 +- * @status: Optional, only valid if < 0, must be set before calling
2694 ++ * @error: Optional, only valid if < 0, must be set before calling
2695 + * fence_signal, indicates that the fence has completed with an error.
2696 + *
2697 + * the flags member must be manipulated and read using the appropriate
2698 +@@ -79,7 +79,7 @@ struct fence {
2699 + unsigned seqno;
2700 + unsigned long flags;
2701 + ktime_t timestamp;
2702 +- int status;
2703 ++ int error;
2704 + };
2705 +
2706 + enum fence_flag_bits {
2707 +@@ -132,7 +132,7 @@ struct fence_cb {
2708 + * or some failure occurred that made it impossible to enable
2709 + * signaling. True indicates successful enabling.
2710 + *
2711 +- * fence->status may be set in enable_signaling, but only when false is
2712 ++ * fence->error may be set in enable_signaling, but only when false is
2713 + * returned.
2714 + *
2715 + * Calling fence_signal before enable_signaling is called allows
2716 +@@ -144,7 +144,7 @@ struct fence_cb {
2717 + * the second time will be a noop since it was already signaled.
2718 + *
2719 + * Notes on signaled:
2720 +- * May set fence->status if returning true.
2721 ++ * May set fence->error if returning true.
2722 + *
2723 + * Notes on wait:
2724 + * Must not be NULL, set to fence_default_wait for default implementation.
2725 +@@ -280,6 +280,19 @@ fence_is_signaled(struct fence *fence)
2726 + return false;
2727 + }
2728 +
2729 ++/**
2730 ++ * __fence_is_later - return if f1 is chronologically later than f2
2731 ++ * @f1: [in] the first fence's seqno
2732 ++ * @f2: [in] the second fence's seqno from the same context
2733 ++ *
2734 ++ * Returns true if f1 is chronologically later than f2. Both fences must be
2735 ++ * from the same context, since a seqno is not common across contexts.
2736 ++ */
2737 ++static inline bool __fence_is_later(u32 f1, u32 f2)
2738 ++{
2739 ++ return (int)(f1 - f2) > 0;
2740 ++}
2741 ++
2742 + /**
2743 + * fence_is_later - return if f1 is chronologically later than f2
2744 + * @f1: [in] the first fence from the same context
2745 +@@ -293,7 +306,7 @@ static inline bool fence_is_later(struct fence *f1, struct fence *f2)
2746 + if (WARN_ON(f1->context != f2->context))
2747 + return false;
2748 +
2749 +- return (int)(f1->seqno - f2->seqno) > 0;
2750 ++ return __fence_is_later(f1->seqno, f2->seqno);
2751 + }
2752 +
2753 + /**
2754 +@@ -321,6 +334,50 @@ static inline struct fence *fence_later(struct fence *f1, struct fence *f2)
2755 + return fence_is_signaled(f2) ? NULL : f2;
2756 + }
2757 +
2758 ++/**
2759 ++ * fence_get_status_locked - returns the status upon completion
2760 ++ * @fence: [in] the fence to query
2761 ++ *
2762 ++ * Drivers can supply an optional error status condition before they signal
2763 ++ * the fence (to indicate whether the fence was completed due to an error
2764 ++ * rather than success). The value of the status condition is only valid
2765 ++ * if the fence has been signaled; fence_get_status_locked() first checks
2766 ++ * the signal state before reporting the error status.
2767 ++ *
2768 ++ * Returns 0 if the fence has not yet been signaled, 1 if the fence has
2769 ++ * been signaled without an error condition, or a negative error code
2770 ++ * if the fence completed with an error.
2771 ++ */
2772 ++static inline int fence_get_status_locked(struct fence *fence)
2773 ++{
2774 ++ if (fence_is_signaled_locked(fence))
2775 ++ return fence->error ?: 1;
2776 ++ else
2777 ++ return 0;
2778 ++}
2779 ++
2780 ++int fence_get_status(struct fence *fence);
2781 ++
2782 ++/**
2783 ++ * fence_set_error - flag an error condition on the fence
2784 ++ * @fence: [in] the fence
2785 ++ * @error: [in] the error to store
2786 ++ *
2787 ++ * Drivers can supply an optional error status condition before they signal
2788 ++ * the fence, to indicate that the fence was completed due to an error
2789 ++ * rather than success. This must be set before signaling (so that the value
2790 ++ * is visible before any waiters on the signal callback are woken). This
2791 ++ * helper exists to help catch erroneous setting of #fence.error.
2792 ++ */
2793 ++static inline void fence_set_error(struct fence *fence,
2794 ++ int error)
2795 ++{
2796 ++ BUG_ON(test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags));
2797 ++ BUG_ON(error >= 0 || error < -MAX_ERRNO);
2798 ++
2799 ++ fence->error = error;
2800 ++}
2801 ++
2802 + signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout);
2803 + signed long fence_wait_any_timeout(struct fence **fences, uint32_t count,
2804 + bool intr, signed long timeout);
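
The new __fence_is_later() helper in the fence.h hunk above compares
wrapping u32 sequence numbers by casting their unsigned difference to a
signed type. A stand-alone sketch of that comparison; it stays correct
across wraparound as long as the two seqnos are less than 2^31 apart:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool seqno_is_later(uint32_t f1, uint32_t f2)
    {
        /* The unsigned subtraction wraps; reinterpreting the result as
         * signed turns "f1 is ahead of f2" into a simple sign test. */
        return (int32_t)(f1 - f2) > 0;
    }

    int main(void)
    {
        /* 5 is later than 0xfffffffe: the counter wrapped in between. */
        printf("%d\n", seqno_is_later(5, 0xfffffffeu));  /* 1 */
        printf("%d\n", seqno_is_later(0xfffffffeu, 5));  /* 0 */
        printf("%d\n", seqno_is_later(10, 10));          /* 0 */
        return 0;
    }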
2805 +diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
2806 +index 4741ecdb9817..78ed8105e64d 100644
2807 +--- a/include/linux/perf_event.h
2808 ++++ b/include/linux/perf_event.h
2809 +@@ -1259,6 +1259,7 @@ extern void perf_event_disable(struct perf_event *event);
2810 + extern void perf_event_disable_local(struct perf_event *event);
2811 + extern void perf_event_disable_inatomic(struct perf_event *event);
2812 + extern void perf_event_task_tick(void);
2813 ++extern int perf_event_account_interrupt(struct perf_event *event);
2814 + #else /* !CONFIG_PERF_EVENTS: */
2815 + static inline void *
2816 + perf_aux_output_begin(struct perf_output_handle *handle,
2817 +diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
2818 +index 5e64a86989a5..ab1dadba9923 100644
2819 +--- a/include/uapi/linux/usb/ch9.h
2820 ++++ b/include/uapi/linux/usb/ch9.h
2821 +@@ -854,6 +854,8 @@ struct usb_wireless_cap_descriptor { /* Ultra Wide Band */
2822 + __u8 bReserved;
2823 + } __attribute__((packed));
2824 +
2825 ++#define USB_DT_USB_WIRELESS_CAP_SIZE 11
2826 ++
2827 + /* USB 2.0 Extension descriptor */
2828 + #define USB_CAP_TYPE_EXT 2
2829 +
2830 +@@ -1046,6 +1048,7 @@ struct usb_ptm_cap_descriptor {
2831 + __u8 bDevCapabilityType;
2832 + } __attribute__((packed));
2833 +
2834 ++#define USB_DT_USB_PTM_ID_SIZE 3
2835 + /*
2836 + * The size of the descriptor for the Sublink Speed Attribute Count
2837 + * (SSAC) specified in bmAttributes[4:0].
2838 +diff --git a/kernel/events/core.c b/kernel/events/core.c
2839 +index 36ff2d93f222..13b9784427b0 100644
2840 +--- a/kernel/events/core.c
2841 ++++ b/kernel/events/core.c
2842 +@@ -7088,25 +7088,12 @@ static void perf_log_itrace_start(struct perf_event *event)
2843 + perf_output_end(&handle);
2844 + }
2845 +
2846 +-/*
2847 +- * Generic event overflow handling, sampling.
2848 +- */
2849 +-
2850 +-static int __perf_event_overflow(struct perf_event *event,
2851 +- int throttle, struct perf_sample_data *data,
2852 +- struct pt_regs *regs)
2853 ++static int
2854 ++__perf_event_account_interrupt(struct perf_event *event, int throttle)
2855 + {
2856 +- int events = atomic_read(&event->event_limit);
2857 + struct hw_perf_event *hwc = &event->hw;
2858 +- u64 seq;
2859 + int ret = 0;
2860 +-
2861 +- /*
2862 +- * Non-sampling counters might still use the PMI to fold short
2863 +- * hardware counters, ignore those.
2864 +- */
2865 +- if (unlikely(!is_sampling_event(event)))
2866 +- return 0;
2867 ++ u64 seq;
2868 +
2869 + seq = __this_cpu_read(perf_throttled_seq);
2870 + if (seq != hwc->interrupts_seq) {
2871 +@@ -7134,6 +7121,34 @@ static int __perf_event_overflow(struct perf_event *event,
2872 + perf_adjust_period(event, delta, hwc->last_period, true);
2873 + }
2874 +
2875 ++ return ret;
2876 ++}
2877 ++
2878 ++int perf_event_account_interrupt(struct perf_event *event)
2879 ++{
2880 ++ return __perf_event_account_interrupt(event, 1);
2881 ++}
2882 ++
2883 ++/*
2884 ++ * Generic event overflow handling, sampling.
2885 ++ */
2886 ++
2887 ++static int __perf_event_overflow(struct perf_event *event,
2888 ++ int throttle, struct perf_sample_data *data,
2889 ++ struct pt_regs *regs)
2890 ++{
2891 ++ int events = atomic_read(&event->event_limit);
2892 ++ int ret = 0;
2893 ++
2894 ++ /*
2895 ++ * Non-sampling counters might still use the PMI to fold short
2896 ++ * hardware counters, ignore those.
2897 ++ */
2898 ++ if (unlikely(!is_sampling_event(event)))
2899 ++ return 0;
2900 ++
2901 ++ ret = __perf_event_account_interrupt(event, throttle);
2902 ++
2903 + /*
2904 + * XXX event_limit might not quite work as expected on inherited
2905 + * events
2906 +diff --git a/mm/oom_kill.c b/mm/oom_kill.c
2907 +index d631d251c150..4a184157cc3d 100644
2908 +--- a/mm/oom_kill.c
2909 ++++ b/mm/oom_kill.c
2910 +@@ -524,7 +524,6 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
2911 + */
2912 + set_bit(MMF_UNSTABLE, &mm->flags);
2913 +
2914 +- tlb_gather_mmu(&tlb, mm, 0, -1);
2915 + for (vma = mm->mmap ; vma; vma = vma->vm_next) {
2916 + if (is_vm_hugetlb_page(vma))
2917 + continue;
2918 +@@ -546,11 +545,13 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
2919 + * we do not want to block exit_mmap by keeping mm ref
2920 + * count elevated without a good reason.
2921 + */
2922 +- if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED))
2923 ++ if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
2924 ++ tlb_gather_mmu(&tlb, mm, vma->vm_start, vma->vm_end);
2925 + unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
2926 + &details);
2927 ++ tlb_finish_mmu(&tlb, vma->vm_start, vma->vm_end);
2928 ++ }
2929 + }
2930 +- tlb_finish_mmu(&tlb, 0, -1);
2931 + pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
2932 + task_pid_nr(tsk), tsk->comm,
2933 + K(get_mm_counter(mm, MM_ANONPAGES)),
2934 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
2935 +index ef5ee56095e8..fbc38888252b 100644
2936 +--- a/mm/page_alloc.c
2937 ++++ b/mm/page_alloc.c
2938 +@@ -2592,30 +2592,23 @@ int __isolate_free_page(struct page *page, unsigned int order)
2939 + * Update NUMA hit/miss statistics
2940 + *
2941 + * Must be called with interrupts disabled.
2942 +- *
2943 +- * When __GFP_OTHER_NODE is set assume the node of the preferred
2944 +- * zone is the local node. This is useful for daemons who allocate
2945 +- * memory on behalf of other processes.
2946 + */
2947 + static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
2948 + gfp_t flags)
2949 + {
2950 + #ifdef CONFIG_NUMA
2951 +- int local_nid = numa_node_id();
2952 + enum zone_stat_item local_stat = NUMA_LOCAL;
2953 +
2954 +- if (unlikely(flags & __GFP_OTHER_NODE)) {
2955 ++ if (z->node != numa_node_id())
2956 + local_stat = NUMA_OTHER;
2957 +- local_nid = preferred_zone->node;
2958 +- }
2959 +
2960 +- if (z->node == local_nid) {
2961 ++ if (z->node == preferred_zone->node)
2962 + __inc_zone_state(z, NUMA_HIT);
2963 +- __inc_zone_state(z, local_stat);
2964 +- } else {
2965 ++ else {
2966 + __inc_zone_state(z, NUMA_MISS);
2967 + __inc_zone_state(preferred_zone, NUMA_FOREIGN);
2968 + }
2969 ++ __inc_zone_state(z, local_stat);
2970 + #endif
2971 + }
2972 +
2973 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
2974 +index 8fcd0c642742..05255a286888 100644
2975 +--- a/net/ipv4/tcp_input.c
2976 ++++ b/net/ipv4/tcp_input.c
2977 +@@ -5081,7 +5081,7 @@ static void tcp_check_space(struct sock *sk)
2978 + if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
2979 + sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
2980 + /* pairs with tcp_poll() */
2981 +- smp_mb__after_atomic();
2982 ++ smp_mb();
2983 + if (sk->sk_socket &&
2984 + test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
2985 + tcp_new_space(sk);
2986 +diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
2987 +index 816f79d1a8a3..67e882d49195 100644
2988 +--- a/net/ipv6/ip6_vti.c
2989 ++++ b/net/ipv6/ip6_vti.c
2990 +@@ -189,12 +189,12 @@ static int vti6_tnl_create2(struct net_device *dev)
2991 + struct vti6_net *ip6n = net_generic(net, vti6_net_id);
2992 + int err;
2993 +
2994 ++ dev->rtnl_link_ops = &vti6_link_ops;
2995 + err = register_netdevice(dev);
2996 + if (err < 0)
2997 + goto out;
2998 +
2999 + strcpy(t->parms.name, dev->name);
3000 +- dev->rtnl_link_ops = &vti6_link_ops;
3001 +
3002 + dev_hold(dev);
3003 + vti6_tnl_link(ip6n, t);
3004 +diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
3005 +index 3468d5635d0a..9d77a54e8854 100644
3006 +--- a/net/l2tp/l2tp_ip.c
3007 ++++ b/net/l2tp/l2tp_ip.c
3008 +@@ -48,7 +48,8 @@ static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
3009 + return (struct l2tp_ip_sock *)sk;
3010 + }
3011 +
3012 +-static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
3013 ++static struct sock *__l2tp_ip_bind_lookup(const struct net *net, __be32 laddr,
3014 ++ __be32 raddr, int dif, u32 tunnel_id)
3015 + {
3016 + struct sock *sk;
3017 +
3018 +@@ -62,6 +63,7 @@ static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif
3019 + if ((l2tp->conn_id == tunnel_id) &&
3020 + net_eq(sock_net(sk), net) &&
3021 + !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
3022 ++ (!inet->inet_daddr || !raddr || inet->inet_daddr == raddr) &&
3023 + (!sk->sk_bound_dev_if || !dif ||
3024 + sk->sk_bound_dev_if == dif))
3025 + goto found;
3026 +@@ -72,15 +74,6 @@ static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif
3027 + return sk;
3028 + }
3029 +
3030 +-static inline struct sock *l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
3031 +-{
3032 +- struct sock *sk = __l2tp_ip_bind_lookup(net, laddr, dif, tunnel_id);
3033 +- if (sk)
3034 +- sock_hold(sk);
3035 +-
3036 +- return sk;
3037 +-}
3038 +-
3039 + /* When processing receive frames, there are two cases to
3040 + * consider. Data frames consist of a non-zero session-id and an
3041 + * optional cookie. Control frames consist of a regular L2TP header
3042 +@@ -186,8 +179,8 @@ static int l2tp_ip_recv(struct sk_buff *skb)
3043 + struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
3044 +
3045 + read_lock_bh(&l2tp_ip_lock);
3046 +- sk = __l2tp_ip_bind_lookup(net, iph->daddr, inet_iif(skb),
3047 +- tunnel_id);
3048 ++ sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr,
3049 ++ inet_iif(skb), tunnel_id);
3050 + if (!sk) {
3051 + read_unlock_bh(&l2tp_ip_lock);
3052 + goto discard;
3053 +@@ -289,7 +282,7 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
3054 + inet->inet_saddr = 0; /* Use device */
3055 +
3056 + write_lock_bh(&l2tp_ip_lock);
3057 +- if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr,
3058 ++ if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, 0,
3059 + sk->sk_bound_dev_if, addr->l2tp_conn_id)) {
3060 + write_unlock_bh(&l2tp_ip_lock);
3061 + ret = -EADDRINUSE;
3062 +diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
3063 +index 1d522ce833e6..247097289fd0 100644
3064 +--- a/net/l2tp/l2tp_ip6.c
3065 ++++ b/net/l2tp/l2tp_ip6.c
3066 +@@ -59,12 +59,14 @@ static inline struct l2tp_ip6_sock *l2tp_ip6_sk(const struct sock *sk)
3067 +
3068 + static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
3069 + struct in6_addr *laddr,
3070 ++ const struct in6_addr *raddr,
3071 + int dif, u32 tunnel_id)
3072 + {
3073 + struct sock *sk;
3074 +
3075 + sk_for_each_bound(sk, &l2tp_ip6_bind_table) {
3076 + const struct in6_addr *sk_laddr = inet6_rcv_saddr(sk);
3077 ++ const struct in6_addr *sk_raddr = &sk->sk_v6_daddr;
3078 + struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk);
3079 +
3080 + if (l2tp == NULL)
3081 +@@ -73,6 +75,7 @@ static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
3082 + if ((l2tp->conn_id == tunnel_id) &&
3083 + net_eq(sock_net(sk), net) &&
3084 + (!sk_laddr || ipv6_addr_any(sk_laddr) || ipv6_addr_equal(sk_laddr, laddr)) &&
3085 ++ (!raddr || ipv6_addr_any(sk_raddr) || ipv6_addr_equal(sk_raddr, raddr)) &&
3086 + (!sk->sk_bound_dev_if || !dif ||
3087 + sk->sk_bound_dev_if == dif))
3088 + goto found;
3089 +@@ -83,17 +86,6 @@ static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
3090 + return sk;
3091 + }
3092 +
3093 +-static inline struct sock *l2tp_ip6_bind_lookup(struct net *net,
3094 +- struct in6_addr *laddr,
3095 +- int dif, u32 tunnel_id)
3096 +-{
3097 +- struct sock *sk = __l2tp_ip6_bind_lookup(net, laddr, dif, tunnel_id);
3098 +- if (sk)
3099 +- sock_hold(sk);
3100 +-
3101 +- return sk;
3102 +-}
3103 +-
3104 + /* When processing receive frames, there are two cases to
3105 + * consider. Data frames consist of a non-zero session-id and an
3106 + * optional cookie. Control frames consist of a regular L2TP header
3107 +@@ -200,8 +192,8 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
3108 + struct ipv6hdr *iph = ipv6_hdr(skb);
3109 +
3110 + read_lock_bh(&l2tp_ip6_lock);
3111 +- sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, inet6_iif(skb),
3112 +- tunnel_id);
3113 ++ sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr,
3114 ++ inet6_iif(skb), tunnel_id);
3115 + if (!sk) {
3116 + read_unlock_bh(&l2tp_ip6_lock);
3117 + goto discard;
3118 +@@ -339,7 +331,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
3119 + rcu_read_unlock();
3120 +
3121 + write_lock_bh(&l2tp_ip6_lock);
3122 +- if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, bound_dev_if,
3123 ++ if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, NULL, bound_dev_if,
3124 + addr->l2tp_conn_id)) {
3125 + write_unlock_bh(&l2tp_ip6_lock);
3126 + err = -EADDRINUSE;
3127 +diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
3128 +index e75cbf6ecc26..a0d901d8992e 100644
3129 +--- a/net/mac80211/chan.c
3130 ++++ b/net/mac80211/chan.c
3131 +@@ -231,9 +231,6 @@ ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata)
3132 + !(sta->sdata->bss && sta->sdata->bss == sdata->bss))
3133 + continue;
3134 +
3135 +- if (!sta->uploaded || !test_sta_flag(sta, WLAN_STA_ASSOC))
3136 +- continue;
3137 +-
3138 + max_bw = max(max_bw, ieee80211_get_sta_bw(&sta->sta));
3139 + }
3140 + rcu_read_unlock();
3141 +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
3142 +index 274c564bd9af..1ffd1e145c13 100644
3143 +--- a/net/mac80211/tx.c
3144 ++++ b/net/mac80211/tx.c
3145 +@@ -1244,7 +1244,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
3146 +
3147 + static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
3148 + struct ieee80211_vif *vif,
3149 +- struct ieee80211_sta *pubsta,
3150 ++ struct sta_info *sta,
3151 + struct sk_buff *skb)
3152 + {
3153 + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
3154 +@@ -1258,10 +1258,13 @@ static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
3155 + if (!ieee80211_is_data(hdr->frame_control))
3156 + return NULL;
3157 +
3158 +- if (pubsta) {
3159 ++ if (sta) {
3160 + u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
3161 +
3162 +- txq = pubsta->txq[tid];
3163 ++ if (!sta->uploaded)
3164 ++ return NULL;
3165 ++
3166 ++ txq = sta->sta.txq[tid];
3167 + } else if (vif) {
3168 + txq = vif->txq;
3169 + }
3170 +@@ -1499,23 +1502,17 @@ static bool ieee80211_queue_skb(struct ieee80211_local *local,
3171 + struct fq *fq = &local->fq;
3172 + struct ieee80211_vif *vif;
3173 + struct txq_info *txqi;
3174 +- struct ieee80211_sta *pubsta;
3175 +
3176 + if (!local->ops->wake_tx_queue ||
3177 + sdata->vif.type == NL80211_IFTYPE_MONITOR)
3178 + return false;
3179 +
3180 +- if (sta && sta->uploaded)
3181 +- pubsta = &sta->sta;
3182 +- else
3183 +- pubsta = NULL;
3184 +-
3185 + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
3186 + sdata = container_of(sdata->bss,
3187 + struct ieee80211_sub_if_data, u.ap);
3188 +
3189 + vif = &sdata->vif;
3190 +- txqi = ieee80211_get_txq(local, vif, pubsta, skb);
3191 ++ txqi = ieee80211_get_txq(local, vif, sta, skb);
3192 +
3193 + if (!txqi)
3194 + return false;
3195 +diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
3196 +index c985ecbe9bd6..ae5ac175b2be 100644
3197 +--- a/net/qrtr/qrtr.c
3198 ++++ b/net/qrtr/qrtr.c
3199 +@@ -252,7 +252,7 @@ static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node,
3200 + const int pkt_len = 20;
3201 + struct qrtr_hdr *hdr;
3202 + struct sk_buff *skb;
3203 +- u32 *buf;
3204 ++ __le32 *buf;
3205 +
3206 + skb = alloc_skb(QRTR_HDR_SIZE + pkt_len, GFP_KERNEL);
3207 + if (!skb)
3208 +@@ -269,7 +269,7 @@ static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node,
3209 + hdr->dst_node_id = cpu_to_le32(dst_node);
3210 + hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL);
3211 +
3212 +- buf = (u32 *)skb_put(skb, pkt_len);
3213 ++ buf = (__le32 *)skb_put(skb, pkt_len);
3214 + memset(buf, 0, pkt_len);
3215 + buf[0] = cpu_to_le32(QRTR_TYPE_RESUME_TX);
3216 + buf[1] = cpu_to_le32(src_node);
3217 +diff --git a/net/sctp/debug.c b/net/sctp/debug.c
3218 +index 95d7b15dad21..e371a0d90068 100644
3219 +--- a/net/sctp/debug.c
3220 ++++ b/net/sctp/debug.c
3221 +@@ -166,7 +166,7 @@ static const char *const sctp_timer_tbl[] = {
3222 + /* Lookup timer debug name. */
3223 + const char *sctp_tname(const sctp_subtype_t id)
3224 + {
3225 +- if (id.timeout <= SCTP_EVENT_TIMEOUT_MAX)
3226 ++ if (id.timeout < ARRAY_SIZE(sctp_timer_tbl))
3227 + return sctp_timer_tbl[id.timeout];
3228 + return "unknown_timer";
3229 + }
3230 +diff --git a/net/tipc/server.c b/net/tipc/server.c
3231 +index f89c0c2e8c16..3cd6402e812c 100644
3232 +--- a/net/tipc/server.c
3233 ++++ b/net/tipc/server.c
3234 +@@ -86,7 +86,6 @@ struct outqueue_entry {
3235 + static void tipc_recv_work(struct work_struct *work);
3236 + static void tipc_send_work(struct work_struct *work);
3237 + static void tipc_clean_outqueues(struct tipc_conn *con);
3238 +-static void tipc_sock_release(struct tipc_conn *con);
3239 +
3240 + static void tipc_conn_kref_release(struct kref *kref)
3241 + {
3242 +@@ -104,7 +103,6 @@ static void tipc_conn_kref_release(struct kref *kref)
3243 + }
3244 + saddr->scope = -TIPC_NODE_SCOPE;
3245 + kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr));
3246 +- tipc_sock_release(con);
3247 + sock_release(sock);
3248 + con->sock = NULL;
3249 +
3250 +@@ -194,19 +192,15 @@ static void tipc_unregister_callbacks(struct tipc_conn *con)
3251 + write_unlock_bh(&sk->sk_callback_lock);
3252 + }
3253 +
3254 +-static void tipc_sock_release(struct tipc_conn *con)
3255 ++static void tipc_close_conn(struct tipc_conn *con)
3256 + {
3257 + struct tipc_server *s = con->server;
3258 +
3259 +- if (con->conid)
3260 +- s->tipc_conn_release(con->conid, con->usr_data);
3261 +-
3262 +- tipc_unregister_callbacks(con);
3263 +-}
3264 +-
3265 +-static void tipc_close_conn(struct tipc_conn *con)
3266 +-{
3267 + if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
3268 ++ tipc_unregister_callbacks(con);
3269 ++
3270 ++ if (con->conid)
3271 ++ s->tipc_conn_release(con->conid, con->usr_data);
3272 +
3273 + /* We shouldn't flush pending works as we may be in the
3274 + * thread. In fact the races with pending rx/tx work structs
3275 +@@ -625,14 +619,12 @@ int tipc_server_start(struct tipc_server *s)
3276 + void tipc_server_stop(struct tipc_server *s)
3277 + {
3278 + struct tipc_conn *con;
3279 +- int total = 0;
3280 + int id;
3281 +
3282 + spin_lock_bh(&s->idr_lock);
3283 +- for (id = 0; total < s->idr_in_use; id++) {
3284 ++ for (id = 0; s->idr_in_use; id++) {
3285 + con = idr_find(&s->conn_idr, id);
3286 + if (con) {
3287 +- total++;
3288 + spin_unlock_bh(&s->idr_lock);
3289 + tipc_close_conn(con);
3290 + spin_lock_bh(&s->idr_lock);
3291 +diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
3292 +index 0e8762945e79..2b3def14b4fb 100644
3293 +--- a/security/integrity/ima/ima_main.c
3294 ++++ b/security/integrity/ima/ima_main.c
3295 +@@ -51,6 +51,8 @@ static int __init hash_setup(char *str)
3296 + ima_hash_algo = HASH_ALGO_SHA1;
3297 + else if (strncmp(str, "md5", 3) == 0)
3298 + ima_hash_algo = HASH_ALGO_MD5;
3299 ++ else
3300 ++ return 1;
3301 + goto out;
3302 + }
3303 +
3304 +@@ -60,6 +62,8 @@ static int __init hash_setup(char *str)
3305 + break;
3306 + }
3307 + }
3308 ++ if (i == HASH_ALGO__LAST)
3309 ++ return 1;
3310 + out:
3311 + hash_setup_done = 1;
3312 + return 1;
3313 +diff --git a/tools/include/linux/poison.h b/tools/include/linux/poison.h
3314 +index 51334edec506..f306a7642509 100644
3315 +--- a/tools/include/linux/poison.h
3316 ++++ b/tools/include/linux/poison.h
3317 +@@ -14,6 +14,10 @@
3318 + # define POISON_POINTER_DELTA 0
3319 + #endif
3320 +
3321 ++#ifdef __cplusplus
3322 ++#define LIST_POISON1 NULL
3323 ++#define LIST_POISON2 NULL
3324 ++#else
3325 + /*
3326 + * These are non-NULL pointers that will result in page faults
3327 + * under normal circumstances, used to verify that nobody uses
3328 +@@ -21,6 +25,7 @@
3329 + */
3330 + #define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA)
3331 + #define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA)
3332 ++#endif
3333 +
3334 + /********** include/linux/timer.h **********/
3335 + /*
3336 +diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c
3337 +index 28d1605b0338..b60a6fd66517 100644
3338 +--- a/tools/perf/tests/attr.c
3339 ++++ b/tools/perf/tests/attr.c
3340 +@@ -150,7 +150,7 @@ static int run_dir(const char *d, const char *perf)
3341 + snprintf(cmd, 3*PATH_MAX, PYTHON " %s/attr.py -d %s/attr/ -p %s %.*s",
3342 + d, d, perf, vcnt, v);
3343 +
3344 +- return system(cmd);
3345 ++ return system(cmd) ? TEST_FAIL : TEST_OK;
3346 + }
3347 +
3348 + int test__attr(int subtest __maybe_unused)
3349 +diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c
3350 +index e717fed80219..f936a3cd3e35 100644
3351 +--- a/tools/testing/selftests/x86/ldt_gdt.c
3352 ++++ b/tools/testing/selftests/x86/ldt_gdt.c
3353 +@@ -360,9 +360,24 @@ static void do_simple_tests(void)
3354 + install_invalid(&desc, false);
3355 +
3356 + desc.seg_not_present = 0;
3357 +- desc.read_exec_only = 0;
3358 + desc.seg_32bit = 1;
3359 ++ desc.read_exec_only = 0;
3360 ++ desc.limit = 0xfffff;
3361 ++
3362 + install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S | AR_P | AR_DB);
3363 ++
3364 ++ desc.limit_in_pages = 1;
3365 ++
3366 ++ install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S | AR_P | AR_DB | AR_G);
3367 ++ desc.read_exec_only = 1;
3368 ++ install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA | AR_S | AR_P | AR_DB | AR_G);
3369 ++ desc.contents = 1;
3370 ++ desc.read_exec_only = 0;
3371 ++ install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA_EXPDOWN | AR_S | AR_P | AR_DB | AR_G);
3372 ++ desc.read_exec_only = 1;
3373 ++ install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA_EXPDOWN | AR_S | AR_P | AR_DB | AR_G);
3374 ++
3375 ++ desc.limit = 0;
3376 + install_invalid(&desc, true);
3377 + }
3378 +
3379 +diff --git a/tools/usb/usbip/Makefile.am b/tools/usb/usbip/Makefile.am
3380 +index 66f8bf038c9f..45eaa70a71e0 100644
3381 +--- a/tools/usb/usbip/Makefile.am
3382 ++++ b/tools/usb/usbip/Makefile.am
3383 +@@ -1,6 +1,7 @@
3384 + SUBDIRS := libsrc src
3385 + includedir = @includedir@/usbip
3386 + include_HEADERS := $(addprefix libsrc/, \
3387 +- usbip_common.h vhci_driver.h usbip_host_driver.h)
3388 ++ usbip_common.h vhci_driver.h usbip_host_driver.h \
3389 ++ list.h sysfs_utils.h usbip_host_common.h)
3390 +
3391 + dist_man_MANS := $(addprefix doc/, usbip.8 usbipd.8)
3392 +diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
3393 +index 27a1f6341d41..7b49a1378c90 100644
3394 +--- a/virt/kvm/arm/arch_timer.c
3395 ++++ b/virt/kvm/arm/arch_timer.c
3396 +@@ -89,9 +89,6 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
3397 + struct kvm_vcpu *vcpu;
3398 +
3399 + vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
3400 +- vcpu->arch.timer_cpu.armed = false;
3401 +-
3402 +- WARN_ON(!kvm_timer_should_fire(vcpu));
3403 +
3404 + /*
3405 + * If the vcpu is blocked we want to wake it up so that it will see