Gentoo Archives: gentoo-commits

From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Sat, 09 Dec 2017 18:50:45
Message-Id: 1512845426.7489285d217c846f6036ac1c50d93ef495f4d2ad.alicef@gentoo
1 commit: 7489285d217c846f6036ac1c50d93ef495f4d2ad
2 Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
3 AuthorDate: Sat Dec 9 18:50:26 2017 +0000
4 Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
5 CommitDate: Sat Dec 9 18:50:26 2017 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=7489285d
7
8 Linux kernel 4.4.105
9
10 0000_README | 4 +
11 1104_linux-4.4.105.patch | 1364 ++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 1368 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index a31f5b0..4655940 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -459,6 +459,10 @@ Patch: 1103_linux-4.4.104.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.4.104
21
22 +Patch: 1104_linux-4.4.105.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.4.105
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1104_linux-4.4.105.patch b/1104_linux-4.4.105.patch
31 new file mode 100644
32 index 0000000..2441cee
33 --- /dev/null
34 +++ b/1104_linux-4.4.105.patch
35 @@ -0,0 +1,1364 @@
36 +diff --git a/Makefile b/Makefile
37 +index 55500e023f61..69f4ace70276 100644
38 +--- a/Makefile
39 ++++ b/Makefile
40 +@@ -1,6 +1,6 @@
41 + VERSION = 4
42 + PATCHLEVEL = 4
43 +-SUBLEVEL = 104
44 ++SUBLEVEL = 105
45 + EXTRAVERSION =
46 + NAME = Blurry Fish Butt
47 +
48 +diff --git a/arch/arm/mach-omap1/dma.c b/arch/arm/mach-omap1/dma.c
49 +index 7b02ed218a42..0c120b2ea2f9 100644
50 +--- a/arch/arm/mach-omap1/dma.c
51 ++++ b/arch/arm/mach-omap1/dma.c
52 +@@ -31,7 +31,6 @@
53 + #include "soc.h"
54 +
55 + #define OMAP1_DMA_BASE (0xfffed800)
56 +-#define OMAP1_LOGICAL_DMA_CH_COUNT 17
57 +
58 + static u32 enable_1510_mode;
59 +
60 +@@ -311,8 +310,6 @@ static int __init omap1_system_dma_init(void)
61 + goto exit_iounmap;
62 + }
63 +
64 +- d->lch_count = OMAP1_LOGICAL_DMA_CH_COUNT;
65 +-
66 + /* Valid attributes for omap1 plus processors */
67 + if (cpu_is_omap15xx())
68 + d->dev_caps = ENABLE_1510_MODE;
69 +@@ -329,13 +326,14 @@ static int __init omap1_system_dma_init(void)
70 + d->dev_caps |= CLEAR_CSR_ON_READ;
71 + d->dev_caps |= IS_WORD_16;
72 +
73 +- if (cpu_is_omap15xx())
74 +- d->chan_count = 9;
75 +- else if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
76 +- if (!(d->dev_caps & ENABLE_1510_MODE))
77 +- d->chan_count = 16;
78 ++ /* available logical channels */
79 ++ if (cpu_is_omap15xx()) {
80 ++ d->lch_count = 9;
81 ++ } else {
82 ++ if (d->dev_caps & ENABLE_1510_MODE)
83 ++ d->lch_count = 9;
84 + else
85 +- d->chan_count = 9;
86 ++ d->lch_count = 16;
87 + }
88 +
89 + p = dma_plat_info;
90 +diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h
91 +index 649eb62c52b3..9e02cb7955c1 100644
92 +--- a/arch/s390/include/asm/pci_insn.h
93 ++++ b/arch/s390/include/asm/pci_insn.h
94 +@@ -81,6 +81,6 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range);
95 + int zpci_load(u64 *data, u64 req, u64 offset);
96 + int zpci_store(u64 data, u64 req, u64 offset);
97 + int zpci_store_block(const u64 *data, u64 req, u64 offset);
98 +-void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);
99 ++int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);
100 +
101 + #endif
102 +diff --git a/arch/s390/include/asm/runtime_instr.h b/arch/s390/include/asm/runtime_instr.h
103 +index 402ad6df4897..c54a9310d814 100644
104 +--- a/arch/s390/include/asm/runtime_instr.h
105 ++++ b/arch/s390/include/asm/runtime_instr.h
106 +@@ -85,6 +85,8 @@ static inline void restore_ri_cb(struct runtime_instr_cb *cb_next,
107 + load_runtime_instr_cb(&runtime_instr_empty_cb);
108 + }
109 +
110 +-void exit_thread_runtime_instr(void);
111 ++struct task_struct;
112 ++
113 ++void runtime_instr_release(struct task_struct *tsk);
114 +
115 + #endif /* _RUNTIME_INSTR_H */
116 +diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
117 +index efa035a31b98..7bc4e4c5d5b8 100644
118 +--- a/arch/s390/kernel/process.c
119 ++++ b/arch/s390/kernel/process.c
120 +@@ -72,7 +72,6 @@ extern void kernel_thread_starter(void);
121 + */
122 + void exit_thread(void)
123 + {
124 +- exit_thread_runtime_instr();
125 + }
126 +
127 + void flush_thread(void)
128 +@@ -87,6 +86,7 @@ void arch_release_task_struct(struct task_struct *tsk)
129 + {
130 + /* Free either the floating-point or the vector register save area */
131 + kfree(tsk->thread.fpu.regs);
132 ++ runtime_instr_release(tsk);
133 + }
134 +
135 + int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
136 +diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
137 +index 70cdb03d4acd..fd03a7569e10 100644
138 +--- a/arch/s390/kernel/runtime_instr.c
139 ++++ b/arch/s390/kernel/runtime_instr.c
140 +@@ -18,11 +18,24 @@
141 + /* empty control block to disable RI by loading it */
142 + struct runtime_instr_cb runtime_instr_empty_cb;
143 +
144 ++void runtime_instr_release(struct task_struct *tsk)
145 ++{
146 ++ kfree(tsk->thread.ri_cb);
147 ++}
148 ++
149 + static void disable_runtime_instr(void)
150 + {
151 +- struct pt_regs *regs = task_pt_regs(current);
152 ++ struct task_struct *task = current;
153 ++ struct pt_regs *regs;
154 +
155 ++ if (!task->thread.ri_cb)
156 ++ return;
157 ++ regs = task_pt_regs(task);
158 ++ preempt_disable();
159 + load_runtime_instr_cb(&runtime_instr_empty_cb);
160 ++ kfree(task->thread.ri_cb);
161 ++ task->thread.ri_cb = NULL;
162 ++ preempt_enable();
163 +
164 + /*
165 + * Make sure the RI bit is deleted from the PSW. If the user did not
166 +@@ -43,19 +56,6 @@ static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
167 + cb->valid = 1;
168 + }
169 +
170 +-void exit_thread_runtime_instr(void)
171 +-{
172 +- struct task_struct *task = current;
173 +-
174 +- preempt_disable();
175 +- if (!task->thread.ri_cb)
176 +- return;
177 +- disable_runtime_instr();
178 +- kfree(task->thread.ri_cb);
179 +- task->thread.ri_cb = NULL;
180 +- preempt_enable();
181 +-}
182 +-
183 + SYSCALL_DEFINE1(s390_runtime_instr, int, command)
184 + {
185 + struct runtime_instr_cb *cb;
186 +@@ -64,7 +64,7 @@ SYSCALL_DEFINE1(s390_runtime_instr, int, command)
187 + return -EOPNOTSUPP;
188 +
189 + if (command == S390_RUNTIME_INSTR_STOP) {
190 +- exit_thread_runtime_instr();
191 ++ disable_runtime_instr();
192 + return 0;
193 + }
194 +
195 +diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
196 +index f2f6720a3331..ef0499b76c50 100644
197 +--- a/arch/s390/pci/pci.c
198 ++++ b/arch/s390/pci/pci.c
199 +@@ -359,7 +359,8 @@ static void zpci_irq_handler(struct airq_struct *airq)
200 + /* End of second scan with interrupts on. */
201 + break;
202 + /* First scan complete, reenable interrupts. */
203 +- zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
204 ++ if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC))
205 ++ break;
206 + si = 0;
207 + continue;
208 + }
209 +@@ -921,7 +922,7 @@ static int __init pci_base_init(void)
210 + if (!s390_pci_probe)
211 + return 0;
212 +
213 +- if (!test_facility(69) || !test_facility(71) || !test_facility(72))
214 ++ if (!test_facility(69) || !test_facility(71))
215 + return 0;
216 +
217 + rc = zpci_debug_init();
218 +diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c
219 +index 10ca15dcab11..bc065392f7ab 100644
220 +--- a/arch/s390/pci/pci_insn.c
221 ++++ b/arch/s390/pci/pci_insn.c
222 +@@ -7,6 +7,7 @@
223 + #include <linux/export.h>
224 + #include <linux/errno.h>
225 + #include <linux/delay.h>
226 ++#include <asm/facility.h>
227 + #include <asm/pci_insn.h>
228 + #include <asm/pci_debug.h>
229 + #include <asm/processor.h>
230 +@@ -91,11 +92,14 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
231 + }
232 +
233 + /* Set Interruption Controls */
234 +-void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc)
235 ++int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc)
236 + {
237 ++ if (!test_facility(72))
238 ++ return -EIO;
239 + asm volatile (
240 + " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
241 + : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused));
242 ++ return 0;
243 + }
244 +
245 + /* PCI Load */
246 +diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
247 +index 91dfcafe27a6..bad25bb80679 100644
248 +--- a/arch/x86/include/asm/syscalls.h
249 ++++ b/arch/x86/include/asm/syscalls.h
250 +@@ -21,7 +21,7 @@ asmlinkage long sys_ioperm(unsigned long, unsigned long, int);
251 + asmlinkage long sys_iopl(unsigned int);
252 +
253 + /* kernel/ldt.c */
254 +-asmlinkage int sys_modify_ldt(int, void __user *, unsigned long);
255 ++asmlinkage long sys_modify_ldt(int, void __user *, unsigned long);
256 +
257 + /* kernel/signal.c */
258 + asmlinkage long sys_rt_sigreturn(void);
259 +diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
260 +index 5f8f0b3cc674..2c0b0b645a74 100644
261 +--- a/arch/x86/kernel/kprobes/ftrace.c
262 ++++ b/arch/x86/kernel/kprobes/ftrace.c
263 +@@ -26,7 +26,7 @@
264 + #include "common.h"
265 +
266 + static nokprobe_inline
267 +-int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
268 ++void __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
269 + struct kprobe_ctlblk *kcb, unsigned long orig_ip)
270 + {
271 + /*
272 +@@ -41,20 +41,21 @@ int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
273 + __this_cpu_write(current_kprobe, NULL);
274 + if (orig_ip)
275 + regs->ip = orig_ip;
276 +- return 1;
277 + }
278 +
279 + int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
280 + struct kprobe_ctlblk *kcb)
281 + {
282 +- if (kprobe_ftrace(p))
283 +- return __skip_singlestep(p, regs, kcb, 0);
284 +- else
285 +- return 0;
286 ++ if (kprobe_ftrace(p)) {
287 ++ __skip_singlestep(p, regs, kcb, 0);
288 ++ preempt_enable_no_resched();
289 ++ return 1;
290 ++ }
291 ++ return 0;
292 + }
293 + NOKPROBE_SYMBOL(skip_singlestep);
294 +
295 +-/* Ftrace callback handler for kprobes */
296 ++/* Ftrace callback handler for kprobes -- called under preempt disabled */
297 + void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
298 + struct ftrace_ops *ops, struct pt_regs *regs)
299 + {
300 +@@ -77,13 +78,17 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
301 + /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
302 + regs->ip = ip + sizeof(kprobe_opcode_t);
303 +
304 ++ /* To emulate trap based kprobes, preempt_disable here */
305 ++ preempt_disable();
306 + __this_cpu_write(current_kprobe, p);
307 + kcb->kprobe_status = KPROBE_HIT_ACTIVE;
308 +- if (!p->pre_handler || !p->pre_handler(p, regs))
309 ++ if (!p->pre_handler || !p->pre_handler(p, regs)) {
310 + __skip_singlestep(p, regs, kcb, orig_ip);
311 ++ preempt_enable_no_resched();
312 ++ }
313 + /*
314 + * If pre_handler returns !0, it sets regs->ip and
315 +- * resets current kprobe.
316 ++ * resets current kprobe, and keeps the preempt count at +1.
317 + */
318 + }
319 + end:
320 +diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
321 +index 6acc9dd91f36..d6279593bcdd 100644
322 +--- a/arch/x86/kernel/ldt.c
323 ++++ b/arch/x86/kernel/ldt.c
324 +@@ -12,6 +12,7 @@
325 + #include <linux/string.h>
326 + #include <linux/mm.h>
327 + #include <linux/smp.h>
328 ++#include <linux/syscalls.h>
329 + #include <linux/slab.h>
330 + #include <linux/vmalloc.h>
331 + #include <linux/uaccess.h>
332 +@@ -271,8 +272,8 @@ out:
333 + return error;
334 + }
335 +
336 +-asmlinkage int sys_modify_ldt(int func, void __user *ptr,
337 +- unsigned long bytecount)
338 ++SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
339 ++ unsigned long , bytecount)
340 + {
341 + int ret = -ENOSYS;
342 +
343 +@@ -290,5 +291,14 @@ asmlinkage int sys_modify_ldt(int func, void __user *ptr,
344 + ret = write_ldt(ptr, bytecount, 0);
345 + break;
346 + }
347 +- return ret;
348 ++ /*
349 ++ * The SYSCALL_DEFINE() macros give us an 'unsigned long'
350 ++ * return type, but the ABI for sys_modify_ldt() expects
351 ++ * 'int'. This cast gives us an int-sized value in %rax
352 ++ * for the return code. The 'unsigned' is necessary so
353 ++ * the compiler does not try to sign-extend the negative
354 ++ * return codes into the high half of the register when
355 ++ * taking the value from int->long.
356 ++ */
357 ++ return (unsigned int)ret;
358 + }
359 +diff --git a/arch/x86/um/ldt.c b/arch/x86/um/ldt.c
360 +index 836a1eb5df43..3ee234b6234d 100644
361 +--- a/arch/x86/um/ldt.c
362 ++++ b/arch/x86/um/ldt.c
363 +@@ -6,6 +6,7 @@
364 + #include <linux/mm.h>
365 + #include <linux/sched.h>
366 + #include <linux/slab.h>
367 ++#include <linux/syscalls.h>
368 + #include <linux/uaccess.h>
369 + #include <asm/unistd.h>
370 + #include <os.h>
371 +@@ -369,7 +370,9 @@ void free_ldt(struct mm_context *mm)
372 + mm->arch.ldt.entry_count = 0;
373 + }
374 +
375 +-int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
376 ++SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
377 ++ unsigned long , bytecount)
378 + {
379 +- return do_modify_ldt_skas(func, ptr, bytecount);
380 ++ /* See non-um modify_ldt() for why we do this cast */
381 ++ return (unsigned int)do_modify_ldt_skas(func, ptr, bytecount);
382 + }
383 +diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
384 +index 8250950aab8b..66d84bcf9bbf 100644
385 +--- a/drivers/dma/pl330.c
386 ++++ b/drivers/dma/pl330.c
387 +@@ -1657,7 +1657,6 @@ static bool _chan_ns(const struct pl330_dmac *pl330, int i)
388 + static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
389 + {
390 + struct pl330_thread *thrd = NULL;
391 +- unsigned long flags;
392 + int chans, i;
393 +
394 + if (pl330->state == DYING)
395 +@@ -1665,8 +1664,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
396 +
397 + chans = pl330->pcfg.num_chan;
398 +
399 +- spin_lock_irqsave(&pl330->lock, flags);
400 +-
401 + for (i = 0; i < chans; i++) {
402 + thrd = &pl330->channels[i];
403 + if ((thrd->free) && (!_manager_ns(thrd) ||
404 +@@ -1684,8 +1681,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
405 + thrd = NULL;
406 + }
407 +
408 +- spin_unlock_irqrestore(&pl330->lock, flags);
409 +-
410 + return thrd;
411 + }
412 +
413 +@@ -1703,7 +1698,6 @@ static inline void _free_event(struct pl330_thread *thrd, int ev)
414 + static void pl330_release_channel(struct pl330_thread *thrd)
415 + {
416 + struct pl330_dmac *pl330;
417 +- unsigned long flags;
418 +
419 + if (!thrd || thrd->free)
420 + return;
421 +@@ -1715,10 +1709,8 @@ static void pl330_release_channel(struct pl330_thread *thrd)
422 +
423 + pl330 = thrd->dmac;
424 +
425 +- spin_lock_irqsave(&pl330->lock, flags);
426 + _free_event(thrd, thrd->ev);
427 + thrd->free = true;
428 +- spin_unlock_irqrestore(&pl330->lock, flags);
429 + }
430 +
431 + /* Initialize the structure for PL330 configuration, that can be used
432 +@@ -2085,20 +2077,20 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
433 + struct pl330_dmac *pl330 = pch->dmac;
434 + unsigned long flags;
435 +
436 +- spin_lock_irqsave(&pch->lock, flags);
437 ++ spin_lock_irqsave(&pl330->lock, flags);
438 +
439 + dma_cookie_init(chan);
440 + pch->cyclic = false;
441 +
442 + pch->thread = pl330_request_channel(pl330);
443 + if (!pch->thread) {
444 +- spin_unlock_irqrestore(&pch->lock, flags);
445 ++ spin_unlock_irqrestore(&pl330->lock, flags);
446 + return -ENOMEM;
447 + }
448 +
449 + tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
450 +
451 +- spin_unlock_irqrestore(&pch->lock, flags);
452 ++ spin_unlock_irqrestore(&pl330->lock, flags);
453 +
454 + return 1;
455 + }
456 +@@ -2201,12 +2193,13 @@ static int pl330_pause(struct dma_chan *chan)
457 + static void pl330_free_chan_resources(struct dma_chan *chan)
458 + {
459 + struct dma_pl330_chan *pch = to_pchan(chan);
460 ++ struct pl330_dmac *pl330 = pch->dmac;
461 + unsigned long flags;
462 +
463 + tasklet_kill(&pch->task);
464 +
465 + pm_runtime_get_sync(pch->dmac->ddma.dev);
466 +- spin_lock_irqsave(&pch->lock, flags);
467 ++ spin_lock_irqsave(&pl330->lock, flags);
468 +
469 + pl330_release_channel(pch->thread);
470 + pch->thread = NULL;
471 +@@ -2214,7 +2207,7 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
472 + if (pch->cyclic)
473 + list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
474 +
475 +- spin_unlock_irqrestore(&pch->lock, flags);
476 ++ spin_unlock_irqrestore(&pl330->lock, flags);
477 + pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
478 + pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
479 + }
480 +diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
481 +index ca64b174f8a3..a4e1f6939c39 100644
482 +--- a/drivers/edac/sb_edac.c
483 ++++ b/drivers/edac/sb_edac.c
484 +@@ -1773,6 +1773,7 @@ static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
485 + break;
486 + case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
487 + pvt->pci_ta = pdev;
488 ++ break;
489 + case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS:
490 + pvt->pci_ras = pdev;
491 + break;
492 +diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
493 +index fbe1b3174f75..34cebcdc2fc4 100644
494 +--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
495 ++++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
496 +@@ -180,6 +180,8 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
497 +
498 + /* enable output and display signal */
499 + decon_set_bits(ctx, DECON_VIDCON0, VIDCON0_ENVID | VIDCON0_ENVID_F, ~0);
500 ++
501 ++ decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
502 + }
503 +
504 + static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
505 +diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
506 +index 6c4c7caea693..525ce56524ba 100644
507 +--- a/drivers/md/bcache/request.c
508 ++++ b/drivers/md/bcache/request.c
509 +@@ -708,7 +708,14 @@ static void cached_dev_read_error(struct closure *cl)
510 + struct search *s = container_of(cl, struct search, cl);
511 + struct bio *bio = &s->bio.bio;
512 +
513 +- if (s->recoverable) {
514 ++ /*
515 ++ * If the read request hit dirty data (s->read_dirty_data is true),
516 ++ * then recovering a failed read request from the cached device may
517 ++ * return stale data. So read failure recovery is only
518 ++ * permitted when the read request hit clean data in the cache device,
519 ++ * or when a cache read race happened.
520 ++ */
521 ++ if (s->recoverable && !s->read_dirty_data) {
522 + /* Retry from the backing device: */
523 + trace_bcache_read_retry(s->orig_bio);
524 +
525 +diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
526 +index e90c6a7333d7..2e4649655181 100644
527 +--- a/drivers/net/appletalk/ipddp.c
528 ++++ b/drivers/net/appletalk/ipddp.c
529 +@@ -191,7 +191,7 @@ static netdev_tx_t ipddp_xmit(struct sk_buff *skb, struct net_device *dev)
530 + */
531 + static int ipddp_create(struct ipddp_route *new_rt)
532 + {
533 +- struct ipddp_route *rt = kmalloc(sizeof(*rt), GFP_KERNEL);
534 ++ struct ipddp_route *rt = kzalloc(sizeof(*rt), GFP_KERNEL);
535 +
536 + if (rt == NULL)
537 + return -ENOMEM;
538 +diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
539 +index 8860e74aa28f..027705117086 100644
540 +--- a/drivers/net/ethernet/broadcom/bcmsysport.c
541 ++++ b/drivers/net/ethernet/broadcom/bcmsysport.c
542 +@@ -1045,15 +1045,6 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
543 + goto out;
544 + }
545 +
546 +- /* Insert TSB and checksum infos */
547 +- if (priv->tsb_en) {
548 +- skb = bcm_sysport_insert_tsb(skb, dev);
549 +- if (!skb) {
550 +- ret = NETDEV_TX_OK;
551 +- goto out;
552 +- }
553 +- }
554 +-
555 + /* The Ethernet switch we are interfaced with needs packets to be at
556 + * least 64 bytes (including FCS) otherwise they will be discarded when
557 + * they enter the switch port logic. When Broadcom tags are enabled, we
558 +@@ -1061,13 +1052,21 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
559 + * (including FCS and tag) because the length verification is done after
560 + * the Broadcom tag is stripped off the ingress packet.
561 + */
562 +- if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
563 ++ if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
564 + ret = NETDEV_TX_OK;
565 + goto out;
566 + }
567 +
568 +- skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ?
569 +- ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len;
570 ++ /* Insert TSB and checksum infos */
571 ++ if (priv->tsb_en) {
572 ++ skb = bcm_sysport_insert_tsb(skb, dev);
573 ++ if (!skb) {
574 ++ ret = NETDEV_TX_OK;
575 ++ goto out;
576 ++ }
577 ++ }
578 ++
579 ++ skb_len = skb->len;
580 +
581 + mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
582 + if (dma_mapping_error(kdev, mapping)) {
583 +diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
584 +index ab716042bdd2..458e2d97d096 100644
585 +--- a/drivers/net/ethernet/freescale/fec_main.c
586 ++++ b/drivers/net/ethernet/freescale/fec_main.c
587 +@@ -2968,6 +2968,7 @@ static void set_multicast_list(struct net_device *ndev)
588 + struct netdev_hw_addr *ha;
589 + unsigned int i, bit, data, crc, tmp;
590 + unsigned char hash;
591 ++ unsigned int hash_high = 0, hash_low = 0;
592 +
593 + if (ndev->flags & IFF_PROMISC) {
594 + tmp = readl(fep->hwp + FEC_R_CNTRL);
595 +@@ -2990,11 +2991,7 @@ static void set_multicast_list(struct net_device *ndev)
596 + return;
597 + }
598 +
599 +- /* Clear filter and add the addresses in hash register
600 +- */
601 +- writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
602 +- writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
603 +-
604 ++ /* Add the addresses in hash register */
605 + netdev_for_each_mc_addr(ha, ndev) {
606 + /* calculate crc32 value of mac address */
607 + crc = 0xffffffff;
608 +@@ -3012,16 +3009,14 @@ static void set_multicast_list(struct net_device *ndev)
609 + */
610 + hash = (crc >> (32 - HASH_BITS)) & 0x3f;
611 +
612 +- if (hash > 31) {
613 +- tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
614 +- tmp |= 1 << (hash - 32);
615 +- writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
616 +- } else {
617 +- tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
618 +- tmp |= 1 << hash;
619 +- writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
620 +- }
621 ++ if (hash > 31)
622 ++ hash_high |= 1 << (hash - 32);
623 ++ else
624 ++ hash_low |= 1 << hash;
625 + }
626 ++
627 ++ writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
628 ++ writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
629 + }
630 +
631 + /* Set a MAC change in hardware. */
632 +diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
633 +index 585e90f8341d..f735dfcb64ae 100644
634 +--- a/drivers/net/ethernet/renesas/ravb_main.c
635 ++++ b/drivers/net/ethernet/renesas/ravb_main.c
636 +@@ -831,14 +831,10 @@ static int ravb_poll(struct napi_struct *napi, int budget)
637 + /* Receive error message handling */
638 + priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
639 + priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
640 +- if (priv->rx_over_errors != ndev->stats.rx_over_errors) {
641 ++ if (priv->rx_over_errors != ndev->stats.rx_over_errors)
642 + ndev->stats.rx_over_errors = priv->rx_over_errors;
643 +- netif_err(priv, rx_err, ndev, "Receive Descriptor Empty\n");
644 +- }
645 +- if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) {
646 ++ if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
647 + ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
648 +- netif_err(priv, rx_err, ndev, "Receive FIFO Overflow\n");
649 +- }
650 + out:
651 + return budget - quota;
652 + }
653 +diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
654 +index 34a062ccb11d..fd221cc4cb79 100644
655 +--- a/drivers/net/xen-netfront.c
656 ++++ b/drivers/net/xen-netfront.c
657 +@@ -1840,27 +1840,19 @@ static int talk_to_netback(struct xenbus_device *dev,
658 + xennet_destroy_queues(info);
659 +
660 + err = xennet_create_queues(info, &num_queues);
661 +- if (err < 0)
662 +- goto destroy_ring;
663 ++ if (err < 0) {
664 ++ xenbus_dev_fatal(dev, err, "creating queues");
665 ++ kfree(info->queues);
666 ++ info->queues = NULL;
667 ++ goto out;
668 ++ }
669 +
670 + /* Create shared ring, alloc event channel -- for each queue */
671 + for (i = 0; i < num_queues; ++i) {
672 + queue = &info->queues[i];
673 + err = setup_netfront(dev, queue, feature_split_evtchn);
674 +- if (err) {
675 +- /* setup_netfront() will tidy up the current
676 +- * queue on error, but we need to clean up
677 +- * those already allocated.
678 +- */
679 +- if (i > 0) {
680 +- rtnl_lock();
681 +- netif_set_real_num_tx_queues(info->netdev, i);
682 +- rtnl_unlock();
683 +- goto destroy_ring;
684 +- } else {
685 +- goto out;
686 +- }
687 +- }
688 ++ if (err)
689 ++ goto destroy_ring;
690 + }
691 +
692 + again:
693 +@@ -1950,9 +1942,9 @@ abort_transaction_no_dev_fatal:
694 + xenbus_transaction_end(xbt, 1);
695 + destroy_ring:
696 + xennet_disconnect_backend(info);
697 +- kfree(info->queues);
698 +- info->queues = NULL;
699 ++ xennet_destroy_queues(info);
700 + out:
701 ++ device_unregister(&dev->dev);
702 + return err;
703 + }
704 +
705 +diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
706 +index d22de4c8c399..3de39bd794b6 100644
707 +--- a/drivers/spi/spi-sh-msiof.c
708 ++++ b/drivers/spi/spi-sh-msiof.c
709 +@@ -863,7 +863,7 @@ static int sh_msiof_transfer_one(struct spi_master *master,
710 + break;
711 + copy32 = copy_bswap32;
712 + } else if (bits <= 16) {
713 +- if (l & 1)
714 ++ if (l & 3)
715 + break;
716 + copy32 = copy_wswap32;
717 + } else {
718 +diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
719 +index 7df978371c9a..44fffbd1bc74 100644
720 +--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
721 ++++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
722 +@@ -402,15 +402,13 @@ static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
723 + result = VM_FAULT_LOCKED;
724 + break;
725 + case -ENODATA:
726 ++ case -EAGAIN:
727 + case -EFAULT:
728 + result = VM_FAULT_NOPAGE;
729 + break;
730 + case -ENOMEM:
731 + result = VM_FAULT_OOM;
732 + break;
733 +- case -EAGAIN:
734 +- result = VM_FAULT_RETRY;
735 +- break;
736 + default:
737 + result = VM_FAULT_SIGBUS;
738 + break;
739 +diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
740 +index 89474399ab89..1d5a9e5fb069 100644
741 +--- a/drivers/tty/serial/8250/8250_fintek.c
742 ++++ b/drivers/tty/serial/8250/8250_fintek.c
743 +@@ -117,7 +117,7 @@ static int fintek_8250_rs485_config(struct uart_port *port,
744 +
745 + if ((!!(rs485->flags & SER_RS485_RTS_ON_SEND)) ==
746 + (!!(rs485->flags & SER_RS485_RTS_AFTER_SEND)))
747 +- rs485->flags &= SER_RS485_ENABLED;
748 ++ rs485->flags &= ~SER_RS485_ENABLED;
749 + else
750 + config |= RS485_URA;
751 +
752 +diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
753 +index cf3da51a3536..7025f47fa284 100644
754 +--- a/drivers/tty/serial/8250/8250_pci.c
755 ++++ b/drivers/tty/serial/8250/8250_pci.c
756 +@@ -5797,6 +5797,9 @@ static struct pci_device_id serial_pci_tbl[] = {
757 + { PCI_DEVICE(0x1601, 0x0800), .driver_data = pbn_b0_4_1250000 },
758 + { PCI_DEVICE(0x1601, 0xa801), .driver_data = pbn_b0_4_1250000 },
759 +
760 ++ /* Amazon PCI serial device */
761 ++ { PCI_DEVICE(0x1d0f, 0x8250), .driver_data = pbn_b0_1_115200 },
762 ++
763 + /*
764 + * These entries match devices with class COMMUNICATION_SERIAL,
765 + * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
766 +diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
767 +index 56ccbcefdd85..d42d66b72d5a 100644
768 +--- a/drivers/tty/serial/8250/8250_port.c
769 ++++ b/drivers/tty/serial/8250/8250_port.c
770 +@@ -2223,8 +2223,11 @@ static void serial8250_set_divisor(struct uart_port *port, unsigned int baud,
771 + serial_dl_write(up, quot);
772 +
773 + /* XR17V35x UARTs have an extra fractional divisor register (DLD) */
774 +- if (up->port.type == PORT_XR17V35X)
775 ++ if (up->port.type == PORT_XR17V35X) {
776 ++ /* Preserve bits not related to baudrate; DLD[7:4]. */
777 ++ quot_frac |= serial_port_in(port, 0x2) & 0xf0;
778 + serial_port_out(port, 0x2, quot_frac);
779 ++ }
780 + }
781 +
782 + static unsigned int
783 +diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
784 +index 1fa4128eb88e..b07f864f68e8 100644
785 +--- a/drivers/tty/sysrq.c
786 ++++ b/drivers/tty/sysrq.c
787 +@@ -237,8 +237,10 @@ static void sysrq_handle_showallcpus(int key)
788 + * architecture has no support for it:
789 + */
790 + if (!trigger_all_cpu_backtrace()) {
791 +- struct pt_regs *regs = get_irq_regs();
792 ++ struct pt_regs *regs = NULL;
793 +
794 ++ if (in_irq())
795 ++ regs = get_irq_regs();
796 + if (regs) {
797 + pr_info("CPU%d:\n", smp_processor_id());
798 + show_regs(regs);
799 +@@ -257,7 +259,10 @@ static struct sysrq_key_op sysrq_showallcpus_op = {
800 +
801 + static void sysrq_handle_showregs(int key)
802 + {
803 +- struct pt_regs *regs = get_irq_regs();
804 ++ struct pt_regs *regs = NULL;
805 ++
806 ++ if (in_irq())
807 ++ regs = get_irq_regs();
808 + if (regs)
809 + show_regs(regs);
810 + perf_event_print_debug();
811 +diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
812 +index 5172bec612eb..b1ece1f618c8 100644
813 +--- a/drivers/usb/core/config.c
814 ++++ b/drivers/usb/core/config.c
815 +@@ -871,14 +871,25 @@ void usb_release_bos_descriptor(struct usb_device *dev)
816 + }
817 + }
818 +
819 ++static const __u8 bos_desc_len[256] = {
820 ++ [USB_CAP_TYPE_WIRELESS_USB] = USB_DT_USB_WIRELESS_CAP_SIZE,
821 ++ [USB_CAP_TYPE_EXT] = USB_DT_USB_EXT_CAP_SIZE,
822 ++ [USB_SS_CAP_TYPE] = USB_DT_USB_SS_CAP_SIZE,
823 ++ [USB_SSP_CAP_TYPE] = USB_DT_USB_SSP_CAP_SIZE(1),
824 ++ [CONTAINER_ID_TYPE] = USB_DT_USB_SS_CONTN_ID_SIZE,
825 ++ [USB_PTM_CAP_TYPE] = USB_DT_USB_PTM_ID_SIZE,
826 ++};
827 ++
828 + /* Get BOS descriptor set */
829 + int usb_get_bos_descriptor(struct usb_device *dev)
830 + {
831 + struct device *ddev = &dev->dev;
832 + struct usb_bos_descriptor *bos;
833 + struct usb_dev_cap_header *cap;
834 ++ struct usb_ssp_cap_descriptor *ssp_cap;
835 + unsigned char *buffer;
836 +- int length, total_len, num, i;
837 ++ int length, total_len, num, i, ssac;
838 ++ __u8 cap_type;
839 + int ret;
840 +
841 + bos = kzalloc(sizeof(struct usb_bos_descriptor), GFP_KERNEL);
842 +@@ -931,7 +942,13 @@ int usb_get_bos_descriptor(struct usb_device *dev)
843 + dev->bos->desc->bNumDeviceCaps = i;
844 + break;
845 + }
846 ++ cap_type = cap->bDevCapabilityType;
847 + length = cap->bLength;
848 ++ if (bos_desc_len[cap_type] && length < bos_desc_len[cap_type]) {
849 ++ dev->bos->desc->bNumDeviceCaps = i;
850 ++ break;
851 ++ }
852 ++
853 + total_len -= length;
854 +
855 + if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
856 +@@ -939,7 +956,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
857 + continue;
858 + }
859 +
860 +- switch (cap->bDevCapabilityType) {
861 ++ switch (cap_type) {
862 + case USB_CAP_TYPE_WIRELESS_USB:
863 + /* Wireless USB cap descriptor is handled by wusb */
864 + break;
865 +@@ -952,13 +969,19 @@ int usb_get_bos_descriptor(struct usb_device *dev)
866 + (struct usb_ss_cap_descriptor *)buffer;
867 + break;
868 + case USB_SSP_CAP_TYPE:
869 +- dev->bos->ssp_cap =
870 +- (struct usb_ssp_cap_descriptor *)buffer;
871 ++ ssp_cap = (struct usb_ssp_cap_descriptor *)buffer;
872 ++ ssac = (le32_to_cpu(ssp_cap->bmAttributes) &
873 ++ USB_SSP_SUBLINK_SPEED_ATTRIBS) + 1;
874 ++ if (length >= USB_DT_USB_SSP_CAP_SIZE(ssac))
875 ++ dev->bos->ssp_cap = ssp_cap;
876 + break;
877 + case CONTAINER_ID_TYPE:
878 + dev->bos->ss_id =
879 + (struct usb_ss_container_id_descriptor *)buffer;
880 + break;
881 ++ case USB_PTM_CAP_TYPE:
882 ++ dev->bos->ptm_cap =
883 ++ (struct usb_ptm_cap_descriptor *)buffer;
884 + default:
885 + break;
886 + }
887 +diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
888 +index f4c3a37e00ba..ad2e6d235c30 100644
889 +--- a/drivers/usb/core/devio.c
890 ++++ b/drivers/usb/core/devio.c
891 +@@ -113,42 +113,38 @@ enum snoop_when {
892 + #define USB_DEVICE_DEV MKDEV(USB_DEVICE_MAJOR, 0)
893 +
894 + /* Limit on the total amount of memory we can allocate for transfers */
895 +-static unsigned usbfs_memory_mb = 16;
896 ++static u32 usbfs_memory_mb = 16;
897 + module_param(usbfs_memory_mb, uint, 0644);
898 + MODULE_PARM_DESC(usbfs_memory_mb,
899 + "maximum MB allowed for usbfs buffers (0 = no limit)");
900 +
901 + /* Hard limit, necessary to avoid arithmetic overflow */
902 +-#define USBFS_XFER_MAX (UINT_MAX / 2 - 1000000)
903 ++#define USBFS_XFER_MAX (UINT_MAX / 2 - 1000000)
904 +
905 +-static atomic_t usbfs_memory_usage; /* Total memory currently allocated */
906 ++static atomic64_t usbfs_memory_usage; /* Total memory currently allocated */
907 +
908 + /* Check whether it's okay to allocate more memory for a transfer */
909 +-static int usbfs_increase_memory_usage(unsigned amount)
910 ++static int usbfs_increase_memory_usage(u64 amount)
911 + {
912 +- unsigned lim;
913 ++ u64 lim;
914 +
915 +- /*
916 +- * Convert usbfs_memory_mb to bytes, avoiding overflows.
917 +- * 0 means use the hard limit (effectively unlimited).
918 +- */
919 + lim = ACCESS_ONCE(usbfs_memory_mb);
920 +- if (lim == 0 || lim > (USBFS_XFER_MAX >> 20))
921 +- lim = USBFS_XFER_MAX;
922 +- else
923 +- lim <<= 20;
924 ++ lim <<= 20;
925 +
926 +- atomic_add(amount, &usbfs_memory_usage);
927 +- if (atomic_read(&usbfs_memory_usage) <= lim)
928 +- return 0;
929 +- atomic_sub(amount, &usbfs_memory_usage);
930 +- return -ENOMEM;
931 ++ atomic64_add(amount, &usbfs_memory_usage);
932 ++
933 ++ if (lim > 0 && atomic64_read(&usbfs_memory_usage) > lim) {
934 ++ atomic64_sub(amount, &usbfs_memory_usage);
935 ++ return -ENOMEM;
936 ++ }
937 ++
938 ++ return 0;
939 + }
940 +
941 + /* Memory for a transfer is being deallocated */
942 +-static void usbfs_decrease_memory_usage(unsigned amount)
943 ++static void usbfs_decrease_memory_usage(u64 amount)
944 + {
945 +- atomic_sub(amount, &usbfs_memory_usage);
946 ++ atomic64_sub(amount, &usbfs_memory_usage);
947 + }
948 +
949 + static int connected(struct usb_dev_state *ps)
950 +@@ -1077,7 +1073,7 @@ static int proc_bulk(struct usb_dev_state *ps, void __user *arg)
951 + if (!usb_maxpacket(dev, pipe, !(bulk.ep & USB_DIR_IN)))
952 + return -EINVAL;
953 + len1 = bulk.len;
954 +- if (len1 >= USBFS_XFER_MAX)
955 ++ if (len1 >= (INT_MAX - sizeof(struct urb)))
956 + return -EINVAL;
957 + ret = usbfs_increase_memory_usage(len1 + sizeof(struct urb));
958 + if (ret)
959 +@@ -1297,13 +1293,19 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
960 + int number_of_packets = 0;
961 + unsigned int stream_id = 0;
962 + void *buf;
963 +-
964 +- if (uurb->flags & ~(USBDEVFS_URB_ISO_ASAP |
965 +- USBDEVFS_URB_SHORT_NOT_OK |
966 ++ unsigned long mask = USBDEVFS_URB_SHORT_NOT_OK |
967 + USBDEVFS_URB_BULK_CONTINUATION |
968 + USBDEVFS_URB_NO_FSBR |
969 + USBDEVFS_URB_ZERO_PACKET |
970 +- USBDEVFS_URB_NO_INTERRUPT))
971 ++ USBDEVFS_URB_NO_INTERRUPT;
972 ++ /* USBDEVFS_URB_ISO_ASAP is a special case */
973 ++ if (uurb->type == USBDEVFS_URB_TYPE_ISO)
974 ++ mask |= USBDEVFS_URB_ISO_ASAP;
975 ++
976 ++ if (uurb->flags & ~mask)
977 ++ return -EINVAL;
978 ++
979 ++ if ((unsigned int)uurb->buffer_length >= USBFS_XFER_MAX)
980 + return -EINVAL;
981 + if (uurb->buffer_length > 0 && !uurb->buffer)
982 + return -EINVAL;
983 +@@ -1424,10 +1426,6 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
984 + return -EINVAL;
985 + }
986 +
987 +- if (uurb->buffer_length >= USBFS_XFER_MAX) {
988 +- ret = -EINVAL;
989 +- goto error;
990 +- }
991 + if (uurb->buffer_length > 0 &&
992 + !access_ok(is_in ? VERIFY_WRITE : VERIFY_READ,
993 + uurb->buffer, uurb->buffer_length)) {
994 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
995 +index 22e61786354a..0f38f577c047 100644
996 +--- a/drivers/usb/core/hub.c
997 ++++ b/drivers/usb/core/hub.c
998 +@@ -4858,6 +4858,15 @@ loop:
999 + usb_put_dev(udev);
1000 + if ((status == -ENOTCONN) || (status == -ENOTSUPP))
1001 + break;
1002 ++
1003 ++ /* When halfway through our retry count, power-cycle the port */
1004 ++ if (i == (SET_CONFIG_TRIES / 2) - 1) {
1005 ++ dev_info(&port_dev->dev, "attempt power cycle\n");
1006 ++ usb_hub_set_port_power(hdev, hub, port1, false);
1007 ++ msleep(2 * hub_power_on_good_delay(hub));
1008 ++ usb_hub_set_port_power(hdev, hub, port1, true);
1009 ++ msleep(hub_power_on_good_delay(hub));
1010 ++ }
1011 + }
1012 + if (hub->hdev->parent ||
1013 + !hcd->driver->port_handed_over ||
1014 +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
1015 +index 37c418e581fb..50010282c010 100644
1016 +--- a/drivers/usb/core/quirks.c
1017 ++++ b/drivers/usb/core/quirks.c
1018 +@@ -151,6 +151,9 @@ static const struct usb_device_id usb_quirk_list[] = {
1019 + /* appletouch */
1020 + { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
1021 +
1022 ++ /* Genesys Logic hub, internally used by KY-688 USB 3.1 Type-C Hub */
1023 ++ { USB_DEVICE(0x05e3, 0x0612), .driver_info = USB_QUIRK_NO_LPM },
1024 ++
1025 + /* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
1026 + { USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },
1027 +
1028 +diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
1029 +index b26b96e25a13..8e0b9377644b 100644
1030 +--- a/drivers/usb/host/ehci-dbg.c
1031 ++++ b/drivers/usb/host/ehci-dbg.c
1032 +@@ -851,7 +851,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
1033 + default: /* unknown */
1034 + break;
1035 + }
1036 +- temp = (cap >> 8) & 0xff;
1037 ++ offset = (cap >> 8) & 0xff;
1038 + }
1039 + }
1040 + #endif
1041 +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
1042 +index cf6bbaff42d0..6a07570a90e6 100644
1043 +--- a/drivers/usb/host/xhci-mem.c
1044 ++++ b/drivers/usb/host/xhci-mem.c
1045 +@@ -981,6 +981,12 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
1046 + if (!vdev)
1047 + return;
1048 +
1049 ++ if (vdev->real_port == 0 ||
1050 ++ vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
1051 ++ xhci_dbg(xhci, "Bad vdev->real_port.\n");
1052 ++ goto out;
1053 ++ }
1054 ++
1055 + tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
1056 + list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
1057 + /* is this a hub device that added a tt_info to the tts list */
1058 +@@ -994,6 +1000,7 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
1059 + }
1060 + }
1061 + }
1062 ++out:
1063 + /* we are now at a leaf device */
1064 + xhci_free_virt_device(xhci, slot_id);
1065 + }
1066 +diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c
1067 +index ab5d364f6e8c..335a1ef35224 100644
1068 +--- a/drivers/usb/phy/phy-tahvo.c
1069 ++++ b/drivers/usb/phy/phy-tahvo.c
1070 +@@ -368,7 +368,8 @@ static int tahvo_usb_probe(struct platform_device *pdev)
1071 + tu->extcon = devm_extcon_dev_allocate(&pdev->dev, tahvo_cable);
1072 + if (IS_ERR(tu->extcon)) {
1073 + dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
1074 +- return -ENOMEM;
1075 ++ ret = PTR_ERR(tu->extcon);
1076 ++ goto err_disable_clk;
1077 + }
1078 +
1079 + ret = devm_extcon_dev_register(&pdev->dev, tu->extcon);
1080 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1081 +index db3d34c2c82e..ffa8ec917ff5 100644
1082 +--- a/drivers/usb/serial/option.c
1083 ++++ b/drivers/usb/serial/option.c
1084 +@@ -241,6 +241,7 @@ static void option_instat_callback(struct urb *urb);
1085 + /* These Quectel products use Quectel's vendor ID */
1086 + #define QUECTEL_PRODUCT_EC21 0x0121
1087 + #define QUECTEL_PRODUCT_EC25 0x0125
1088 ++#define QUECTEL_PRODUCT_BG96 0x0296
1089 +
1090 + #define CMOTECH_VENDOR_ID 0x16d8
1091 + #define CMOTECH_PRODUCT_6001 0x6001
1092 +@@ -1185,6 +1186,8 @@ static const struct usb_device_id option_ids[] = {
1093 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1094 + { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
1095 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1096 ++ { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
1097 ++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1098 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
1099 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
1100 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
1101 +diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h
1102 +index a155cd02bce2..ecc83c405a8b 100644
1103 +--- a/drivers/usb/storage/uas-detect.h
1104 ++++ b/drivers/usb/storage/uas-detect.h
1105 +@@ -111,6 +111,10 @@ static int uas_use_uas_driver(struct usb_interface *intf,
1106 + }
1107 + }
1108 +
1109 ++ /* All Seagate disk enclosures have broken ATA pass-through support */
1110 ++ if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bc2)
1111 ++ flags |= US_FL_NO_ATA_1X;
1112 ++
1113 + usb_stor_adjust_quirks(udev, &flags);
1114 +
1115 + if (flags & US_FL_IGNORE_UAS) {
1116 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
1117 +index 6fef53f18dcf..8ef6f70c9e25 100644
1118 +--- a/fs/nfs/nfs4proc.c
1119 ++++ b/fs/nfs/nfs4proc.c
1120 +@@ -38,7 +38,6 @@
1121 + #include <linux/mm.h>
1122 + #include <linux/delay.h>
1123 + #include <linux/errno.h>
1124 +-#include <linux/file.h>
1125 + #include <linux/string.h>
1126 + #include <linux/ratelimit.h>
1127 + #include <linux/printk.h>
1128 +@@ -5738,7 +5737,6 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
1129 + p->server = server;
1130 + atomic_inc(&lsp->ls_count);
1131 + p->ctx = get_nfs_open_context(ctx);
1132 +- get_file(fl->fl_file);
1133 + memcpy(&p->fl, fl, sizeof(p->fl));
1134 + return p;
1135 + out_free_seqid:
1136 +@@ -5851,7 +5849,6 @@ static void nfs4_lock_release(void *calldata)
1137 + nfs_free_seqid(data->arg.lock_seqid);
1138 + nfs4_put_lock_state(data->lsp);
1139 + put_nfs_open_context(data->ctx);
1140 +- fput(data->fl.fl_file);
1141 + kfree(data);
1142 + dprintk("%s: done!\n", __func__);
1143 + }
1144 +diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
1145 +index e8d1d6c5000c..9a0b219ff74d 100644
1146 +--- a/fs/nfs/nfs4state.c
1147 ++++ b/fs/nfs/nfs4state.c
1148 +@@ -1680,7 +1680,6 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
1149 + break;
1150 + case -NFS4ERR_STALE_CLIENTID:
1151 + set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
1152 +- nfs4_state_clear_reclaim_reboot(clp);
1153 + nfs4_state_start_reclaim_reboot(clp);
1154 + break;
1155 + case -NFS4ERR_EXPIRED:
1156 +diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
1157 +index 501ecc4a1ac4..1d738723a41a 100644
1158 +--- a/fs/ocfs2/file.c
1159 ++++ b/fs/ocfs2/file.c
1160 +@@ -1166,13 +1166,6 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
1161 + }
1162 + size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
1163 + if (size_change) {
1164 +- /*
1165 +- * Here we should wait dio to finish before inode lock
1166 +- * to avoid a deadlock between ocfs2_setattr() and
1167 +- * ocfs2_dio_end_io_write()
1168 +- */
1169 +- inode_dio_wait(inode);
1170 +-
1171 + status = ocfs2_rw_lock(inode, 1);
1172 + if (status < 0) {
1173 + mlog_errno(status);
1174 +@@ -1193,6 +1186,8 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
1175 + if (status)
1176 + goto bail_unlock;
1177 +
1178 ++ inode_dio_wait(inode);
1179 ++
1180 + if (i_size_read(inode) >= attr->ia_size) {
1181 + if (ocfs2_should_order_data(inode)) {
1182 + status = ocfs2_begin_ordered_truncate(inode,
1183 +diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
1184 +index 89d9aa9e79bf..6fe974dbe741 100644
1185 +--- a/include/linux/buffer_head.h
1186 ++++ b/include/linux/buffer_head.h
1187 +@@ -234,12 +234,10 @@ static inline int block_page_mkwrite_return(int err)
1188 + {
1189 + if (err == 0)
1190 + return VM_FAULT_LOCKED;
1191 +- if (err == -EFAULT)
1192 ++ if (err == -EFAULT || err == -EAGAIN)
1193 + return VM_FAULT_NOPAGE;
1194 + if (err == -ENOMEM)
1195 + return VM_FAULT_OOM;
1196 +- if (err == -EAGAIN)
1197 +- return VM_FAULT_RETRY;
1198 + /* -ENOSPC, -EDQUOT, -EIO ... */
1199 + return VM_FAULT_SIGBUS;
1200 + }
1201 +diff --git a/include/linux/usb.h b/include/linux/usb.h
1202 +index 8c75af6b7d5b..092b5658b9c3 100644
1203 +--- a/include/linux/usb.h
1204 ++++ b/include/linux/usb.h
1205 +@@ -330,6 +330,7 @@ struct usb_host_bos {
1206 + struct usb_ss_cap_descriptor *ss_cap;
1207 + struct usb_ssp_cap_descriptor *ssp_cap;
1208 + struct usb_ss_container_id_descriptor *ss_id;
1209 ++ struct usb_ptm_cap_descriptor *ptm_cap;
1210 + };
1211 +
1212 + int __usb_get_extra_descriptor(char *buffer, unsigned size,
1213 +diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
1214 +index 91ab75c1013c..ec6c8543732f 100644
1215 +--- a/include/uapi/linux/usb/ch9.h
1216 ++++ b/include/uapi/linux/usb/ch9.h
1217 +@@ -812,6 +812,8 @@ struct usb_wireless_cap_descriptor { /* Ultra Wide Band */
1218 + __u8 bReserved;
1219 + } __attribute__((packed));
1220 +
1221 ++#define USB_DT_USB_WIRELESS_CAP_SIZE 11
1222 ++
1223 + /* USB 2.0 Extension descriptor */
1224 + #define USB_CAP_TYPE_EXT 2
1225 +
1226 +@@ -895,6 +897,22 @@ struct usb_ssp_cap_descriptor {
1227 + #define USB_SSP_SUBLINK_SPEED_LSM (0xff << 16) /* Lanespeed mantissa */
1228 + } __attribute__((packed));
1229 +
1230 ++/*
1231 ++ * Precision time measurement capability descriptor: advertised by devices and
1232 ++ * hubs that support PTM
1233 ++ */
1234 ++#define USB_PTM_CAP_TYPE 0xb
1235 ++struct usb_ptm_cap_descriptor {
1236 ++ __u8 bLength;
1237 ++ __u8 bDescriptorType;
1238 ++ __u8 bDevCapabilityType;
1239 ++} __attribute__((packed));
1240 ++
1241 ++/*
1242 ++ * The size of the descriptor for the Sublink Speed Attribute Count
1243 ++ * (SSAC) specified in bmAttributes[4:0].
1244 ++ */
1245 ++#define USB_DT_USB_SSP_CAP_SIZE(ssac) (16 + ssac * 4)
1246 +
1247 + /*-------------------------------------------------------------------------*/
1248 +
1249 +@@ -991,6 +1009,7 @@ enum usb3_link_state {
1250 + USB3_LPM_U3
1251 + };
1252 +
1253 ++#define USB_DT_USB_PTM_ID_SIZE 3
1254 + /*
1255 + * A U1 timeout of 0x0 means the parent hub will reject any transitions to U1.
1256 + * 0xff means the parent hub will accept transitions to U1, but will not
1257 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
1258 +index 9e8d70160d20..71290fb7d500 100644
1259 +--- a/net/ipv4/tcp_input.c
1260 ++++ b/net/ipv4/tcp_input.c
1261 +@@ -4942,7 +4942,7 @@ static void tcp_check_space(struct sock *sk)
1262 + if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
1263 + sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
1264 + /* pairs with tcp_poll() */
1265 +- smp_mb__after_atomic();
1266 ++ smp_mb();
1267 + if (sk->sk_socket &&
1268 + test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
1269 + tcp_new_space(sk);
1270 +diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
1271 +index 7ebb14def2cb..f58ad70f693e 100644
1272 +--- a/net/ipv6/ip6_vti.c
1273 ++++ b/net/ipv6/ip6_vti.c
1274 +@@ -189,12 +189,12 @@ static int vti6_tnl_create2(struct net_device *dev)
1275 + struct vti6_net *ip6n = net_generic(net, vti6_net_id);
1276 + int err;
1277 +
1278 ++ dev->rtnl_link_ops = &vti6_link_ops;
1279 + err = register_netdevice(dev);
1280 + if (err < 0)
1281 + goto out;
1282 +
1283 + strcpy(t->parms.name, dev->name);
1284 +- dev->rtnl_link_ops = &vti6_link_ops;
1285 +
1286 + dev_hold(dev);
1287 + vti6_tnl_link(ip6n, t);
1288 +diff --git a/net/sctp/debug.c b/net/sctp/debug.c
1289 +index 95d7b15dad21..e371a0d90068 100644
1290 +--- a/net/sctp/debug.c
1291 ++++ b/net/sctp/debug.c
1292 +@@ -166,7 +166,7 @@ static const char *const sctp_timer_tbl[] = {
1293 + /* Lookup timer debug name. */
1294 + const char *sctp_tname(const sctp_subtype_t id)
1295 + {
1296 +- if (id.timeout <= SCTP_EVENT_TIMEOUT_MAX)
1297 ++ if (id.timeout < ARRAY_SIZE(sctp_timer_tbl))
1298 + return sctp_timer_tbl[id.timeout];
1299 + return "unknown_timer";
1300 + }
1301 +diff --git a/net/tipc/server.c b/net/tipc/server.c
1302 +index 50f5b0ca7b3c..c416e5184a3f 100644
1303 +--- a/net/tipc/server.c
1304 ++++ b/net/tipc/server.c
1305 +@@ -618,14 +618,12 @@ int tipc_server_start(struct tipc_server *s)
1306 + void tipc_server_stop(struct tipc_server *s)
1307 + {
1308 + struct tipc_conn *con;
1309 +- int total = 0;
1310 + int id;
1311 +
1312 + spin_lock_bh(&s->idr_lock);
1313 +- for (id = 0; total < s->idr_in_use; id++) {
1314 ++ for (id = 0; s->idr_in_use; id++) {
1315 + con = idr_find(&s->conn_idr, id);
1316 + if (con) {
1317 +- total++;
1318 + spin_unlock_bh(&s->idr_lock);
1319 + tipc_close_conn(con);
1320 + spin_lock_bh(&s->idr_lock);
1321 +diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
1322 +index c21f09bf8b99..98289ba2a2e6 100644
1323 +--- a/security/integrity/ima/ima_main.c
1324 ++++ b/security/integrity/ima/ima_main.c
1325 +@@ -52,6 +52,8 @@ static int __init hash_setup(char *str)
1326 + ima_hash_algo = HASH_ALGO_SHA1;
1327 + else if (strncmp(str, "md5", 3) == 0)
1328 + ima_hash_algo = HASH_ALGO_MD5;
1329 ++ else
1330 ++ return 1;
1331 + goto out;
1332 + }
1333 +
1334 +@@ -61,6 +63,8 @@ static int __init hash_setup(char *str)
1335 + break;
1336 + }
1337 + }
1338 ++ if (i == HASH_ALGO__LAST)
1339 ++ return 1;
1340 + out:
1341 + hash_setup_done = 1;
1342 + return 1;
1343 +diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c
1344 +index 638875a0960a..79547c225c14 100644
1345 +--- a/tools/perf/tests/attr.c
1346 ++++ b/tools/perf/tests/attr.c
1347 +@@ -150,7 +150,7 @@ static int run_dir(const char *d, const char *perf)
1348 + snprintf(cmd, 3*PATH_MAX, PYTHON " %s/attr.py -d %s/attr/ -p %s %.*s",
1349 + d, d, perf, vcnt, v);
1350 +
1351 +- return system(cmd);
1352 ++ return system(cmd) ? TEST_FAIL : TEST_OK;
1353 + }
1354 +
1355 + int test__attr(void)
1356 +diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c
1357 +index 923e59eb82c7..412b845412d2 100644
1358 +--- a/tools/testing/selftests/x86/ldt_gdt.c
1359 ++++ b/tools/testing/selftests/x86/ldt_gdt.c
1360 +@@ -351,9 +351,24 @@ static void do_simple_tests(void)
1361 + install_invalid(&desc, false);
1362 +
1363 + desc.seg_not_present = 0;
1364 +- desc.read_exec_only = 0;
1365 + desc.seg_32bit = 1;
1366 ++ desc.read_exec_only = 0;
1367 ++ desc.limit = 0xfffff;
1368 ++
1369 + install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S | AR_P | AR_DB);
1370 ++
1371 ++ desc.limit_in_pages = 1;
1372 ++
1373 ++ install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S | AR_P | AR_DB | AR_G);
1374 ++ desc.read_exec_only = 1;
1375 ++ install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA | AR_S | AR_P | AR_DB | AR_G);
1376 ++ desc.contents = 1;
1377 ++ desc.read_exec_only = 0;
1378 ++ install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA_EXPDOWN | AR_S | AR_P | AR_DB | AR_G);
1379 ++ desc.read_exec_only = 1;
1380 ++ install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA_EXPDOWN | AR_S | AR_P | AR_DB | AR_G);
1381 ++
1382 ++ desc.limit = 0;
1383 + install_invalid(&desc, true);
1384 + }
1385 +
1386 +diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
1387 +index a7b9022b5c8f..7f38db2a46c8 100644
1388 +--- a/virt/kvm/arm/arch_timer.c
1389 ++++ b/virt/kvm/arm/arch_timer.c
1390 +@@ -84,9 +84,6 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
1391 + struct kvm_vcpu *vcpu;
1392 +
1393 + vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
1394 +- vcpu->arch.timer_cpu.armed = false;
1395 +-
1396 +- WARN_ON(!kvm_timer_should_fire(vcpu));
1397 +
1398 + /*
1399 + * If the vcpu is blocked we want to wake it up so that it will see