From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.1 commit in: /
Date: Wed, 30 Nov 2016 11:46:04
Message-Id: 1480506351.5eab8276b7a473c02bb93990416e06e7c0bb7f5f.mpagano@gentoo
1 commit: 5eab8276b7a473c02bb93990416e06e7c0bb7f5f
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Nov 30 11:45:51 2016 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Nov 30 11:45:51 2016 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=5eab8276
7
8 Linux patch 4.1.36
9
10 0000_README | 4 +
11 1035_linux-4.1.36.patch | 2346 +++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 2350 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index 5bb6b6b..87cf515 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -183,6 +183,10 @@ Patch: 1034_linux-4.1.35.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.1.35
21
22 +Patch: 1035_linux-4.1.36.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.1.36
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1035_linux-4.1.36.patch b/1035_linux-4.1.36.patch
31 new file mode 100644
32 index 0000000..57e0271
33 --- /dev/null
34 +++ b/1035_linux-4.1.36.patch
35 @@ -0,0 +1,2346 @@
36 +diff --git a/Documentation/x86/exception-tables.txt b/Documentation/x86/exception-tables.txt
37 +index 32901aa36f0a..e396bcd8d830 100644
38 +--- a/Documentation/x86/exception-tables.txt
39 ++++ b/Documentation/x86/exception-tables.txt
40 +@@ -290,3 +290,38 @@ Due to the way that the exception table is built and needs to be ordered,
41 + only use exceptions for code in the .text section. Any other section
42 + will cause the exception table to not be sorted correctly, and the
43 + exceptions will fail.
44 ++
45 ++Things changed when 64-bit support was added to x86 Linux. Rather than
46 ++double the size of the exception table by expanding the two entries
47 ++from 32-bits to 64 bits, a clever trick was used to store addresses
48 ++as relative offsets from the table itself. The assembly code changed
49 ++from:
50 ++ .long 1b,3b
51 ++to:
52 ++ .long (from) - .
53 ++ .long (to) - .
54 ++
55 ++and the C-code that uses these values converts back to absolute addresses
56 ++like this:
57 ++
58 ++ ex_insn_addr(const struct exception_table_entry *x)
59 ++ {
60 ++ return (unsigned long)&x->insn + x->insn;
61 ++ }
62 ++
63 ++In v4.6 the exception table entry was expanded with a new field "handler".
64 ++This is also 32-bits wide and contains a third relative function
65 ++pointer which points to one of:
66 ++
67 ++1) int ex_handler_default(const struct exception_table_entry *fixup)
68 ++ This is legacy case that just jumps to the fixup code
69 ++2) int ex_handler_fault(const struct exception_table_entry *fixup)
70 ++ This case provides the fault number of the trap that occurred at
71 ++ entry->insn. It is used to distinguish page faults from machine
72 ++ check.
73 ++3) int ex_handler_ext(const struct exception_table_entry *fixup)
74 ++ This case is used for uaccess_err ... we need to set a flag
75 ++ in the task structure. Before the handler functions existed this
76 ++ case was handled by adding a large offset to the fixup to tag
77 ++ it as special.
78 ++More functions can easily be added.
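
The decode step described in the documentation text above can be summarized in one listing. The helpers below mirror the ones this patch adds to arch/x86/mm/extable.c; a struct pt_regs forward declaration is included so the sketch stands on its own. Each field stores "target minus the field's own address", so adding the field's address back yields the absolute address.

struct pt_regs;

struct exception_table_entry {
	int insn, fixup, handler;	/* offsets relative to each field */
};

typedef bool (*ex_handler_t)(const struct exception_table_entry *,
			     struct pt_regs *, int);

static inline unsigned long
ex_insn_addr(const struct exception_table_entry *x)
{
	return (unsigned long)&x->insn + x->insn;
}

static inline unsigned long
ex_fixup_addr(const struct exception_table_entry *x)
{
	return (unsigned long)&x->fixup + x->fixup;
}

static inline ex_handler_t
ex_fixup_handler(const struct exception_table_entry *x)
{
	return (ex_handler_t)((unsigned long)&x->handler + x->handler);
}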
79 +diff --git a/Makefile b/Makefile
80 +index 21f657f2c4e6..aa9fbee620ff 100644
81 +--- a/Makefile
82 ++++ b/Makefile
83 +@@ -1,6 +1,6 @@
84 + VERSION = 4
85 + PATCHLEVEL = 1
86 +-SUBLEVEL = 35
87 ++SUBLEVEL = 36
88 + EXTRAVERSION =
89 + NAME = Series 4800
90 +
91 +@@ -610,6 +610,8 @@ all: vmlinux
92 + include arch/$(SRCARCH)/Makefile
93 +
94 + KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
95 ++KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
96 ++KBUILD_AFLAGS += $(call cc-option,-fno-PIE)
97 +
98 + ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
99 + KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,)
100 +diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
101 +index a9c80a2ea1a7..642934a5ae9b 100644
102 +--- a/arch/arm/include/asm/kvm_emulate.h
103 ++++ b/arch/arm/include/asm/kvm_emulate.h
104 +@@ -131,6 +131,11 @@ static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
105 + return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
106 + }
107 +
108 ++static inline bool kvm_vcpu_dabt_is_cm(struct kvm_vcpu *vcpu)
109 ++{
110 ++ return !!(kvm_vcpu_get_hsr(vcpu) & HSR_DABT_CM);
111 ++}
112 ++
113 + /* Get Access Size from a data abort */
114 + static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
115 + {
116 +diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
117 +index a33af44230da..4681b6832d9f 100644
118 +--- a/arch/arm/kvm/mmu.c
119 ++++ b/arch/arm/kvm/mmu.c
120 +@@ -1435,6 +1435,22 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
121 + }
122 +
123 + /*
124 ++ * Check for a cache maintenance operation. Since we
125 ++ * ended-up here, we know it is outside of any memory
126 ++ * slot. But we can't find out if that is for a device,
127 ++ * or if the guest is just being stupid. The only thing
128 ++ * we know for sure is that this range cannot be cached.
129 ++ *
130 ++ * So let's assume that the guest is just being
131 ++ * cautious, and skip the instruction.
132 ++ */
133 ++ if (kvm_vcpu_dabt_is_cm(vcpu)) {
134 ++ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
135 ++ ret = 1;
136 ++ goto out_unlock;
137 ++ }
138 ++
139 ++ /*
140 + * The IPA is reported as [MAX:12], so we need to
141 + * complement it with the bottom 12 bits from the
142 + * faulting VA. This is always 12 bits, irrespective
143 +diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
144 +index 3ca894ecf699..3e3c4c7a5082 100644
145 +--- a/arch/arm64/include/asm/kvm_emulate.h
146 ++++ b/arch/arm64/include/asm/kvm_emulate.h
147 +@@ -153,11 +153,6 @@ static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
148 + return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
149 + }
150 +
151 +-static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
152 +-{
153 +- return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR);
154 +-}
155 +-
156 + static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
157 + {
158 + return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
159 +@@ -178,6 +173,17 @@ static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
160 + return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
161 + }
162 +
163 ++static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
164 ++{
165 ++ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
166 ++ kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
167 ++}
168 ++
169 ++static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
170 ++{
171 ++ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
172 ++}
173 ++
174 + static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
175 + {
176 + return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
177 +diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
178 +index 4fde8c1df97f..9d096bc89287 100644
179 +--- a/arch/arm64/include/asm/percpu.h
180 ++++ b/arch/arm64/include/asm/percpu.h
181 +@@ -52,48 +52,44 @@ static inline unsigned long __percpu_##op(void *ptr, \
182 + \
183 + switch (size) { \
184 + case 1: \
185 +- do { \
186 +- asm ("//__per_cpu_" #op "_1\n" \
187 +- "ldxrb %w[ret], %[ptr]\n" \
188 ++ asm ("//__per_cpu_" #op "_1\n" \
189 ++ "1: ldxrb %w[ret], %[ptr]\n" \
190 + #asm_op " %w[ret], %w[ret], %w[val]\n" \
191 +- "stxrb %w[loop], %w[ret], %[ptr]\n" \
192 +- : [loop] "=&r" (loop), [ret] "=&r" (ret), \
193 +- [ptr] "+Q"(*(u8 *)ptr) \
194 +- : [val] "Ir" (val)); \
195 +- } while (loop); \
196 ++ " stxrb %w[loop], %w[ret], %[ptr]\n" \
197 ++ " cbnz %w[loop], 1b" \
198 ++ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
199 ++ [ptr] "+Q"(*(u8 *)ptr) \
200 ++ : [val] "Ir" (val)); \
201 + break; \
202 + case 2: \
203 +- do { \
204 +- asm ("//__per_cpu_" #op "_2\n" \
205 +- "ldxrh %w[ret], %[ptr]\n" \
206 ++ asm ("//__per_cpu_" #op "_2\n" \
207 ++ "1: ldxrh %w[ret], %[ptr]\n" \
208 + #asm_op " %w[ret], %w[ret], %w[val]\n" \
209 +- "stxrh %w[loop], %w[ret], %[ptr]\n" \
210 +- : [loop] "=&r" (loop), [ret] "=&r" (ret), \
211 +- [ptr] "+Q"(*(u16 *)ptr) \
212 +- : [val] "Ir" (val)); \
213 +- } while (loop); \
214 ++ " stxrh %w[loop], %w[ret], %[ptr]\n" \
215 ++ " cbnz %w[loop], 1b" \
216 ++ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
217 ++ [ptr] "+Q"(*(u16 *)ptr) \
218 ++ : [val] "Ir" (val)); \
219 + break; \
220 + case 4: \
221 +- do { \
222 +- asm ("//__per_cpu_" #op "_4\n" \
223 +- "ldxr %w[ret], %[ptr]\n" \
224 ++ asm ("//__per_cpu_" #op "_4\n" \
225 ++ "1: ldxr %w[ret], %[ptr]\n" \
226 + #asm_op " %w[ret], %w[ret], %w[val]\n" \
227 +- "stxr %w[loop], %w[ret], %[ptr]\n" \
228 +- : [loop] "=&r" (loop), [ret] "=&r" (ret), \
229 +- [ptr] "+Q"(*(u32 *)ptr) \
230 +- : [val] "Ir" (val)); \
231 +- } while (loop); \
232 ++ " stxr %w[loop], %w[ret], %[ptr]\n" \
233 ++ " cbnz %w[loop], 1b" \
234 ++ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
235 ++ [ptr] "+Q"(*(u32 *)ptr) \
236 ++ : [val] "Ir" (val)); \
237 + break; \
238 + case 8: \
239 +- do { \
240 +- asm ("//__per_cpu_" #op "_8\n" \
241 +- "ldxr %[ret], %[ptr]\n" \
242 ++ asm ("//__per_cpu_" #op "_8\n" \
243 ++ "1: ldxr %[ret], %[ptr]\n" \
244 + #asm_op " %[ret], %[ret], %[val]\n" \
245 +- "stxr %w[loop], %[ret], %[ptr]\n" \
246 +- : [loop] "=&r" (loop), [ret] "=&r" (ret), \
247 +- [ptr] "+Q"(*(u64 *)ptr) \
248 +- : [val] "Ir" (val)); \
249 +- } while (loop); \
250 ++ " stxr %w[loop], %[ret], %[ptr]\n" \
251 ++ " cbnz %w[loop], 1b" \
252 ++ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
253 ++ [ptr] "+Q"(*(u64 *)ptr) \
254 ++ : [val] "Ir" (val)); \
255 + break; \
256 + default: \
257 + BUILD_BUG(); \
258 +@@ -158,44 +154,40 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
259 +
260 + switch (size) {
261 + case 1:
262 +- do {
263 +- asm ("//__percpu_xchg_1\n"
264 +- "ldxrb %w[ret], %[ptr]\n"
265 +- "stxrb %w[loop], %w[val], %[ptr]\n"
266 +- : [loop] "=&r"(loop), [ret] "=&r"(ret),
267 +- [ptr] "+Q"(*(u8 *)ptr)
268 +- : [val] "r" (val));
269 +- } while (loop);
270 ++ asm ("//__percpu_xchg_1\n"
271 ++ "1: ldxrb %w[ret], %[ptr]\n"
272 ++ " stxrb %w[loop], %w[val], %[ptr]\n"
273 ++ " cbnz %w[loop], 1b"
274 ++ : [loop] "=&r"(loop), [ret] "=&r"(ret),
275 ++ [ptr] "+Q"(*(u8 *)ptr)
276 ++ : [val] "r" (val));
277 + break;
278 + case 2:
279 +- do {
280 +- asm ("//__percpu_xchg_2\n"
281 +- "ldxrh %w[ret], %[ptr]\n"
282 +- "stxrh %w[loop], %w[val], %[ptr]\n"
283 +- : [loop] "=&r"(loop), [ret] "=&r"(ret),
284 +- [ptr] "+Q"(*(u16 *)ptr)
285 +- : [val] "r" (val));
286 +- } while (loop);
287 ++ asm ("//__percpu_xchg_2\n"
288 ++ "1: ldxrh %w[ret], %[ptr]\n"
289 ++ " stxrh %w[loop], %w[val], %[ptr]\n"
290 ++ " cbnz %w[loop], 1b"
291 ++ : [loop] "=&r"(loop), [ret] "=&r"(ret),
292 ++ [ptr] "+Q"(*(u16 *)ptr)
293 ++ : [val] "r" (val));
294 + break;
295 + case 4:
296 +- do {
297 +- asm ("//__percpu_xchg_4\n"
298 +- "ldxr %w[ret], %[ptr]\n"
299 +- "stxr %w[loop], %w[val], %[ptr]\n"
300 +- : [loop] "=&r"(loop), [ret] "=&r"(ret),
301 +- [ptr] "+Q"(*(u32 *)ptr)
302 +- : [val] "r" (val));
303 +- } while (loop);
304 ++ asm ("//__percpu_xchg_4\n"
305 ++ "1: ldxr %w[ret], %[ptr]\n"
306 ++ " stxr %w[loop], %w[val], %[ptr]\n"
307 ++ " cbnz %w[loop], 1b"
308 ++ : [loop] "=&r"(loop), [ret] "=&r"(ret),
309 ++ [ptr] "+Q"(*(u32 *)ptr)
310 ++ : [val] "r" (val));
311 + break;
312 + case 8:
313 +- do {
314 +- asm ("//__percpu_xchg_8\n"
315 +- "ldxr %[ret], %[ptr]\n"
316 +- "stxr %w[loop], %[val], %[ptr]\n"
317 +- : [loop] "=&r"(loop), [ret] "=&r"(ret),
318 +- [ptr] "+Q"(*(u64 *)ptr)
319 +- : [val] "r" (val));
320 +- } while (loop);
321 ++ asm ("//__percpu_xchg_8\n"
322 ++ "1: ldxr %[ret], %[ptr]\n"
323 ++ " stxr %w[loop], %[val], %[ptr]\n"
324 ++ " cbnz %w[loop], 1b"
325 ++ : [loop] "=&r"(loop), [ret] "=&r"(ret),
326 ++ [ptr] "+Q"(*(u64 *)ptr)
327 ++ : [val] "r" (val));
328 + break;
329 + default:
330 + BUILD_BUG();
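
A note on the per-cpu rewrite above: the retry loop around the load-exclusive/store-exclusive pair now lives inside a single asm statement (the 1:/cbnz form) instead of a C do/while wrapped around the asm, which avoids the risk of the compiler placing memory accesses such as register spills between the exclusive load and store and clearing the exclusive monitor. A minimal sketch of the resulting pattern, using the 64-bit add case with illustrative names:

/* Minimal sketch of the single-asm-block LL/SC retry loop (arm64,
 * 64-bit add case); kernel context assumed, names illustrative. */
static inline void llsc_add_u64(u64 *ptr, u64 val)
{
	unsigned long loop;
	u64 ret;

	asm volatile("// llsc_add_u64\n"
	"1:	ldxr	%[ret], %[p]\n"
	"	add	%[ret], %[ret], %[v]\n"
	"	stxr	%w[loop], %[ret], %[p]\n"
	"	cbnz	%w[loop], 1b"
	: [loop] "=&r" (loop), [ret] "=&r" (ret), [p] "+Q" (*ptr)
	: [v] "Ir" (val));
}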
331 +diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
332 +index cc7435c9676e..b346b35f827d 100644
333 +--- a/arch/arm64/kernel/head.S
334 ++++ b/arch/arm64/kernel/head.S
335 +@@ -572,8 +572,9 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
336 + b.lt 4f // Skip if no PMU present
337 + mrs x0, pmcr_el0 // Disable debug access traps
338 + ubfx x0, x0, #11, #5 // to EL2 and allow access to
339 +- msr mdcr_el2, x0 // all PMU counters from EL1
340 + 4:
341 ++ csel x0, xzr, x0, lt // all PMU counters from EL1
342 ++ msr mdcr_el2, x0 // (if they exist)
343 +
344 + /* Stage-2 translation */
345 + msr vttbr_el2, xzr
346 +diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
347 +index ab518d14b7b0..2ab11c31d77c 100644
348 +--- a/arch/mips/include/asm/kvm_host.h
349 ++++ b/arch/mips/include/asm/kvm_host.h
350 +@@ -398,7 +398,10 @@ struct kvm_vcpu_arch {
351 + /* Host KSEG0 address of the EI/DI offset */
352 + void *kseg0_commpage;
353 +
354 +- u32 io_gpr; /* GPR used as IO source/target */
355 ++ /* Resume PC after MMIO completion */
356 ++ unsigned long io_pc;
357 ++ /* GPR used as IO source/target */
358 ++ u32 io_gpr;
359 +
360 + struct hrtimer comparecount_timer;
361 + /* Count timer control KVM register */
362 +@@ -420,8 +423,6 @@ struct kvm_vcpu_arch {
363 + /* Bitmask of pending exceptions to be cleared */
364 + unsigned long pending_exceptions_clr;
365 +
366 +- unsigned long pending_load_cause;
367 +-
368 + /* Save/Restore the entryhi register when are are preempted/scheduled back in */
369 + unsigned long preempt_entryhi;
370 +
371 +diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
372 +index d6476d11212e..7f3183494e69 100644
373 +--- a/arch/mips/kvm/emulate.c
374 ++++ b/arch/mips/kvm/emulate.c
375 +@@ -752,15 +752,15 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
376 + struct mips_coproc *cop0 = vcpu->arch.cop0;
377 + enum emulation_result er = EMULATE_DONE;
378 +
379 +- if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
380 ++ if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
381 ++ kvm_clear_c0_guest_status(cop0, ST0_ERL);
382 ++ vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
383 ++ } else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
384 + kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
385 + kvm_read_c0_guest_epc(cop0));
386 + kvm_clear_c0_guest_status(cop0, ST0_EXL);
387 + vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
388 +
389 +- } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
390 +- kvm_clear_c0_guest_status(cop0, ST0_ERL);
391 +- vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
392 + } else {
393 + kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
394 + vcpu->arch.pc);
395 +@@ -1430,6 +1430,7 @@ enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
396 + struct kvm_vcpu *vcpu)
397 + {
398 + enum emulation_result er = EMULATE_DO_MMIO;
399 ++ unsigned long curr_pc;
400 + int32_t op, base, rt, offset;
401 + uint32_t bytes;
402 +
403 +@@ -1438,7 +1439,18 @@ enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
404 + offset = inst & 0xffff;
405 + op = (inst >> 26) & 0x3f;
406 +
407 +- vcpu->arch.pending_load_cause = cause;
408 ++ /*
409 ++ * Find the resume PC now while we have safe and easy access to the
410 ++ * prior branch instruction, and save it for
411 ++ * kvm_mips_complete_mmio_load() to restore later.
412 ++ */
413 ++ curr_pc = vcpu->arch.pc;
414 ++ er = update_pc(vcpu, cause);
415 ++ if (er == EMULATE_FAIL)
416 ++ return er;
417 ++ vcpu->arch.io_pc = vcpu->arch.pc;
418 ++ vcpu->arch.pc = curr_pc;
419 ++
420 + vcpu->arch.io_gpr = rt;
421 +
422 + switch (op) {
423 +@@ -2418,9 +2430,8 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
424 + goto done;
425 + }
426 +
427 +- er = update_pc(vcpu, vcpu->arch.pending_load_cause);
428 +- if (er == EMULATE_FAIL)
429 +- return er;
430 ++ /* Restore saved resume PC */
431 ++ vcpu->arch.pc = vcpu->arch.io_pc;
432 +
433 + switch (run->mmio.len) {
434 + case 4:
435 +@@ -2442,11 +2453,6 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
436 + break;
437 + }
438 +
439 +- if (vcpu->arch.pending_load_cause & CAUSEF_BD)
440 +- kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
441 +- vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
442 +- vcpu->mmio_needed);
443 +-
444 + done:
445 + return er;
446 + }
447 +diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
448 +index 099c23616901..8f13c7facdd7 100644
449 +--- a/arch/parisc/kernel/syscall.S
450 ++++ b/arch/parisc/kernel/syscall.S
451 +@@ -106,8 +106,6 @@ linux_gateway_entry:
452 + mtsp %r0,%sr4 /* get kernel space into sr4 */
453 + mtsp %r0,%sr5 /* get kernel space into sr5 */
454 + mtsp %r0,%sr6 /* get kernel space into sr6 */
455 +- mfsp %sr7,%r1 /* save user sr7 */
456 +- mtsp %r1,%sr3 /* and store it in sr3 */
457 +
458 + #ifdef CONFIG_64BIT
459 + /* for now we can *always* set the W bit on entry to the syscall
460 +@@ -133,6 +131,14 @@ linux_gateway_entry:
461 + depdi 0, 31, 32, %r21
462 + 1:
463 + #endif
464 ++
465 ++ /* We use a rsm/ssm pair to prevent sr3 from being clobbered
466 ++ * by external interrupts.
467 ++ */
468 ++ mfsp %sr7,%r1 /* save user sr7 */
469 ++ rsm PSW_SM_I, %r0 /* disable interrupts */
470 ++ mtsp %r1,%sr3 /* and store it in sr3 */
471 ++
472 + mfctl %cr30,%r1
473 + xor %r1,%r30,%r30 /* ye olde xor trick */
474 + xor %r1,%r30,%r1
475 +@@ -147,6 +153,7 @@ linux_gateway_entry:
476 + */
477 +
478 + mtsp %r0,%sr7 /* get kernel space into sr7 */
479 ++ ssm PSW_SM_I, %r0 /* enable interrupts */
480 + STREGM %r1,FRAME_SIZE(%r30) /* save r1 (usp) here for now */
481 + mfctl %cr30,%r1 /* get task ptr in %r1 */
482 + LDREG TI_TASK(%r1),%r1
483 +diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
484 +index 112ccf497562..73f638789a38 100644
485 +--- a/arch/powerpc/kernel/idle_power7.S
486 ++++ b/arch/powerpc/kernel/idle_power7.S
487 +@@ -44,7 +44,7 @@
488 + std r0,0(r1); \
489 + ptesync; \
490 + ld r0,0(r1); \
491 +-1: cmp cr0,r0,r0; \
492 ++1: cmpd cr0,r0,r0; \
493 + bne 1b; \
494 + IDLE_INST; \
495 + b .
496 +diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
497 +index f031a47d7701..6fb1b3774b11 100644
498 +--- a/arch/powerpc/mm/copro_fault.c
499 ++++ b/arch/powerpc/mm/copro_fault.c
500 +@@ -106,6 +106,8 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
501 + switch (REGION_ID(ea)) {
502 + case USER_REGION_ID:
503 + pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
504 ++ if (mm == NULL)
505 ++ return 1;
506 + psize = get_slice_psize(mm, ea);
507 + ssize = user_segment_size(ea);
508 + vsid = get_vsid(mm->context.id, ea, ssize);
509 +diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
510 +index 7730c1c5c83a..e2015452177d 100644
511 +--- a/arch/x86/include/asm/asm.h
512 ++++ b/arch/x86/include/asm/asm.h
513 +@@ -44,19 +44,22 @@
514 +
515 + /* Exception table entry */
516 + #ifdef __ASSEMBLY__
517 +-# define _ASM_EXTABLE(from,to) \
518 ++# define _ASM_EXTABLE_HANDLE(from, to, handler) \
519 + .pushsection "__ex_table","a" ; \
520 +- .balign 8 ; \
521 ++ .balign 4 ; \
522 + .long (from) - . ; \
523 + .long (to) - . ; \
524 ++ .long (handler) - . ; \
525 + .popsection
526 +
527 +-# define _ASM_EXTABLE_EX(from,to) \
528 +- .pushsection "__ex_table","a" ; \
529 +- .balign 8 ; \
530 +- .long (from) - . ; \
531 +- .long (to) - . + 0x7ffffff0 ; \
532 +- .popsection
533 ++# define _ASM_EXTABLE(from, to) \
534 ++ _ASM_EXTABLE_HANDLE(from, to, ex_handler_default)
535 ++
536 ++# define _ASM_EXTABLE_FAULT(from, to) \
537 ++ _ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
538 ++
539 ++# define _ASM_EXTABLE_EX(from, to) \
540 ++ _ASM_EXTABLE_HANDLE(from, to, ex_handler_ext)
541 +
542 + # define _ASM_NOKPROBE(entry) \
543 + .pushsection "_kprobe_blacklist","aw" ; \
544 +@@ -64,19 +67,24 @@
545 + _ASM_PTR (entry); \
546 + .popsection
547 + #else
548 +-# define _ASM_EXTABLE(from,to) \
549 ++# define _EXPAND_EXTABLE_HANDLE(x) #x
550 ++# define _ASM_EXTABLE_HANDLE(from, to, handler) \
551 + " .pushsection \"__ex_table\",\"a\"\n" \
552 +- " .balign 8\n" \
553 ++ " .balign 4\n" \
554 + " .long (" #from ") - .\n" \
555 + " .long (" #to ") - .\n" \
556 ++ " .long (" _EXPAND_EXTABLE_HANDLE(handler) ") - .\n" \
557 + " .popsection\n"
558 +
559 +-# define _ASM_EXTABLE_EX(from,to) \
560 +- " .pushsection \"__ex_table\",\"a\"\n" \
561 +- " .balign 8\n" \
562 +- " .long (" #from ") - .\n" \
563 +- " .long (" #to ") - . + 0x7ffffff0\n" \
564 +- " .popsection\n"
565 ++# define _ASM_EXTABLE(from, to) \
566 ++ _ASM_EXTABLE_HANDLE(from, to, ex_handler_default)
567 ++
568 ++# define _ASM_EXTABLE_FAULT(from, to) \
569 ++ _ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
570 ++
571 ++# define _ASM_EXTABLE_EX(from, to) \
572 ++ _ASM_EXTABLE_HANDLE(from, to, ex_handler_ext)
573 ++
574 + /* For C file, we already have NOKPROBE_SYMBOL macro */
575 + #endif
576 +
577 +diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
578 +index d081e7e42fb3..81782dc54193 100644
579 +--- a/arch/x86/include/asm/uaccess.h
580 ++++ b/arch/x86/include/asm/uaccess.h
581 +@@ -89,12 +89,11 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
582 + likely(!__range_not_ok(addr, size, user_addr_max()))
583 +
584 + /*
585 +- * The exception table consists of pairs of addresses relative to the
586 +- * exception table enty itself: the first is the address of an
587 +- * instruction that is allowed to fault, and the second is the address
588 +- * at which the program should continue. No registers are modified,
589 +- * so it is entirely up to the continuation code to figure out what to
590 +- * do.
591 ++ * The exception table consists of triples of addresses relative to the
592 ++ * exception table entry itself. The first address is of an instruction
593 ++ * that is allowed to fault, the second is the target at which the program
594 ++ * should continue. The third is a handler function to deal with the fault
595 ++ * caused by the instruction in the first field.
596 + *
597 + * All the routines below use bits of fixup code that are out of line
598 + * with the main instruction path. This means when everything is well,
599 +@@ -103,13 +102,14 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
600 + */
601 +
602 + struct exception_table_entry {
603 +- int insn, fixup;
604 ++ int insn, fixup, handler;
605 + };
606 + /* This is not the generic standard exception_table_entry format */
607 + #define ARCH_HAS_SORT_EXTABLE
608 + #define ARCH_HAS_SEARCH_EXTABLE
609 +
610 +-extern int fixup_exception(struct pt_regs *regs);
611 ++extern int fixup_exception(struct pt_regs *regs, int trapnr);
612 ++extern bool ex_has_fault_handler(unsigned long ip);
613 + extern int early_fixup_exception(unsigned long *ip);
614 +
615 + /*
616 +diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
617 +index 023c442c33bb..e1d1f6cbaf11 100644
618 +--- a/arch/x86/kernel/kprobes/core.c
619 ++++ b/arch/x86/kernel/kprobes/core.c
620 +@@ -1000,7 +1000,7 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
621 + * In case the user-specified fault handler returned
622 + * zero, try to fix up.
623 + */
624 +- if (fixup_exception(regs))
625 ++ if (fixup_exception(regs, trapnr))
626 + return 1;
627 +
628 + /*
629 +diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
630 +index 324ab5247687..020248f2cec4 100644
631 +--- a/arch/x86/kernel/traps.c
632 ++++ b/arch/x86/kernel/traps.c
633 +@@ -208,7 +208,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
634 + }
635 +
636 + if (!user_mode(regs)) {
637 +- if (!fixup_exception(regs)) {
638 ++ if (!fixup_exception(regs, trapnr)) {
639 + tsk->thread.error_code = error_code;
640 + tsk->thread.trap_nr = trapnr;
641 + die(str, regs, error_code);
642 +@@ -469,7 +469,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
643 +
644 + tsk = current;
645 + if (!user_mode(regs)) {
646 +- if (fixup_exception(regs))
647 ++ if (fixup_exception(regs, X86_TRAP_GP))
648 + goto exit;
649 +
650 + tsk->thread.error_code = error_code;
651 +@@ -720,7 +720,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
652 +
653 + if (!user_mode(regs))
654 + {
655 +- if (!fixup_exception(regs)) {
656 ++ if (!fixup_exception(regs, trapnr)) {
657 + task->thread.error_code = error_code;
658 + task->thread.trap_nr = trapnr;
659 + die(str, regs, error_code);
660 +diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
661 +index 28146f03c514..74c3285dfdcf 100644
662 +--- a/arch/x86/kvm/ioapic.c
663 ++++ b/arch/x86/kvm/ioapic.c
664 +@@ -596,7 +596,7 @@ static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
665 + ioapic->irr = 0;
666 + ioapic->irr_delivered = 0;
667 + ioapic->id = 0;
668 +- memset(ioapic->irq_eoi, 0x00, IOAPIC_NUM_PINS);
669 ++ memset(ioapic->irq_eoi, 0x00, sizeof(ioapic->irq_eoi));
670 + rtc_irq_eoi_tracking_reset(ioapic);
671 + update_handled_vectors(ioapic);
672 + }
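
The ioapic change above is the classic memset() length bug: the third argument is a byte count, not an element count. A small stand-alone illustration (the element type here is hypothetical, chosen only to make the difference visible):

#include <string.h>

#define IOAPIC_NUM_PINS 24

struct eoi_state {
	int irq_eoi[IOAPIC_NUM_PINS];	/* hypothetical element type */
};

static void reset_eoi(struct eoi_state *s)
{
	/* Buggy form: clears only IOAPIC_NUM_PINS bytes, i.e. a quarter
	 * of the array when each element is 4 bytes wide. */
	/* memset(s->irq_eoi, 0, IOAPIC_NUM_PINS); */

	/* Fixed form: sizeof() gives the full size in bytes. */
	memset(s->irq_eoi, 0, sizeof(s->irq_eoi));
}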
673 +diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
674 +index 903ec1e9c326..9dd7e4b7fcde 100644
675 +--- a/arch/x86/mm/extable.c
676 ++++ b/arch/x86/mm/extable.c
677 +@@ -3,6 +3,9 @@
678 + #include <linux/sort.h>
679 + #include <asm/uaccess.h>
680 +
681 ++typedef bool (*ex_handler_t)(const struct exception_table_entry *,
682 ++ struct pt_regs *, int);
683 ++
684 + static inline unsigned long
685 + ex_insn_addr(const struct exception_table_entry *x)
686 + {
687 +@@ -13,11 +16,56 @@ ex_fixup_addr(const struct exception_table_entry *x)
688 + {
689 + return (unsigned long)&x->fixup + x->fixup;
690 + }
691 ++static inline ex_handler_t
692 ++ex_fixup_handler(const struct exception_table_entry *x)
693 ++{
694 ++ return (ex_handler_t)((unsigned long)&x->handler + x->handler);
695 ++}
696 +
697 +-int fixup_exception(struct pt_regs *regs)
698 ++bool ex_handler_default(const struct exception_table_entry *fixup,
699 ++ struct pt_regs *regs, int trapnr)
700 + {
701 +- const struct exception_table_entry *fixup;
702 +- unsigned long new_ip;
703 ++ regs->ip = ex_fixup_addr(fixup);
704 ++ return true;
705 ++}
706 ++EXPORT_SYMBOL(ex_handler_default);
707 ++
708 ++bool ex_handler_fault(const struct exception_table_entry *fixup,
709 ++ struct pt_regs *regs, int trapnr)
710 ++{
711 ++ regs->ip = ex_fixup_addr(fixup);
712 ++ regs->ax = trapnr;
713 ++ return true;
714 ++}
715 ++EXPORT_SYMBOL_GPL(ex_handler_fault);
716 ++
717 ++bool ex_handler_ext(const struct exception_table_entry *fixup,
718 ++ struct pt_regs *regs, int trapnr)
719 ++{
720 ++ /* Special hack for uaccess_err */
721 ++ current_thread_info()->uaccess_err = 1;
722 ++ regs->ip = ex_fixup_addr(fixup);
723 ++ return true;
724 ++}
725 ++EXPORT_SYMBOL(ex_handler_ext);
726 ++
727 ++bool ex_has_fault_handler(unsigned long ip)
728 ++{
729 ++ const struct exception_table_entry *e;
730 ++ ex_handler_t handler;
731 ++
732 ++ e = search_exception_tables(ip);
733 ++ if (!e)
734 ++ return false;
735 ++ handler = ex_fixup_handler(e);
736 ++
737 ++ return handler == ex_handler_fault;
738 ++}
739 ++
740 ++int fixup_exception(struct pt_regs *regs, int trapnr)
741 ++{
742 ++ const struct exception_table_entry *e;
743 ++ ex_handler_t handler;
744 +
745 + #ifdef CONFIG_PNPBIOS
746 + if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
747 +@@ -33,42 +81,34 @@ int fixup_exception(struct pt_regs *regs)
748 + }
749 + #endif
750 +
751 +- fixup = search_exception_tables(regs->ip);
752 +- if (fixup) {
753 +- new_ip = ex_fixup_addr(fixup);
754 +-
755 +- if (fixup->fixup - fixup->insn >= 0x7ffffff0 - 4) {
756 +- /* Special hack for uaccess_err */
757 +- current_thread_info()->uaccess_err = 1;
758 +- new_ip -= 0x7ffffff0;
759 +- }
760 +- regs->ip = new_ip;
761 +- return 1;
762 +- }
763 ++ e = search_exception_tables(regs->ip);
764 ++ if (!e)
765 ++ return 0;
766 +
767 +- return 0;
768 ++ handler = ex_fixup_handler(e);
769 ++ return handler(e, regs, trapnr);
770 + }
771 +
772 + /* Restricted version used during very early boot */
773 + int __init early_fixup_exception(unsigned long *ip)
774 + {
775 +- const struct exception_table_entry *fixup;
776 ++ const struct exception_table_entry *e;
777 + unsigned long new_ip;
778 ++ ex_handler_t handler;
779 +
780 +- fixup = search_exception_tables(*ip);
781 +- if (fixup) {
782 +- new_ip = ex_fixup_addr(fixup);
783 ++ e = search_exception_tables(*ip);
784 ++ if (!e)
785 ++ return 0;
786 +
787 +- if (fixup->fixup - fixup->insn >= 0x7ffffff0 - 4) {
788 +- /* uaccess handling not supported during early boot */
789 +- return 0;
790 +- }
791 ++ new_ip = ex_fixup_addr(e);
792 ++ handler = ex_fixup_handler(e);
793 +
794 +- *ip = new_ip;
795 +- return 1;
796 +- }
797 ++ /* special handling not supported during early boot */
798 ++ if (handler != ex_handler_default)
799 ++ return 0;
800 +
801 +- return 0;
802 ++ *ip = new_ip;
803 ++ return 1;
804 + }
805 +
806 + /*
807 +@@ -133,6 +173,8 @@ void sort_extable(struct exception_table_entry *start,
808 + i += 4;
809 + p->fixup += i;
810 + i += 4;
811 ++ p->handler += i;
812 ++ i += 4;
813 + }
814 +
815 + sort(start, finish - start, sizeof(struct exception_table_entry),
816 +@@ -145,6 +187,8 @@ void sort_extable(struct exception_table_entry *start,
817 + i += 4;
818 + p->fixup -= i;
819 + i += 4;
820 ++ p->handler -= i;
821 ++ i += 4;
822 + }
823 + }
824 +
825 +diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
826 +index 62855ac37ab7..27bc31f0da52 100644
827 +--- a/arch/x86/mm/fault.c
828 ++++ b/arch/x86/mm/fault.c
829 +@@ -659,7 +659,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
830 + int sig;
831 +
832 + /* Are we prepared to handle this kernel fault? */
833 +- if (fixup_exception(regs)) {
834 ++ if (fixup_exception(regs, X86_TRAP_PF)) {
835 + /*
836 + * Any interrupt that takes a fault gets the fixup. This makes
837 + * the below recursive fault logic only apply to a faults from
838 +diff --git a/drivers/android/binder.c b/drivers/android/binder.c
839 +index f1a26d937d98..6f086415727c 100644
840 +--- a/drivers/android/binder.c
841 ++++ b/drivers/android/binder.c
842 +@@ -1003,7 +1003,7 @@ static int binder_dec_node(struct binder_node *node, int strong, int internal)
843 +
844 +
845 + static struct binder_ref *binder_get_ref(struct binder_proc *proc,
846 +- uint32_t desc)
847 ++ u32 desc, bool need_strong_ref)
848 + {
849 + struct rb_node *n = proc->refs_by_desc.rb_node;
850 + struct binder_ref *ref;
851 +@@ -1011,12 +1011,16 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
852 + while (n) {
853 + ref = rb_entry(n, struct binder_ref, rb_node_desc);
854 +
855 +- if (desc < ref->desc)
856 ++ if (desc < ref->desc) {
857 + n = n->rb_left;
858 +- else if (desc > ref->desc)
859 ++ } else if (desc > ref->desc) {
860 + n = n->rb_right;
861 +- else
862 ++ } else if (need_strong_ref && !ref->strong) {
863 ++ binder_user_error("tried to use weak ref as strong ref\n");
864 ++ return NULL;
865 ++ } else {
866 + return ref;
867 ++ }
868 + }
869 + return NULL;
870 + }
871 +@@ -1286,7 +1290,10 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
872 + } break;
873 + case BINDER_TYPE_HANDLE:
874 + case BINDER_TYPE_WEAK_HANDLE: {
875 +- struct binder_ref *ref = binder_get_ref(proc, fp->handle);
876 ++ struct binder_ref *ref;
877 ++
878 ++ ref = binder_get_ref(proc, fp->handle,
879 ++ fp->type == BINDER_TYPE_HANDLE);
880 +
881 + if (ref == NULL) {
882 + pr_err("transaction release %d bad handle %d\n",
883 +@@ -1380,7 +1387,7 @@ static void binder_transaction(struct binder_proc *proc,
884 + if (tr->target.handle) {
885 + struct binder_ref *ref;
886 +
887 +- ref = binder_get_ref(proc, tr->target.handle);
888 ++ ref = binder_get_ref(proc, tr->target.handle, true);
889 + if (ref == NULL) {
890 + binder_user_error("%d:%d got transaction to invalid handle\n",
891 + proc->pid, thread->pid);
892 +@@ -1571,7 +1578,9 @@ static void binder_transaction(struct binder_proc *proc,
893 + fp->type = BINDER_TYPE_HANDLE;
894 + else
895 + fp->type = BINDER_TYPE_WEAK_HANDLE;
896 ++ fp->binder = 0;
897 + fp->handle = ref->desc;
898 ++ fp->cookie = 0;
899 + binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
900 + &thread->todo);
901 +
902 +@@ -1583,7 +1592,10 @@ static void binder_transaction(struct binder_proc *proc,
903 + } break;
904 + case BINDER_TYPE_HANDLE:
905 + case BINDER_TYPE_WEAK_HANDLE: {
906 +- struct binder_ref *ref = binder_get_ref(proc, fp->handle);
907 ++ struct binder_ref *ref;
908 ++
909 ++ ref = binder_get_ref(proc, fp->handle,
910 ++ fp->type == BINDER_TYPE_HANDLE);
911 +
912 + if (ref == NULL) {
913 + binder_user_error("%d:%d got transaction with invalid handle, %d\n",
914 +@@ -1618,7 +1630,9 @@ static void binder_transaction(struct binder_proc *proc,
915 + return_error = BR_FAILED_REPLY;
916 + goto err_binder_get_ref_for_node_failed;
917 + }
918 ++ fp->binder = 0;
919 + fp->handle = new_ref->desc;
920 ++ fp->cookie = 0;
921 + binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
922 + trace_binder_transaction_ref_to_ref(t, ref,
923 + new_ref);
924 +@@ -1672,6 +1686,7 @@ static void binder_transaction(struct binder_proc *proc,
925 + binder_debug(BINDER_DEBUG_TRANSACTION,
926 + " fd %d -> %d\n", fp->handle, target_fd);
927 + /* TODO: fput? */
928 ++ fp->binder = 0;
929 + fp->handle = target_fd;
930 + } break;
931 +
932 +@@ -1794,7 +1809,9 @@ static int binder_thread_write(struct binder_proc *proc,
933 + ref->desc);
934 + }
935 + } else
936 +- ref = binder_get_ref(proc, target);
937 ++ ref = binder_get_ref(proc, target,
938 ++ cmd == BC_ACQUIRE ||
939 ++ cmd == BC_RELEASE);
940 + if (ref == NULL) {
941 + binder_user_error("%d:%d refcount change on invalid ref %d\n",
942 + proc->pid, thread->pid, target);
943 +@@ -1990,7 +2007,7 @@ static int binder_thread_write(struct binder_proc *proc,
944 + if (get_user(cookie, (binder_uintptr_t __user *)ptr))
945 + return -EFAULT;
946 + ptr += sizeof(binder_uintptr_t);
947 +- ref = binder_get_ref(proc, target);
948 ++ ref = binder_get_ref(proc, target, false);
949 + if (ref == NULL) {
950 + binder_user_error("%d:%d %s invalid ref %d\n",
951 + proc->pid, thread->pid,
952 +diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
953 +index 50754d203310..8cc67132d55d 100644
954 +--- a/drivers/char/virtio_console.c
955 ++++ b/drivers/char/virtio_console.c
956 +@@ -1533,19 +1533,29 @@ static void remove_port_data(struct port *port)
957 + spin_lock_irq(&port->inbuf_lock);
958 + /* Remove unused data this port might have received. */
959 + discard_port_data(port);
960 ++ spin_unlock_irq(&port->inbuf_lock);
961 +
962 + /* Remove buffers we queued up for the Host to send us data in. */
963 +- while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
964 +- free_buf(buf, true);
965 +- spin_unlock_irq(&port->inbuf_lock);
966 ++ do {
967 ++ spin_lock_irq(&port->inbuf_lock);
968 ++ buf = virtqueue_detach_unused_buf(port->in_vq);
969 ++ spin_unlock_irq(&port->inbuf_lock);
970 ++ if (buf)
971 ++ free_buf(buf, true);
972 ++ } while (buf);
973 +
974 + spin_lock_irq(&port->outvq_lock);
975 + reclaim_consumed_buffers(port);
976 ++ spin_unlock_irq(&port->outvq_lock);
977 +
978 + /* Free pending buffers from the out-queue. */
979 +- while ((buf = virtqueue_detach_unused_buf(port->out_vq)))
980 +- free_buf(buf, true);
981 +- spin_unlock_irq(&port->outvq_lock);
982 ++ do {
983 ++ spin_lock_irq(&port->outvq_lock);
984 ++ buf = virtqueue_detach_unused_buf(port->out_vq);
985 ++ spin_unlock_irq(&port->outvq_lock);
986 ++ if (buf)
987 ++ free_buf(buf, true);
988 ++ } while (buf);
989 + }
990 +
991 + /*
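
The virtio_console hunks above switch to a detach-then-free loop so that free_buf(), which may sleep while releasing buffers, is never called with the inbuf or outvq spinlock held. The pattern, reduced to a sketch (kernel context assumed):

static void drain_unused(struct virtqueue *vq, spinlock_t *lock)
{
	struct port_buffer *buf;

	do {
		spin_lock_irq(lock);
		buf = virtqueue_detach_unused_buf(vq);
		spin_unlock_irq(lock);

		if (buf)
			free_buf(buf, true);	/* may sleep; lock is not held */
	} while (buf);
}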
992 +diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
993 +index f4ea80d602f7..b9d2f76a0cf7 100644
994 +--- a/drivers/firewire/net.c
995 ++++ b/drivers/firewire/net.c
996 +@@ -73,13 +73,13 @@ struct rfc2734_header {
997 +
998 + #define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30)
999 + #define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff))
1000 +-#define fwnet_get_hdr_dg_size(h) (((h)->w0 & 0x0fff0000) >> 16)
1001 ++#define fwnet_get_hdr_dg_size(h) ((((h)->w0 & 0x0fff0000) >> 16) + 1)
1002 + #define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff))
1003 + #define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16)
1004 +
1005 +-#define fwnet_set_hdr_lf(lf) ((lf) << 30)
1006 ++#define fwnet_set_hdr_lf(lf) ((lf) << 30)
1007 + #define fwnet_set_hdr_ether_type(et) (et)
1008 +-#define fwnet_set_hdr_dg_size(dgs) ((dgs) << 16)
1009 ++#define fwnet_set_hdr_dg_size(dgs) (((dgs) - 1) << 16)
1010 + #define fwnet_set_hdr_fg_off(fgo) (fgo)
1011 +
1012 + #define fwnet_set_hdr_dgl(dgl) ((dgl) << 16)
1013 +@@ -578,6 +578,9 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
1014 + int retval;
1015 + u16 ether_type;
1016 +
1017 ++ if (len <= RFC2374_UNFRAG_HDR_SIZE)
1018 ++ return 0;
1019 ++
1020 + hdr.w0 = be32_to_cpu(buf[0]);
1021 + lf = fwnet_get_hdr_lf(&hdr);
1022 + if (lf == RFC2374_HDR_UNFRAG) {
1023 +@@ -602,7 +605,12 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
1024 + return fwnet_finish_incoming_packet(net, skb, source_node_id,
1025 + is_broadcast, ether_type);
1026 + }
1027 ++
1028 + /* A datagram fragment has been received, now the fun begins. */
1029 ++
1030 ++ if (len <= RFC2374_FRAG_HDR_SIZE)
1031 ++ return 0;
1032 ++
1033 + hdr.w1 = ntohl(buf[1]);
1034 + buf += 2;
1035 + len -= RFC2374_FRAG_HDR_SIZE;
1036 +@@ -614,7 +622,10 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
1037 + fg_off = fwnet_get_hdr_fg_off(&hdr);
1038 + }
1039 + datagram_label = fwnet_get_hdr_dgl(&hdr);
1040 +- dg_size = fwnet_get_hdr_dg_size(&hdr); /* ??? + 1 */
1041 ++ dg_size = fwnet_get_hdr_dg_size(&hdr);
1042 ++
1043 ++ if (fg_off + len > dg_size)
1044 ++ return 0;
1045 +
1046 + spin_lock_irqsave(&dev->lock, flags);
1047 +
1048 +@@ -722,6 +733,22 @@ static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
1049 + fw_send_response(card, r, rcode);
1050 + }
1051 +
1052 ++static int gasp_source_id(__be32 *p)
1053 ++{
1054 ++ return be32_to_cpu(p[0]) >> 16;
1055 ++}
1056 ++
1057 ++static u32 gasp_specifier_id(__be32 *p)
1058 ++{
1059 ++ return (be32_to_cpu(p[0]) & 0xffff) << 8 |
1060 ++ (be32_to_cpu(p[1]) & 0xff000000) >> 24;
1061 ++}
1062 ++
1063 ++static u32 gasp_version(__be32 *p)
1064 ++{
1065 ++ return be32_to_cpu(p[1]) & 0xffffff;
1066 ++}
1067 ++
1068 + static void fwnet_receive_broadcast(struct fw_iso_context *context,
1069 + u32 cycle, size_t header_length, void *header, void *data)
1070 + {
1071 +@@ -731,9 +758,6 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
1072 + __be32 *buf_ptr;
1073 + int retval;
1074 + u32 length;
1075 +- u16 source_node_id;
1076 +- u32 specifier_id;
1077 +- u32 ver;
1078 + unsigned long offset;
1079 + unsigned long flags;
1080 +
1081 +@@ -750,22 +774,17 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
1082 +
1083 + spin_unlock_irqrestore(&dev->lock, flags);
1084 +
1085 +- specifier_id = (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8
1086 +- | (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24;
1087 +- ver = be32_to_cpu(buf_ptr[1]) & 0xffffff;
1088 +- source_node_id = be32_to_cpu(buf_ptr[0]) >> 16;
1089 +-
1090 +- if (specifier_id == IANA_SPECIFIER_ID &&
1091 +- (ver == RFC2734_SW_VERSION
1092 ++ if (length > IEEE1394_GASP_HDR_SIZE &&
1093 ++ gasp_specifier_id(buf_ptr) == IANA_SPECIFIER_ID &&
1094 ++ (gasp_version(buf_ptr) == RFC2734_SW_VERSION
1095 + #if IS_ENABLED(CONFIG_IPV6)
1096 +- || ver == RFC3146_SW_VERSION
1097 ++ || gasp_version(buf_ptr) == RFC3146_SW_VERSION
1098 + #endif
1099 +- )) {
1100 +- buf_ptr += 2;
1101 +- length -= IEEE1394_GASP_HDR_SIZE;
1102 +- fwnet_incoming_packet(dev, buf_ptr, length, source_node_id,
1103 ++ ))
1104 ++ fwnet_incoming_packet(dev, buf_ptr + 2,
1105 ++ length - IEEE1394_GASP_HDR_SIZE,
1106 ++ gasp_source_id(buf_ptr),
1107 + context->card->generation, true);
1108 +- }
1109 +
1110 + packet.payload_length = dev->rcv_buffer_size;
1111 + packet.interrupt = 1;
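
Two things happen in the firewire/net.c hunks above: undersized packets are rejected before their headers are parsed, and the dg_size accessors now absorb the RFC 2734 convention of storing the datagram size minus one, so callers no longer add the 1 themselves. A round-trip illustration with made-up values:

static void dg_size_roundtrip(void)
{
	struct rfc2734_header hdr = {
		.w0 = fwnet_set_hdr_dg_size(1500) |	/* stores 1499 << 16 */
		      fwnet_set_hdr_fg_off(512),
	};

	unsigned int dg_size = fwnet_get_hdr_dg_size(&hdr);	/* 1500 */
	unsigned int fg_off  = fwnet_get_hdr_fg_off(&hdr);	/* 512  */

	(void)dg_size;
	(void)fg_off;
}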
1112 +diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
1113 +index 52dea773bb1b..ef773bf58a25 100644
1114 +--- a/drivers/gpu/drm/drm_dp_mst_topology.c
1115 ++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
1116 +@@ -909,6 +909,7 @@ static void drm_dp_destroy_port(struct kref *kref)
1117 + /* no need to clean up vcpi
1118 + * as if we have no connector we never setup a vcpi */
1119 + drm_dp_port_teardown_pdt(port, port->pdt);
1120 ++ port->pdt = DP_PEER_DEVICE_NONE;
1121 + }
1122 + kfree(port);
1123 + }
1124 +@@ -1155,7 +1156,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
1125 + drm_dp_put_port(port);
1126 + goto out;
1127 + }
1128 +- if (port->port_num >= 8) {
1129 ++ if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
1130 ++ port->pdt == DP_PEER_DEVICE_SST_SINK) &&
1131 ++ port->port_num >= DP_MST_LOGICAL_PORT_0) {
1132 + port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
1133 + }
1134 + }
1135 +@@ -2860,6 +2863,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
1136 + mgr->cbs->destroy_connector(mgr, port->connector);
1137 +
1138 + drm_dp_port_teardown_pdt(port, port->pdt);
1139 ++ port->pdt = DP_PEER_DEVICE_NONE;
1140 +
1141 + if (!port->input && port->vcpi.vcpi > 0) {
1142 + drm_dp_mst_reset_vcpi_slots(mgr, port);
1143 +diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
1144 +index 64d3a771920d..fc305ee22471 100644
1145 +--- a/drivers/gpu/drm/radeon/ni.c
1146 ++++ b/drivers/gpu/drm/radeon/ni.c
1147 +@@ -1371,9 +1371,7 @@ static void cayman_pcie_gart_fini(struct radeon_device *rdev)
1148 + void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
1149 + int ring, u32 cp_int_cntl)
1150 + {
1151 +- u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;
1152 +-
1153 +- WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
1154 ++ WREG32(SRBM_GFX_CNTL, RINGID(ring));
1155 + WREG32(CP_INT_CNTL, cp_int_cntl);
1156 + }
1157 +
1158 +diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
1159 +index fa2154493cf1..470af4aa4a6a 100644
1160 +--- a/drivers/gpu/drm/radeon/r600_dpm.c
1161 ++++ b/drivers/gpu/drm/radeon/r600_dpm.c
1162 +@@ -156,19 +156,20 @@ u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
1163 + struct drm_device *dev = rdev->ddev;
1164 + struct drm_crtc *crtc;
1165 + struct radeon_crtc *radeon_crtc;
1166 +- u32 line_time_us, vblank_lines;
1167 ++ u32 vblank_in_pixels;
1168 + u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
1169 +
1170 + if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
1171 + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1172 + radeon_crtc = to_radeon_crtc(crtc);
1173 + if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
1174 +- line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
1175 +- radeon_crtc->hw_mode.clock;
1176 +- vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
1177 +- radeon_crtc->hw_mode.crtc_vdisplay +
1178 +- (radeon_crtc->v_border * 2);
1179 +- vblank_time_us = vblank_lines * line_time_us;
1180 ++ vblank_in_pixels =
1181 ++ radeon_crtc->hw_mode.crtc_htotal *
1182 ++ (radeon_crtc->hw_mode.crtc_vblank_end -
1183 ++ radeon_crtc->hw_mode.crtc_vdisplay +
1184 ++ (radeon_crtc->v_border * 2));
1185 ++
1186 ++ vblank_time_us = vblank_in_pixels * 1000 / radeon_crtc->hw_mode.clock;
1187 + break;
1188 + }
1189 + }
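
The r600_dpm.c change above multiplies before dividing to avoid truncating the per-line time to whole microseconds. A worked example with hypothetical 1080p-like timings shows the size of the error the old formula could accumulate:

/* Hypothetical timings: htotal = 2200 pixels, 45 vblank lines,
 * 148500 kHz pixel clock. */
static u32 vblank_time_example(void)
{
	u32 htotal = 2200, vblank_lines = 45, clock_khz = 148500;

	/* old: line time truncates to 14 us, giving 14 * 45 = 630 us */
	u32 old_us = (htotal * 1000 / clock_khz) * vblank_lines;

	/* new: 2200 * 45 * 1000 / 148500 = 666 us (exact value ~666.7) */
	u32 new_us = htotal * vblank_lines * 1000 / clock_khz;

	return new_us - old_us;		/* 36 us difference */
}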
1190 +diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
1191 +index db64e0062689..3b0c229d7dcd 100644
1192 +--- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c
1193 ++++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
1194 +@@ -105,7 +105,7 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
1195 +
1196 + tmp &= AUX_HPD_SEL(0x7);
1197 + tmp |= AUX_HPD_SEL(chan->rec.hpd);
1198 +- tmp |= AUX_EN | AUX_LS_READ_EN | AUX_HPD_DISCON(0x1);
1199 ++ tmp |= AUX_EN | AUX_LS_READ_EN;
1200 +
1201 + WREG32(AUX_CONTROL + aux_offset[instance], tmp);
1202 +
1203 +diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
1204 +index f666277a8993..a808ba001ee7 100644
1205 +--- a/drivers/gpu/drm/radeon/si_dpm.c
1206 ++++ b/drivers/gpu/drm/radeon/si_dpm.c
1207 +@@ -2948,6 +2948,49 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
1208 + int i;
1209 + struct si_dpm_quirk *p = si_dpm_quirk_list;
1210 +
1211 ++ /* limit all SI kickers */
1212 ++ if (rdev->family == CHIP_PITCAIRN) {
1213 ++ if ((rdev->pdev->revision == 0x81) ||
1214 ++ (rdev->pdev->device == 0x6810) ||
1215 ++ (rdev->pdev->device == 0x6811) ||
1216 ++ (rdev->pdev->device == 0x6816) ||
1217 ++ (rdev->pdev->device == 0x6817) ||
1218 ++ (rdev->pdev->device == 0x6806))
1219 ++ max_mclk = 120000;
1220 ++ } else if (rdev->family == CHIP_VERDE) {
1221 ++ if ((rdev->pdev->revision == 0x81) ||
1222 ++ (rdev->pdev->revision == 0x83) ||
1223 ++ (rdev->pdev->revision == 0x87) ||
1224 ++ (rdev->pdev->device == 0x6820) ||
1225 ++ (rdev->pdev->device == 0x6821) ||
1226 ++ (rdev->pdev->device == 0x6822) ||
1227 ++ (rdev->pdev->device == 0x6823) ||
1228 ++ (rdev->pdev->device == 0x682A) ||
1229 ++ (rdev->pdev->device == 0x682B)) {
1230 ++ max_sclk = 75000;
1231 ++ max_mclk = 80000;
1232 ++ }
1233 ++ } else if (rdev->family == CHIP_OLAND) {
1234 ++ if ((rdev->pdev->revision == 0xC7) ||
1235 ++ (rdev->pdev->revision == 0x80) ||
1236 ++ (rdev->pdev->revision == 0x81) ||
1237 ++ (rdev->pdev->revision == 0x83) ||
1238 ++ (rdev->pdev->device == 0x6604) ||
1239 ++ (rdev->pdev->device == 0x6605)) {
1240 ++ max_sclk = 75000;
1241 ++ max_mclk = 80000;
1242 ++ }
1243 ++ } else if (rdev->family == CHIP_HAINAN) {
1244 ++ if ((rdev->pdev->revision == 0x81) ||
1245 ++ (rdev->pdev->revision == 0x83) ||
1246 ++ (rdev->pdev->revision == 0xC3) ||
1247 ++ (rdev->pdev->device == 0x6664) ||
1248 ++ (rdev->pdev->device == 0x6665) ||
1249 ++ (rdev->pdev->device == 0x6667)) {
1250 ++ max_sclk = 75000;
1251 ++ max_mclk = 80000;
1252 ++ }
1253 ++ }
1254 + /* Apply dpm quirks */
1255 + while (p && p->chip_device != 0) {
1256 + if (rdev->pdev->vendor == p->chip_vendor &&
1257 +@@ -2960,10 +3003,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
1258 + }
1259 + ++p;
1260 + }
1261 +- /* limit mclk on all R7 370 parts for stability */
1262 +- if (rdev->pdev->device == 0x6811 &&
1263 +- rdev->pdev->revision == 0x81)
1264 +- max_mclk = 120000;
1265 +
1266 + if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
1267 + ni_dpm_vblank_too_short(rdev))
1268 +diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
1269 +index 7994ec2e4151..41f5896224bd 100644
1270 +--- a/drivers/hv/hv_util.c
1271 ++++ b/drivers/hv/hv_util.c
1272 +@@ -283,10 +283,14 @@ static void heartbeat_onchannelcallback(void *context)
1273 + u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
1274 + struct icmsg_negotiate *negop = NULL;
1275 +
1276 +- vmbus_recvpacket(channel, hbeat_txf_buf,
1277 +- PAGE_SIZE, &recvlen, &requestid);
1278 ++ while (1) {
1279 ++
1280 ++ vmbus_recvpacket(channel, hbeat_txf_buf,
1281 ++ PAGE_SIZE, &recvlen, &requestid);
1282 ++
1283 ++ if (!recvlen)
1284 ++ break;
1285 +
1286 +- if (recvlen > 0) {
1287 + icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[
1288 + sizeof(struct vmbuspipe_hdr)];
1289 +
1290 +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
1291 +index 68f5f4a0f1e7..418701947800 100644
1292 +--- a/drivers/input/serio/i8042-x86ia64io.h
1293 ++++ b/drivers/input/serio/i8042-x86ia64io.h
1294 +@@ -793,6 +793,13 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
1295 + DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
1296 + },
1297 + },
1298 ++ {
1299 ++ /* Schenker XMG C504 - Elantech touchpad */
1300 ++ .matches = {
1301 ++ DMI_MATCH(DMI_SYS_VENDOR, "XMG"),
1302 ++ DMI_MATCH(DMI_PRODUCT_NAME, "C504"),
1303 ++ },
1304 ++ },
1305 + { }
1306 + };
1307 +
1308 +diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
1309 +index 1dbae580e8ca..f1b15a0b3774 100644
1310 +--- a/drivers/irqchip/irq-gic-v3.c
1311 ++++ b/drivers/irqchip/irq-gic-v3.c
1312 +@@ -180,7 +180,7 @@ static void gic_enable_redist(bool enable)
1313 + return; /* No PM support in this redistributor */
1314 + }
1315 +
1316 +- while (count--) {
1317 ++ while (--count) {
1318 + val = readl_relaxed(rbase + GICR_WAKER);
1319 + if (enable ^ (val & GICR_WAKER_ChildrenAsleep))
1320 + break;
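
The one-character change in irq-gic-v3.c above concerns the counter's value after the loop: with pre-decrement, count is 0 exactly when the retries were exhausted, which is the property a follow-up "if (!count)" timeout test needs. A generic sketch (poll() is a hypothetical stand-in for the GICR_WAKER read):

static bool wait_for_condition(bool (*poll)(void))
{
	unsigned int count = 1000000;

	while (--count) {
		if (poll())
			break;		/* success: count is still > 0 */
		cpu_relax();
	}

	/* exhausted: count == 0 here; the old "while (count--)" form
	 * left it at UINT_MAX instead, defeating an "if (!count)" test. */
	return count != 0;
}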
1321 +diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
1322 +index e411ccba0af6..94533bdcbef2 100644
1323 +--- a/drivers/md/dm-table.c
1324 ++++ b/drivers/md/dm-table.c
1325 +@@ -700,37 +700,32 @@ int dm_table_add_target(struct dm_table *t, const char *type,
1326 +
1327 + tgt->type = dm_get_target_type(type);
1328 + if (!tgt->type) {
1329 +- DMERR("%s: %s: unknown target type", dm_device_name(t->md),
1330 +- type);
1331 ++ DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
1332 + return -EINVAL;
1333 + }
1334 +
1335 + if (dm_target_needs_singleton(tgt->type)) {
1336 + if (t->num_targets) {
1337 +- DMERR("%s: target type %s must appear alone in table",
1338 +- dm_device_name(t->md), type);
1339 +- return -EINVAL;
1340 ++ tgt->error = "singleton target type must appear alone in table";
1341 ++ goto bad;
1342 + }
1343 + t->singleton = 1;
1344 + }
1345 +
1346 + if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
1347 +- DMERR("%s: target type %s may not be included in read-only tables",
1348 +- dm_device_name(t->md), type);
1349 +- return -EINVAL;
1350 ++ tgt->error = "target type may not be included in a read-only table";
1351 ++ goto bad;
1352 + }
1353 +
1354 + if (t->immutable_target_type) {
1355 + if (t->immutable_target_type != tgt->type) {
1356 +- DMERR("%s: immutable target type %s cannot be mixed with other target types",
1357 +- dm_device_name(t->md), t->immutable_target_type->name);
1358 +- return -EINVAL;
1359 ++ tgt->error = "immutable target type cannot be mixed with other target types";
1360 ++ goto bad;
1361 + }
1362 + } else if (dm_target_is_immutable(tgt->type)) {
1363 + if (t->num_targets) {
1364 +- DMERR("%s: immutable target type %s cannot be mixed with other target types",
1365 +- dm_device_name(t->md), tgt->type->name);
1366 +- return -EINVAL;
1367 ++ tgt->error = "immutable target type cannot be mixed with other target types";
1368 ++ goto bad;
1369 + }
1370 + t->immutable_target_type = tgt->type;
1371 + }
1372 +@@ -745,7 +740,6 @@ int dm_table_add_target(struct dm_table *t, const char *type,
1373 + */
1374 + if (!adjoin(t, tgt)) {
1375 + tgt->error = "Gap in table";
1376 +- r = -EINVAL;
1377 + goto bad;
1378 + }
1379 +
1380 +diff --git a/drivers/md/md.c b/drivers/md/md.c
1381 +index 72dc91de80f8..7453c3ed4b8f 100644
1382 +--- a/drivers/md/md.c
1383 ++++ b/drivers/md/md.c
1384 +@@ -7847,6 +7847,9 @@ void md_do_sync(struct md_thread *thread)
1385 + break;
1386 +
1387 + j += sectors;
1388 ++ if (j > max_sectors)
1389 ++ /* when skipping, extra large numbers can be returned. */
1390 ++ j = max_sectors;
1391 + if (j > 2)
1392 + mddev->curr_resync = j;
1393 + if (mddev_is_clustered(mddev))
1394 +@@ -7915,6 +7918,12 @@ void md_do_sync(struct md_thread *thread)
1395 + blk_finish_plug(&plug);
1396 + wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
1397 +
1398 ++ if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1399 ++ !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
1400 ++ mddev->curr_resync > 3) {
1401 ++ mddev->curr_resync_completed = mddev->curr_resync;
1402 ++ sysfs_notify(&mddev->kobj, NULL, "sync_completed");
1403 ++ }
1404 + /* tell personality that we are finished */
1405 + mddev->pers->sync_request(mddev, max_sectors, &skipped);
1406 +
1407 +@@ -7922,7 +7931,7 @@ void md_do_sync(struct md_thread *thread)
1408 + md_cluster_ops->resync_finish(mddev);
1409 +
1410 + if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
1411 +- mddev->curr_resync > 2) {
1412 ++ mddev->curr_resync > 3) {
1413 + if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
1414 + if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
1415 + if (mddev->curr_resync >= mddev->recovery_cp) {
1416 +diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c
1417 +index 1105db2355d2..83bfb1659abe 100644
1418 +--- a/drivers/memstick/host/rtsx_usb_ms.c
1419 ++++ b/drivers/memstick/host/rtsx_usb_ms.c
1420 +@@ -524,6 +524,7 @@ static void rtsx_usb_ms_handle_req(struct work_struct *work)
1421 + int rc;
1422 +
1423 + if (!host->req) {
1424 ++ pm_runtime_get_sync(ms_dev(host));
1425 + do {
1426 + rc = memstick_next_req(msh, &host->req);
1427 + dev_dbg(ms_dev(host), "next req %d\n", rc);
1428 +@@ -544,6 +545,7 @@ static void rtsx_usb_ms_handle_req(struct work_struct *work)
1429 + host->req->error);
1430 + }
1431 + } while (!rc);
1432 ++ pm_runtime_put(ms_dev(host));
1433 + }
1434 +
1435 + }
1436 +@@ -570,6 +572,7 @@ static int rtsx_usb_ms_set_param(struct memstick_host *msh,
1437 + dev_dbg(ms_dev(host), "%s: param = %d, value = %d\n",
1438 + __func__, param, value);
1439 +
1440 ++ pm_runtime_get_sync(ms_dev(host));
1441 + mutex_lock(&ucr->dev_mutex);
1442 +
1443 + err = rtsx_usb_card_exclusive_check(ucr, RTSX_USB_MS_CARD);
1444 +@@ -635,6 +638,7 @@ static int rtsx_usb_ms_set_param(struct memstick_host *msh,
1445 + }
1446 + out:
1447 + mutex_unlock(&ucr->dev_mutex);
1448 ++ pm_runtime_put(ms_dev(host));
1449 +
1450 + /* power-on delay */
1451 + if (param == MEMSTICK_POWER && value == MEMSTICK_POWER_ON)
1452 +@@ -681,6 +685,7 @@ static int rtsx_usb_detect_ms_card(void *__host)
1453 + int err;
1454 +
1455 + for (;;) {
1456 ++ pm_runtime_get_sync(ms_dev(host));
1457 + mutex_lock(&ucr->dev_mutex);
1458 +
1459 + /* Check pending MS card changes */
1460 +@@ -703,6 +708,7 @@ static int rtsx_usb_detect_ms_card(void *__host)
1461 + }
1462 +
1463 + poll_again:
1464 ++ pm_runtime_put(ms_dev(host));
1465 + if (host->eject)
1466 + break;
1467 +
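
The rtsx_usb_ms hunks above bracket each card access with pm_runtime_get_sync()/pm_runtime_put() so the parent USB device is resumed before it is touched and may autosuspend afterwards. A sketch of that bracketing (do_card_io() is a hypothetical placeholder):

static void do_card_io(struct rtsx_usb_ms *host);	/* hypothetical helper */

static void ms_locked_access(struct rtsx_usb_ms *host, struct rtsx_ucr *ucr)
{
	pm_runtime_get_sync(ms_dev(host));	/* resume device, hold a reference */
	mutex_lock(&ucr->dev_mutex);

	do_card_io(host);

	mutex_unlock(&ucr->dev_mutex);
	pm_runtime_put(ms_dev(host));		/* drop reference, allow autosuspend */
}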
1468 +diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
1469 +index 1ca94e6fa8fb..e9e6f7d61a71 100644
1470 +--- a/drivers/misc/genwqe/card_utils.c
1471 ++++ b/drivers/misc/genwqe/card_utils.c
1472 +@@ -351,17 +351,27 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
1473 + if (copy_from_user(sgl->lpage, user_addr + user_size -
1474 + sgl->lpage_size, sgl->lpage_size)) {
1475 + rc = -EFAULT;
1476 +- goto err_out1;
1477 ++ goto err_out2;
1478 + }
1479 + }
1480 + return 0;
1481 +
1482 ++ err_out2:
1483 ++ __genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
1484 ++ sgl->lpage_dma_addr);
1485 ++ sgl->lpage = NULL;
1486 ++ sgl->lpage_dma_addr = 0;
1487 + err_out1:
1488 + __genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
1489 + sgl->fpage_dma_addr);
1490 ++ sgl->fpage = NULL;
1491 ++ sgl->fpage_dma_addr = 0;
1492 + err_out:
1493 + __genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
1494 + sgl->sgl_dma_addr);
1495 ++ sgl->sgl = NULL;
1496 ++ sgl->sgl_dma_addr = 0;
1497 ++ sgl->sgl_size = 0;
1498 + return -ENOMEM;
1499 + }
1500 +
1501 +diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
1502 +index bae680c648ff..396d75d9fb11 100644
1503 +--- a/drivers/misc/mei/hw-txe.c
1504 ++++ b/drivers/misc/mei/hw-txe.c
1505 +@@ -972,11 +972,13 @@ static bool mei_txe_check_and_ack_intrs(struct mei_device *dev, bool do_ack)
1506 + hisr = mei_txe_br_reg_read(hw, HISR_REG);
1507 +
1508 + aliveness = mei_txe_aliveness_get(dev);
1509 +- if (hhisr & IPC_HHIER_SEC && aliveness)
1510 ++ if (hhisr & IPC_HHIER_SEC && aliveness) {
1511 + ipc_isr = mei_txe_sec_reg_read_silent(hw,
1512 + SEC_IPC_HOST_INT_STATUS_REG);
1513 +- else
1514 ++ } else {
1515 + ipc_isr = 0;
1516 ++ hhisr &= ~IPC_HHIER_SEC;
1517 ++ }
1518 +
1519 + generated = generated ||
1520 + (hisr & HISR_INT_STS_MSK) ||
1521 +diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
1522 +index ccefd6ca9c99..25939928d8fe 100644
1523 +--- a/drivers/mmc/card/block.c
1524 ++++ b/drivers/mmc/card/block.c
1525 +@@ -1652,7 +1652,7 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
1526 + struct mmc_blk_data *md = mq->data;
1527 + struct mmc_packed *packed = mqrq->packed;
1528 + bool do_rel_wr, do_data_tag;
1529 +- u32 *packed_cmd_hdr;
1530 ++ __le32 *packed_cmd_hdr;
1531 + u8 hdr_blocks;
1532 + u8 i = 1;
1533 +
1534 +diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
1535 +index 99e6521e6169..f42c11293dd8 100644
1536 +--- a/drivers/mmc/card/queue.h
1537 ++++ b/drivers/mmc/card/queue.h
1538 +@@ -24,7 +24,7 @@ enum mmc_packed_type {
1539 +
1540 + struct mmc_packed {
1541 + struct list_head list;
1542 +- u32 cmd_hdr[1024];
1543 ++ __le32 cmd_hdr[1024];
1544 + unsigned int blocks;
1545 + u8 nr_entries;
1546 + u8 retries;
1547 +diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
1548 +index 88af827e086b..a9e97a138f3d 100644
1549 +--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
1550 ++++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
1551 +@@ -1138,11 +1138,6 @@ static void sdmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1552 + dev_dbg(sdmmc_dev(host), "%s\n", __func__);
1553 + mutex_lock(&ucr->dev_mutex);
1554 +
1555 +- if (rtsx_usb_card_exclusive_check(ucr, RTSX_USB_SD_CARD)) {
1556 +- mutex_unlock(&ucr->dev_mutex);
1557 +- return;
1558 +- }
1559 +-
1560 + sd_set_power_mode(host, ios->power_mode);
1561 + sd_set_bus_width(host, ios->bus_width);
1562 + sd_set_timing(host, ios->timing, &host->ddr_mode);
1563 +@@ -1314,6 +1309,7 @@ static void rtsx_usb_update_led(struct work_struct *work)
1564 + container_of(work, struct rtsx_usb_sdmmc, led_work);
1565 + struct rtsx_ucr *ucr = host->ucr;
1566 +
1567 ++ pm_runtime_get_sync(sdmmc_dev(host));
1568 + mutex_lock(&ucr->dev_mutex);
1569 +
1570 + if (host->led.brightness == LED_OFF)
1571 +@@ -1322,6 +1318,7 @@ static void rtsx_usb_update_led(struct work_struct *work)
1572 + rtsx_usb_turn_on_led(ucr);
1573 +
1574 + mutex_unlock(&ucr->dev_mutex);
1575 ++ pm_runtime_put(sdmmc_dev(host));
1576 + }
1577 + #endif
1578 +
1579 +diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
1580 +index c60dde917e49..5eb23ae82def 100644
1581 +--- a/drivers/mmc/host/sdhci.c
1582 ++++ b/drivers/mmc/host/sdhci.c
1583 +@@ -690,7 +690,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
1584 + * host->clock is in Hz. target_timeout is in us.
1585 + * Hence, us = 1000000 * cycles / Hz. Round up.
1586 + */
1587 +- val = 1000000 * data->timeout_clks;
1588 ++ val = 1000000ULL * data->timeout_clks;
1589 + if (do_div(val, host->clock))
1590 + target_timeout++;
1591 + target_timeout += val;
1592 +diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
1593 +index 914c39f9f388..2926295a936d 100644
1594 +--- a/drivers/scsi/arcmsr/arcmsr_hba.c
1595 ++++ b/drivers/scsi/arcmsr/arcmsr_hba.c
1596 +@@ -2540,18 +2540,9 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
1597 + struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
1598 + struct CommandControlBlock *ccb;
1599 + int target = cmd->device->id;
1600 +- int lun = cmd->device->lun;
1601 +- uint8_t scsicmd = cmd->cmnd[0];
1602 + cmd->scsi_done = done;
1603 + cmd->host_scribble = NULL;
1604 + cmd->result = 0;
1605 +- if ((scsicmd == SYNCHRONIZE_CACHE) ||(scsicmd == SEND_DIAGNOSTIC)){
1606 +- if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
1607 +- cmd->result = (DID_NO_CONNECT << 16);
1608 +- }
1609 +- cmd->scsi_done(cmd);
1610 +- return 0;
1611 +- }
1612 + if (target == 16) {
1613 + /* virtual device for iop message transfer */
1614 + arcmsr_handle_virtual_command(acb, cmd);
1615 +diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
1616 +index 7a1c4b4e764b..a991690167aa 100644
1617 +--- a/drivers/scsi/megaraid/megaraid_sas_base.c
1618 ++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
1619 +@@ -1622,16 +1622,13 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
1620 + goto out_done;
1621 + }
1622 +
1623 +- switch (scmd->cmnd[0]) {
1624 +- case SYNCHRONIZE_CACHE:
1625 +- /*
1626 +- * FW takes care of flush cache on its own
1627 +- * No need to send it down
1628 +- */
1629 ++ /*
1630 ++ * FW takes care of flush cache on its own for Virtual Disk.
1631 ++ * No need to send it down for VD. For JBOD send SYNCHRONIZE_CACHE to FW.
1632 ++ */
1633 ++ if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd)) {
1634 + scmd->result = DID_OK << 16;
1635 + goto out_done;
1636 +- default:
1637 +- break;
1638 + }
1639 +
1640 + if (instance->instancet->build_and_issue_cmd(instance, scmd)) {
1641 +diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
1642 +index 1f8e2dc9c616..c07d1cd28e91 100644
1643 +--- a/drivers/scsi/scsi_debug.c
1644 ++++ b/drivers/scsi/scsi_debug.c
1645 +@@ -4993,6 +4993,7 @@ static void __exit scsi_debug_exit(void)
1646 + if (dif_storep)
1647 + vfree(dif_storep);
1648 +
1649 ++ vfree(map_storep);
1650 + vfree(fake_storep);
1651 + }
1652 +
1653 +diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
1654 +index 6efab1c455e1..f7aa434811b5 100644
1655 +--- a/drivers/scsi/scsi_scan.c
1656 ++++ b/drivers/scsi/scsi_scan.c
1657 +@@ -1516,12 +1516,12 @@ retry:
1658 + out_err:
1659 + kfree(lun_data);
1660 + out:
1661 +- scsi_device_put(sdev);
1662 + if (scsi_device_created(sdev))
1663 + /*
1664 + * the sdev we used didn't appear in the report luns scan
1665 + */
1666 + __scsi_remove_device(sdev);
1667 ++ scsi_device_put(sdev);
1668 + return ret;
1669 + }
1670 +
1671 +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
1672 +index 7580abe7cb45..1cf3c0819b81 100644
1673 +--- a/drivers/target/target_core_transport.c
1674 ++++ b/drivers/target/target_core_transport.c
1675 +@@ -2495,8 +2495,10 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
1676 + * fabric acknowledgement that requires two target_put_sess_cmd()
1677 + * invocations before se_cmd descriptor release.
1678 + */
1679 +- if (ack_kref)
1680 ++ if (ack_kref) {
1681 + kref_get(&se_cmd->cmd_kref);
1682 ++ se_cmd->se_cmd_flags |= SCF_ACK_KREF;
1683 ++ }
1684 +
1685 + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
1686 + if (se_sess->sess_tearing_down) {
1687 +diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
1688 +index 4609305a1591..ddb0d6bc45f2 100644
1689 +--- a/drivers/target/target_core_xcopy.c
1690 ++++ b/drivers/target/target_core_xcopy.c
1691 +@@ -675,6 +675,7 @@ static int target_xcopy_read_source(
1692 + rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
1693 + remote_port, true);
1694 + if (rc < 0) {
1695 ++ ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
1696 + transport_generic_free_cmd(se_cmd, 0);
1697 + return rc;
1698 + }
1699 +@@ -686,6 +687,7 @@ static int target_xcopy_read_source(
1700 +
1701 + rc = target_xcopy_issue_pt_cmd(xpt_cmd);
1702 + if (rc < 0) {
1703 ++ ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
1704 + transport_generic_free_cmd(se_cmd, 0);
1705 + return rc;
1706 + }
1707 +@@ -736,6 +738,7 @@ static int target_xcopy_write_destination(
1708 + remote_port, false);
1709 + if (rc < 0) {
1710 + struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd;
1711 ++ ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
1712 + /*
1713 + * If the failure happened before the t_mem_list hand-off in
1714 + * target_xcopy_setup_pt_cmd(), Reset memory + clear flag so that
1715 +@@ -751,6 +754,7 @@ static int target_xcopy_write_destination(
1716 +
1717 + rc = target_xcopy_issue_pt_cmd(xpt_cmd);
1718 + if (rc < 0) {
1719 ++ ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
1720 + se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
1721 + transport_generic_free_cmd(se_cmd, 0);
1722 + return rc;
1723 +@@ -837,9 +841,14 @@ static void target_xcopy_do_work(struct work_struct *work)
1724 + out:
1725 + xcopy_pt_undepend_remotedev(xop);
1726 + kfree(xop);
1727 +-
1728 +- pr_warn("target_xcopy_do_work: Setting X-COPY CHECK_CONDITION -> sending response\n");
1729 +- ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
1730 ++ /*
1731 ++ * Don't override an error scsi status if it has already been set
1732 ++ */
1733 ++ if (ec_cmd->scsi_status == SAM_STAT_GOOD) {
1734 ++ pr_warn_ratelimited("target_xcopy_do_work: rc: %d, Setting X-COPY"
1735 ++ " CHECK_CONDITION -> sending response\n", rc);
1736 ++ ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
1737 ++ }
1738 + target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION);
1739 + }
1740 +
1741 +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
1742 +index ba86956ef4b5..2df90a54509a 100644
1743 +--- a/drivers/tty/vt/vt.c
1744 ++++ b/drivers/tty/vt/vt.c
1745 +@@ -865,10 +865,15 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
1746 + if (new_cols == vc->vc_cols && new_rows == vc->vc_rows)
1747 + return 0;
1748 +
1749 ++ if (new_screen_size > (4 << 20))
1750 ++ return -EINVAL;
1751 + newscreen = kmalloc(new_screen_size, GFP_USER);
1752 + if (!newscreen)
1753 + return -ENOMEM;
1754 +
1755 ++ if (vc == sel_cons)
1756 ++ clear_selection();
1757 ++
1758 + old_rows = vc->vc_rows;
1759 + old_row_size = vc->vc_size_row;
1760 +
1761 +diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
1762 +index f1fd777ef4ec..82e63f73bfd5 100644
1763 +--- a/drivers/usb/gadget/function/u_ether.c
1764 ++++ b/drivers/usb/gadget/function/u_ether.c
1765 +@@ -591,8 +591,9 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
1766 +
1767 + /* throttle high/super speed IRQ rate back slightly */
1768 + if (gadget_is_dualspeed(dev->gadget))
1769 +- req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
1770 +- dev->gadget->speed == USB_SPEED_SUPER)
1771 ++ req->no_interrupt = (((dev->gadget->speed == USB_SPEED_HIGH ||
1772 ++ dev->gadget->speed == USB_SPEED_SUPER)) &&
1773 ++ !list_empty(&dev->tx_reqs))
1774 + ? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0)
1775 + : 0;
1776 +
1777 +diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
1778 +index 1dab9dfbca6a..c5de2e24c9e7 100644
1779 +--- a/drivers/usb/host/ohci-hcd.c
1780 ++++ b/drivers/usb/host/ohci-hcd.c
1781 +@@ -72,7 +72,7 @@
1782 + static const char hcd_name [] = "ohci_hcd";
1783 +
1784 + #define STATECHANGE_DELAY msecs_to_jiffies(300)
1785 +-#define IO_WATCHDOG_DELAY msecs_to_jiffies(250)
1786 ++#define IO_WATCHDOG_DELAY msecs_to_jiffies(275)
1787 +
1788 + #include "ohci.h"
1789 + #include "pci-quirks.h"
1790 +diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
1791 +index 9d781d3ccc09..2dd322e92951 100644
1792 +--- a/drivers/usb/host/xhci-hub.c
1793 ++++ b/drivers/usb/host/xhci-hub.c
1794 +@@ -1224,6 +1224,35 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
1795 + return 0;
1796 + }
1797 +
1798 ++/*
1799 ++ * Workaround for missing Cold Attach Status (CAS) if device re-plugged in S3.
1800 ++ * warm reset a USB3 device stuck in polling or compliance mode after resume.
1801 ++ * See Intel 100/c230 series PCH specification update Doc #332692-006 Errata #8
1802 ++ */
1803 ++static bool xhci_port_missing_cas_quirk(int port_index,
1804 ++ __le32 __iomem **port_array)
1805 ++{
1806 ++ u32 portsc;
1807 ++
1808 ++ portsc = readl(port_array[port_index]);
1809 ++
1810 ++ /* if any of these are set we are not stuck */
1811 ++ if (portsc & (PORT_CONNECT | PORT_CAS))
1812 ++ return false;
1813 ++
1814 ++ if (((portsc & PORT_PLS_MASK) != XDEV_POLLING) &&
1815 ++ ((portsc & PORT_PLS_MASK) != XDEV_COMP_MODE))
1816 ++ return false;
1817 ++
1818 ++ /* clear wakeup/change bits, and do a warm port reset */
1819 ++ portsc &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
1820 ++ portsc |= PORT_WR;
1821 ++ writel(portsc, port_array[port_index]);
1822 ++ /* flush write */
1823 ++ readl(port_array[port_index]);
1824 ++ return true;
1825 ++}
1826 ++
1827 + int xhci_bus_resume(struct usb_hcd *hcd)
1828 + {
1829 + struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1830 +@@ -1258,6 +1287,14 @@ int xhci_bus_resume(struct usb_hcd *hcd)
1831 + int slot_id;
1832 +
1833 + temp = readl(port_array[port_index]);
1834 ++
1835 ++ /* warm reset CAS limited ports stuck in polling/compliance */
1836 ++ if ((xhci->quirks & XHCI_MISSING_CAS) &&
1837 ++ (hcd->speed >= HCD_USB3) &&
1838 ++ xhci_port_missing_cas_quirk(port_index, port_array)) {
1839 ++ xhci_dbg(xhci, "reset stuck port %d\n", port_index);
1840 ++ continue;
1841 ++ }
1842 + if (DEV_SUPERSPEED(temp))
1843 + temp &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
1844 + else
1845 +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
1846 +index 54caaf87c567..fc60a9e8a129 100644
1847 +--- a/drivers/usb/host/xhci-pci.c
1848 ++++ b/drivers/usb/host/xhci-pci.c
1849 +@@ -45,11 +45,13 @@
1850 +
1851 + #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31
1852 + #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
1853 ++#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI 0x9cb1
1854 + #define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5
1855 + #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
1856 + #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
1857 + #define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8
1858 + #define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8
1859 ++#define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8
1860 +
1861 + static const char hcd_name[] = "xhci_hcd";
1862 +
1863 +@@ -147,7 +149,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
1864 + xhci->quirks |= XHCI_SPURIOUS_REBOOT;
1865 + }
1866 + if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
1867 +- pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
1868 ++ (pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI ||
1869 ++ pdev->device == PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI)) {
1870 + xhci->quirks |= XHCI_SPURIOUS_REBOOT;
1871 + xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
1872 + }
1873 +@@ -163,6 +166,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
1874 + pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
1875 + xhci->quirks |= XHCI_SSIC_PORT_UNUSED;
1876 + }
1877 ++ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
1878 ++ (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
1879 ++ pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI))
1880 ++ xhci->quirks |= XHCI_MISSING_CAS;
1881 ++
1882 + if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
1883 + pdev->device == PCI_DEVICE_ID_EJ168) {
1884 + xhci->quirks |= XHCI_RESET_ON_RESUME;
1885 +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
1886 +index c5d6963e9cbe..f33028642e31 100644
1887 +--- a/drivers/usb/host/xhci.h
1888 ++++ b/drivers/usb/host/xhci.h
1889 +@@ -286,6 +286,8 @@ struct xhci_op_regs {
1890 + #define XDEV_U2 (0x2 << 5)
1891 + #define XDEV_U3 (0x3 << 5)
1892 + #define XDEV_INACTIVE (0x6 << 5)
1893 ++#define XDEV_POLLING (0x7 << 5)
1894 ++#define XDEV_COMP_MODE (0xa << 5)
1895 + #define XDEV_RESUME (0xf << 5)
1896 + /* true: port has power (see HCC_PPC) */
1897 + #define PORT_POWER (1 << 9)
1898 +@@ -1573,6 +1575,7 @@ struct xhci_hcd {
1899 + #define XHCI_PME_STUCK_QUIRK (1 << 20)
1900 + #define XHCI_SSIC_PORT_UNUSED (1 << 22)
1901 + #define XHCI_NO_64BIT_SUPPORT (1 << 23)
1902 ++#define XHCI_MISSING_CAS (1 << 24)
1903 + unsigned int num_active_eps;
1904 + unsigned int limit_active_eps;
1905 + /* There are two roothubs to keep track of bus suspend info for */
1906 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
1907 +index f49e859ac5ce..6d4d8b828971 100644
1908 +--- a/drivers/usb/serial/cp210x.c
1909 ++++ b/drivers/usb/serial/cp210x.c
1910 +@@ -844,7 +844,9 @@ static int cp210x_tiocmget(struct tty_struct *tty)
1911 + unsigned int control;
1912 + int result;
1913 +
1914 +- cp210x_get_config(port, CP210X_GET_MDMSTS, &control, 1);
1915 ++ result = cp210x_get_config(port, CP210X_GET_MDMSTS, &control, 1);
1916 ++ if (result)
1917 ++ return result;
1918 +
1919 + result = ((control & CONTROL_DTR) ? TIOCM_DTR : 0)
1920 + |((control & CONTROL_RTS) ? TIOCM_RTS : 0)
1921 +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
1922 +index 8c48c9d83d48..494167fe6a2c 100644
1923 +--- a/drivers/usb/serial/ftdi_sio.c
1924 ++++ b/drivers/usb/serial/ftdi_sio.c
1925 +@@ -986,7 +986,8 @@ static const struct usb_device_id id_table_combined[] = {
1926 + /* ekey Devices */
1927 + { USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) },
1928 + /* Infineon Devices */
1929 +- { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
1930 ++ { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC1798_PID, 1) },
1931 ++ { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC2X7_PID, 1) },
1932 + /* GE Healthcare devices */
1933 + { USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) },
1934 + /* Active Research (Actisense) devices */
1935 +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
1936 +index 48db84f25cc9..db1a9b3a5f38 100644
1937 +--- a/drivers/usb/serial/ftdi_sio_ids.h
1938 ++++ b/drivers/usb/serial/ftdi_sio_ids.h
1939 +@@ -626,8 +626,9 @@
1940 + /*
1941 + * Infineon Technologies
1942 + */
1943 +-#define INFINEON_VID 0x058b
1944 +-#define INFINEON_TRIBOARD_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
1945 ++#define INFINEON_VID 0x058b
1946 ++#define INFINEON_TRIBOARD_TC1798_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
1947 ++#define INFINEON_TRIBOARD_TC2X7_PID 0x0043 /* DAS JTAG TriBoard TC2X7 V1.0 */
1948 +
1949 + /*
1950 + * Acton Research Corp.
1951 +diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
1952 +index a0ca291bc07f..e7e29c797824 100644
1953 +--- a/drivers/usb/serial/usb-serial.c
1954 ++++ b/drivers/usb/serial/usb-serial.c
1955 +@@ -1077,7 +1077,8 @@ static int usb_serial_probe(struct usb_interface *interface,
1956 +
1957 + serial->disconnected = 0;
1958 +
1959 +- usb_serial_console_init(serial->port[0]->minor);
1960 ++ if (num_ports > 0)
1961 ++ usb_serial_console_init(serial->port[0]->minor);
1962 + exit:
1963 + module_put(type->driver.owner);
1964 + return 0;
1965 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
1966 +index bd3c92b4bcee..32ecb95f6214 100644
1967 +--- a/fs/btrfs/tree-log.c
1968 ++++ b/fs/btrfs/tree-log.c
1969 +@@ -2596,14 +2596,12 @@ static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
1970 + int index, int error)
1971 + {
1972 + struct btrfs_log_ctx *ctx;
1973 ++ struct btrfs_log_ctx *safe;
1974 +
1975 +- if (!error) {
1976 +- INIT_LIST_HEAD(&root->log_ctxs[index]);
1977 +- return;
1978 +- }
1979 +-
1980 +- list_for_each_entry(ctx, &root->log_ctxs[index], list)
1981 ++ list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
1982 ++ list_del_init(&ctx->list);
1983 + ctx->log_ret = error;
1984 ++ }
1985 +
1986 + INIT_LIST_HEAD(&root->log_ctxs[index]);
1987 + }
1988 +@@ -2842,13 +2840,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
1989 + mutex_unlock(&root->log_mutex);
1990 +
1991 + out_wake_log_root:
1992 +- /*
1993 +- * We needn't get log_mutex here because we are sure all
1994 +- * the other tasks are blocked.
1995 +- */
1996 ++ mutex_lock(&log_root_tree->log_mutex);
1997 + btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
1998 +
1999 +- mutex_lock(&log_root_tree->log_mutex);
2000 + log_root_tree->log_transid_committed++;
2001 + atomic_set(&log_root_tree->log_commit[index2], 0);
2002 + mutex_unlock(&log_root_tree->log_mutex);
2003 +@@ -2856,10 +2850,8 @@ out_wake_log_root:
2004 + if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
2005 + wake_up(&log_root_tree->log_commit_wait[index2]);
2006 + out:
2007 +- /* See above. */
2008 +- btrfs_remove_all_log_ctxs(root, index1, ret);
2009 +-
2010 + mutex_lock(&root->log_mutex);
2011 ++ btrfs_remove_all_log_ctxs(root, index1, ret);
2012 + root->log_transid_committed++;
2013 + atomic_set(&root->log_commit[index1], 0);
2014 + mutex_unlock(&root->log_mutex);
2015 +diff --git a/fs/ceph/file.c b/fs/ceph/file.c
2016 +index 3b6b522b4b31..5ae447cbd2b5 100644
2017 +--- a/fs/ceph/file.c
2018 ++++ b/fs/ceph/file.c
2019 +@@ -868,7 +868,8 @@ again:
2020 + statret = __ceph_do_getattr(inode, page,
2021 + CEPH_STAT_CAP_INLINE_DATA, !!page);
2022 + if (statret < 0) {
2023 +- __free_page(page);
2024 ++ if (page)
2025 ++ __free_page(page);
2026 + if (statret == -ENODATA) {
2027 + BUG_ON(retry_op != READ_INLINE);
2028 + goto again;
2029 +diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
2030 +index d67a16f2a45d..350f67fb5b9c 100644
2031 +--- a/fs/isofs/inode.c
2032 ++++ b/fs/isofs/inode.c
2033 +@@ -690,6 +690,11 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent)
2034 + pri_bh = NULL;
2035 +
2036 + root_found:
2037 ++ /* We don't support read-write mounts */
2038 ++ if (!(s->s_flags & MS_RDONLY)) {
2039 ++ error = -EACCES;
2040 ++ goto out_freebh;
2041 ++ }
2042 +
2043 + if (joliet_level && (pri == NULL || !opt.rock)) {
2044 + /* This is the case of Joliet with the norock mount flag.
2045 +@@ -1503,9 +1508,6 @@ struct inode *__isofs_iget(struct super_block *sb,
2046 + static struct dentry *isofs_mount(struct file_system_type *fs_type,
2047 + int flags, const char *dev_name, void *data)
2048 + {
2049 +- /* We don't support read-write mounts */
2050 +- if (!(flags & MS_RDONLY))
2051 +- return ERR_PTR(-EACCES);
2052 + return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super);
2053 + }
2054 +
2055 +diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
2056 +index ff2f2e6ad311..2abbb2babcae 100644
2057 +--- a/fs/jbd2/transaction.c
2058 ++++ b/fs/jbd2/transaction.c
2059 +@@ -1087,6 +1087,7 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
2060 + JBUFFER_TRACE(jh, "file as BJ_Reserved");
2061 + spin_lock(&journal->j_list_lock);
2062 + __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
2063 ++ spin_unlock(&journal->j_list_lock);
2064 + } else if (jh->b_transaction == journal->j_committing_transaction) {
2065 + /* first access by this transaction */
2066 + jh->b_modified = 0;
2067 +@@ -1094,8 +1095,8 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
2068 + JBUFFER_TRACE(jh, "set next transaction");
2069 + spin_lock(&journal->j_list_lock);
2070 + jh->b_next_transaction = transaction;
2071 ++ spin_unlock(&journal->j_list_lock);
2072 + }
2073 +- spin_unlock(&journal->j_list_lock);
2074 + jbd_unlock_bh_state(bh);
2075 +
2076 + /*
2077 +diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
2078 +index 4d8aa749d9b2..c57cd417802b 100644
2079 +--- a/fs/overlayfs/copy_up.c
2080 ++++ b/fs/overlayfs/copy_up.c
2081 +@@ -129,6 +129,8 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
2082 + len -= bytes;
2083 + }
2084 +
2085 ++ if (!error)
2086 ++ error = vfs_fsync(new_file, 0);
2087 + fput(new_file);
2088 + out_fput:
2089 + fput(old_file);
2090 +diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
2091 +index 27060fc855d4..e0af247f4740 100644
2092 +--- a/fs/ubifs/dir.c
2093 ++++ b/fs/ubifs/dir.c
2094 +@@ -350,7 +350,7 @@ static unsigned int vfs_dent_type(uint8_t type)
2095 + */
2096 + static int ubifs_readdir(struct file *file, struct dir_context *ctx)
2097 + {
2098 +- int err;
2099 ++ int err = 0;
2100 + struct qstr nm;
2101 + union ubifs_key key;
2102 + struct ubifs_dent_node *dent;
2103 +@@ -449,16 +449,23 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx)
2104 + }
2105 +
2106 + out:
2107 +- if (err != -ENOENT) {
2108 +- ubifs_err(c, "cannot find next direntry, error %d", err);
2109 +- return err;
2110 +- }
2111 +-
2112 + kfree(file->private_data);
2113 + file->private_data = NULL;
2114 ++
2115 ++ if (err != -ENOENT)
2116 ++ ubifs_err(c, "cannot find next direntry, error %d", err);
2117 ++ else
2118 ++ /*
2119 ++ * -ENOENT is a non-fatal error in this context, the TNC uses
2120 ++ * it to indicate that the cursor moved past the current directory
2121 ++ * and readdir() has to stop.
2122 ++ */
2123 ++ err = 0;
2124 ++
2125 ++
2126 + /* 2 is a special value indicating that there are no more direntries */
2127 + ctx->pos = 2;
2128 +- return 0;
2129 ++ return err;
2130 + }
2131 +
2132 + /* Free saved readdir() state when the directory is closed */
2133 +diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
2134 +index fd65b3f1923c..7270162b72e4 100644
2135 +--- a/fs/ubifs/xattr.c
2136 ++++ b/fs/ubifs/xattr.c
2137 +@@ -173,6 +173,7 @@ out_cancel:
2138 + host_ui->xattr_cnt -= 1;
2139 + host_ui->xattr_size -= CALC_DENT_SIZE(nm->len);
2140 + host_ui->xattr_size -= CALC_XATTR_BYTES(size);
2141 ++ host_ui->xattr_names -= nm->len;
2142 + mutex_unlock(&host_ui->ui_mutex);
2143 + out_free:
2144 + make_bad_inode(inode);
2145 +@@ -527,6 +528,7 @@ out_cancel:
2146 + host_ui->xattr_cnt += 1;
2147 + host_ui->xattr_size += CALC_DENT_SIZE(nm->len);
2148 + host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len);
2149 ++ host_ui->xattr_names += nm->len;
2150 + mutex_unlock(&host_ui->ui_mutex);
2151 + ubifs_release_budget(c, &req);
2152 + make_bad_inode(inode);
2153 +diff --git a/fs/xfs/libxfs/xfs_dquot_buf.c b/fs/xfs/libxfs/xfs_dquot_buf.c
2154 +index f48c3040c9ce..6021c322316c 100644
2155 +--- a/fs/xfs/libxfs/xfs_dquot_buf.c
2156 ++++ b/fs/xfs/libxfs/xfs_dquot_buf.c
2157 +@@ -191,8 +191,7 @@ xfs_dquot_buf_verify_crc(
2158 + if (mp->m_quotainfo)
2159 + ndquots = mp->m_quotainfo->qi_dqperchunk;
2160 + else
2161 +- ndquots = xfs_calc_dquots_per_chunk(
2162 +- XFS_BB_TO_FSB(mp, bp->b_length));
2163 ++ ndquots = xfs_calc_dquots_per_chunk(bp->b_length);
2164 +
2165 + for (i = 0; i < ndquots; i++, d++) {
2166 + if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
2167 +diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
2168 +index 523f04c90dea..cede0a45b9c0 100644
2169 +--- a/include/drm/drm_dp_helper.h
2170 ++++ b/include/drm/drm_dp_helper.h
2171 +@@ -568,6 +568,10 @@
2172 + #define MODE_I2C_READ 4
2173 + #define MODE_I2C_STOP 8
2174 +
2175 ++/* DP 1.2 MST PORTs - Section 2.5.1 v1.2a spec */
2176 ++#define DP_MST_PHYSICAL_PORT_0 0
2177 ++#define DP_MST_LOGICAL_PORT_0 8
2178 ++
2179 + #define DP_LINK_STATUS_SIZE 6
2180 + bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
2181 + int lane_count);
2182 +diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
2183 +index cbf1ce800fd1..5ef99b18966d 100644
2184 +--- a/include/linux/irqchip/arm-gic-v3.h
2185 ++++ b/include/linux/irqchip/arm-gic-v3.h
2186 +@@ -218,7 +218,7 @@
2187 + #define GITS_BASER_TYPE_SHIFT (56)
2188 + #define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7)
2189 + #define GITS_BASER_ENTRY_SIZE_SHIFT (48)
2190 +-#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1)
2191 ++#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
2192 + #define GITS_BASER_NonShareable (0UL << 10)
2193 + #define GITS_BASER_InnerShareable (1UL << 10)
2194 + #define GITS_BASER_OuterShareable (2UL << 10)
2195 +diff --git a/mm/list_lru.c b/mm/list_lru.c
2196 +index 909eca2c820e..84b4c21d78d7 100644
2197 +--- a/mm/list_lru.c
2198 ++++ b/mm/list_lru.c
2199 +@@ -532,6 +532,8 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware,
2200 + err = memcg_init_list_lru(lru, memcg_aware);
2201 + if (err) {
2202 + kfree(lru->node);
2203 ++ /* Do this so a list_lru_destroy() doesn't crash: */
2204 ++ lru->node = NULL;
2205 + goto out;
2206 + }
2207 +
2208 +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
2209 +index 3073164a6fcf..06d1732e2094 100644
2210 +--- a/net/mac80211/rx.c
2211 ++++ b/net/mac80211/rx.c
2212 +@@ -2179,16 +2179,22 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
2213 + if (!(status->rx_flags & IEEE80211_RX_AMSDU))
2214 + return RX_CONTINUE;
2215 +
2216 +- if (ieee80211_has_a4(hdr->frame_control) &&
2217 +- rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
2218 +- !rx->sdata->u.vlan.sta)
2219 +- return RX_DROP_UNUSABLE;
2220 ++ if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
2221 ++ switch (rx->sdata->vif.type) {
2222 ++ case NL80211_IFTYPE_AP_VLAN:
2223 ++ if (!rx->sdata->u.vlan.sta)
2224 ++ return RX_DROP_UNUSABLE;
2225 ++ break;
2226 ++ case NL80211_IFTYPE_STATION:
2227 ++ if (!rx->sdata->u.mgd.use_4addr)
2228 ++ return RX_DROP_UNUSABLE;
2229 ++ break;
2230 ++ default:
2231 ++ return RX_DROP_UNUSABLE;
2232 ++ }
2233 ++ }
2234 +
2235 +- if (is_multicast_ether_addr(hdr->addr1) &&
2236 +- ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
2237 +- rx->sdata->u.vlan.sta) ||
2238 +- (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
2239 +- rx->sdata->u.mgd.use_4addr)))
2240 ++ if (is_multicast_ether_addr(hdr->addr1))
2241 + return RX_DROP_UNUSABLE;
2242 +
2243 + skb->dev = dev;
2244 +diff --git a/scripts/sortextable.c b/scripts/sortextable.c
2245 +index 1052d4834a44..4dd16a72a2ef 100644
2246 +--- a/scripts/sortextable.c
2247 ++++ b/scripts/sortextable.c
2248 +@@ -205,6 +205,35 @@ static int compare_relative_table(const void *a, const void *b)
2249 + return 0;
2250 + }
2251 +
2252 ++static void x86_sort_relative_table(char *extab_image, int image_size)
2253 ++{
2254 ++ int i;
2255 ++
2256 ++ i = 0;
2257 ++ while (i < image_size) {
2258 ++ uint32_t *loc = (uint32_t *)(extab_image + i);
2259 ++
2260 ++ w(r(loc) + i, loc);
2261 ++ w(r(loc + 1) + i + 4, loc + 1);
2262 ++ w(r(loc + 2) + i + 8, loc + 2);
2263 ++
2264 ++ i += sizeof(uint32_t) * 3;
2265 ++ }
2266 ++
2267 ++ qsort(extab_image, image_size / 12, 12, compare_relative_table);
2268 ++
2269 ++ i = 0;
2270 ++ while (i < image_size) {
2271 ++ uint32_t *loc = (uint32_t *)(extab_image + i);
2272 ++
2273 ++ w(r(loc) - i, loc);
2274 ++ w(r(loc + 1) - (i + 4), loc + 1);
2275 ++ w(r(loc + 2) - (i + 8), loc + 2);
2276 ++
2277 ++ i += sizeof(uint32_t) * 3;
2278 ++ }
2279 ++}
2280 ++
2281 + static void sort_relative_table(char *extab_image, int image_size)
2282 + {
2283 + int i;
2284 +@@ -277,6 +306,9 @@ do_file(char const *const fname)
2285 + break;
2286 + case EM_386:
2287 + case EM_X86_64:
2288 ++ custom_sort = x86_sort_relative_table;
2289 ++ break;
2290 ++
2291 + case EM_S390:
2292 + custom_sort = sort_relative_table;
2293 + break;
2294 +diff --git a/security/keys/proc.c b/security/keys/proc.c
2295 +index f0611a6368cd..b9f531c9e4fa 100644
2296 +--- a/security/keys/proc.c
2297 ++++ b/security/keys/proc.c
2298 +@@ -181,7 +181,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
2299 + struct timespec now;
2300 + unsigned long timo;
2301 + key_ref_t key_ref, skey_ref;
2302 +- char xbuf[12];
2303 ++ char xbuf[16];
2304 + int rc;
2305 +
2306 + struct keyring_search_context ctx = {
2307 +diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
2308 +index 26ce990592a0..40072d630b49 100644
2309 +--- a/sound/pci/hda/hda_controller.c
2310 ++++ b/sound/pci/hda/hda_controller.c
2311 +@@ -1738,7 +1738,7 @@ irqreturn_t azx_interrupt(int irq, void *dev_id)
2312 + status = azx_readb(chip, RIRBSTS);
2313 + if (status & RIRB_INT_MASK) {
2314 + if (status & RIRB_INT_RESPONSE) {
2315 +- if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)
2316 ++ if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
2317 + udelay(80);
2318 + azx_update_rirb(chip);
2319 + }
2320 +diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
2321 +index 0efdb094d21c..6bb5340cf842 100644
2322 +--- a/sound/pci/hda/hda_controller.h
2323 ++++ b/sound/pci/hda/hda_controller.h
2324 +@@ -158,7 +158,7 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
2325 + #define AZX_DCAPS_SNOOP_MASK (3 << 10) /* snoop type mask */
2326 + #define AZX_DCAPS_SNOOP_OFF (1 << 12) /* snoop default off */
2327 + #define AZX_DCAPS_RIRB_DELAY (1 << 13) /* Long delay in read loop */
2328 +-#define AZX_DCAPS_RIRB_PRE_DELAY (1 << 14) /* Put a delay before read */
2329 ++/* 14 unused */
2330 + #define AZX_DCAPS_CTX_WORKAROUND (1 << 15) /* X-Fi workaround */
2331 + #define AZX_DCAPS_POSFIX_LPIB (1 << 16) /* Use LPIB as default */
2332 + #define AZX_DCAPS_POSFIX_VIA (1 << 17) /* Use VIACOMBO as default */
2333 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
2334 +index c8506496826a..16d09825a995 100644
2335 +--- a/sound/pci/hda/hda_intel.c
2336 ++++ b/sound/pci/hda/hda_intel.c
2337 +@@ -2220,14 +2220,12 @@ static const struct pci_device_id azx_ids[] = {
2338 + .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
2339 + .class_mask = 0xffffff,
2340 + .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
2341 +- AZX_DCAPS_NO_64BIT |
2342 +- AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB },
2343 ++ AZX_DCAPS_NO_64BIT | AZX_DCAPS_POSFIX_LPIB },
2344 + #else
2345 + /* this entry seems still valid -- i.e. without emu20kx chip */
2346 + { PCI_DEVICE(0x1102, 0x0009),
2347 + .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
2348 +- AZX_DCAPS_NO_64BIT |
2349 +- AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB },
2350 ++ AZX_DCAPS_NO_64BIT | AZX_DCAPS_POSFIX_LPIB },
2351 + #endif
2352 + /* CM8888 */
2353 + { PCI_DEVICE(0x13f6, 0x5011),
2354 +diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
2355 +index ecc2a4ea014d..32719f28aa86 100644
2356 +--- a/sound/usb/quirks-table.h
2357 ++++ b/sound/usb/quirks-table.h
2358 +@@ -2898,6 +2898,23 @@ AU0828_DEVICE(0x2040, 0x7260, "Hauppauge", "HVR-950Q"),
2359 + AU0828_DEVICE(0x2040, 0x7213, "Hauppauge", "HVR-950Q"),
2360 + AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
2361 +
2362 ++/* Syntek STK1160 */
2363 ++{
2364 ++ .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
2365 ++ USB_DEVICE_ID_MATCH_INT_CLASS |
2366 ++ USB_DEVICE_ID_MATCH_INT_SUBCLASS,
2367 ++ .idVendor = 0x05e1,
2368 ++ .idProduct = 0x0408,
2369 ++ .bInterfaceClass = USB_CLASS_AUDIO,
2370 ++ .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
2371 ++ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
2372 ++ .vendor_name = "Syntek",
2373 ++ .product_name = "STK1160",
2374 ++ .ifnum = QUIRK_ANY_INTERFACE,
2375 ++ .type = QUIRK_AUDIO_ALIGN_TRANSFER
2376 ++ }
2377 ++},
2378 ++
2379 + /* Digidesign Mbox */
2380 + {
2381 + /* Thanks to Clemens Ladisch <clemens@×××××××.de> */